loader.cpp 28 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792
  1. /*
  2. * Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. #include "se_wrapper.h"
  32. #include "se_error_internal.h"
  33. #include "arch.h"
  34. #include "util.h"
  35. #include "loader.h"
  36. #include "se_page_attr.h"
  37. #include "enclave.h"
  38. #include "enclave_creator.h"
  39. #include "routine.h"
  40. #include "sgx_attributes.h"
  41. #include "se_vendor.h"
  42. #include "se_detect.h"
  43. #include "binparser.h"
  44. #include <assert.h>
  45. #include <vector>
  46. #include <algorithm>
  47. #define __STDC_FORMAT_MACROS
  48. #include <inttypes.h>
  49. #include <sys/mman.h>
  50. // enclave creator instance
  51. extern EnclaveCreator* g_enclave_creator;
  52. EnclaveCreator* get_enclave_creator(void)
  53. {
  54. return g_enclave_creator;
  55. }
  56. CLoader::CLoader(uint8_t *mapped_file_base, BinParser &parser)
  57. : m_mapped_file_base(mapped_file_base)
  58. , m_enclave_id(0)
  59. , m_start_addr(NULL)
  60. , m_metadata(NULL)
  61. , m_parser(parser)
  62. {
  63. memset(&m_secs, 0, sizeof(m_secs));
  64. }
  65. CLoader::~CLoader()
  66. {
  67. }
  68. sgx_enclave_id_t CLoader::get_enclave_id() const
  69. {
  70. return m_enclave_id;
  71. }
  72. const void* CLoader::get_start_addr() const
  73. {
  74. return m_start_addr;
  75. }
  76. const std::vector<tcs_t *>& CLoader::get_tcs_list() const
  77. {
  78. return m_tcs_list;
  79. }
  80. const secs_t& CLoader::get_secs() const
  81. {
  82. return m_secs;
  83. }
  84. void* CLoader::get_symbol_address(const char * const symbol)
  85. {
  86. uint64_t rva = m_parser.get_symbol_rva(symbol);
  87. if(0 == rva)
  88. return NULL;
  89. return GET_PTR(void, m_start_addr, rva);
  90. }
  91. // is_relocation_page returns true if the specified RVA is a writable relocation page based on the bitmap.
  92. bool CLoader::is_relocation_page(const uint64_t rva, vector<uint8_t> *bitmap)
  93. {
  94. uint64_t page_frame = rva >> SE_PAGE_SHIFT;
  95. //NOTE:
  96. // Current enclave size is not beyond 128G, so the type-casting from (uint64>>15) to (size_t) is OK.
  97. // In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
  98. // It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
  99. size_t index = (size_t)(page_frame / 8);
  100. if(bitmap && (index < bitmap->size()))
  101. {
  102. return ((*bitmap)[index] & (1 << (page_frame % 8)));
  103. }
  104. return false;
  105. }
// Adds one section of the image to the enclave.
// Phase 1 EADDs the initialized (raw) bytes page by page -- per page because
// any page listed in the relocation bitmap must be added writable so the
// loader can patch it. Phase 2 EADDs the zero-filled tail covering the rest
// of the section's virtual size (.bss-style data).
// Returns SGX_SUCCESS or the first error from build_pages()/build_partial_page().
int CLoader::build_mem_region(const section_info_t &sec_info)
{
    int ret = SGX_SUCCESS;
    uint64_t offset = 0;
    sec_info_t sinfo;
    memset(&sinfo, 0, sizeof(sinfo));

    // Build pages of the section that contain initialized data. Each page
    // needs to be added individually as the page may hold relocation data, in
    // which case the page needs to be marked writable.
    while(offset < sec_info.raw_data_size)
    {
        uint64_t rva = sec_info.rva + offset;
        // Raw bytes that land on this page: limited by the page end and by
        // the remaining raw data.
        uint64_t size = MIN((SE_PAGE_SIZE - PAGE_OFFSET(rva)), (sec_info.raw_data_size - offset));
        sinfo.flags = sec_info.flag;
        // Relocation pages get W added on top of the section's flags.
        if(is_relocation_page(rva, sec_info.bitmap))
            sinfo.flags = sec_info.flag | SI_FLAG_W;
        if (size == SE_PAGE_SIZE)
            ret = build_pages(rva, size, sec_info.raw_data + offset, sinfo, ADD_EXTEND_PAGE);
        else
            // Partial page: zero-pad around the raw bytes, then add the page.
            ret = build_partial_page(rva, size, sec_info.raw_data + offset, sinfo, ADD_EXTEND_PAGE);
        if(SGX_SUCCESS != ret)
            return ret;

        // only the first time that rva may be not page aligned
        offset += SE_PAGE_SIZE - PAGE_OFFSET(rva);
    }
    assert(IS_PAGE_ALIGNED(sec_info.rva + offset));

    // Add any remaining uninitialized data. We can call build_pages directly
    // even if there are partial pages since the source is null, i.e. everything
    // is filled with '0'. Uninitialized data cannot be a relocation table, ergo
    // there is no need to check the relocation bitmap.
    if(sec_info.virtual_size > offset)
    {
        uint64_t rva = sec_info.rva + offset;
        size_t size = (size_t)(ROUND_TO_PAGE(sec_info.virtual_size - offset));
        sinfo.flags = sec_info.flag;
        if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
            return ret;
    }
    return SGX_SUCCESS;
}
// Adds every section of the binary to the enclave via build_mem_region().
// NOTE(review): for v1.5 metadata only, when rounding a section's
// virtual_size to a page *before* adding its rva yields one more page than
// rounding the section's end address, that extra trailing page is added
// explicitly -- presumably to keep the measurement compatible with the old
// signing tool; confirm against the signing-tool behavior. The extra page
// is only added if it does not run into the next section (first check) and
// once more after the loop for the highest-rva section.
int CLoader::build_sections(vector<uint8_t> *bitmap)
{
    int ret = SGX_SUCCESS;
    std::vector<Section*> sections = m_parser.get_sections();
    uint64_t max_rva =0;
    Section* last_section = NULL;

    for(unsigned int i = 0; i < sections.size() ; i++)
    {
        // v1.5 metadata: add the rounded-up tail page of the previous
        // highest-rva section, provided it stays below section i's page.
        if((META_DATA_MAKE_VERSION(SGX_1_5_MAJOR_VERSION,SGX_1_5_MINOR_VERSION ) == m_metadata->version) &&
           (last_section != NULL) &&
           (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())) &&
           (ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size()) < (sections[i]->get_rva() & (~(SE_PAGE_SIZE - 1)))))
        {
            size_t size = SE_PAGE_SIZE;
            sec_info_t sinfo;
            memset(&sinfo, 0, sizeof(sinfo));
            sinfo.flags = last_section->get_si_flags();
            uint64_t rva = ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size());
            if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
                return ret;
        }

        // Track the section with the highest rva seen so far.
        if(sections[i]->get_rva() > max_rva)
        {
            max_rva = sections[i]->get_rva();
            last_section = sections[i];
        }

        section_info_t sec_info = { sections[i]->raw_data(), sections[i]->raw_data_size(), sections[i]->get_rva(), sections[i]->virtual_size(), sections[i]->get_si_flags(), bitmap };
        if(SGX_SUCCESS != (ret = build_mem_region(sec_info)))
            return ret;
    }

    // Same v1.5 tail-page fix-up for the final (highest-rva) section.
    if((META_DATA_MAKE_VERSION(SGX_1_5_MAJOR_VERSION,SGX_1_5_MINOR_VERSION ) == m_metadata->version) &&
       (last_section != NULL) &&
       (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())))
    {
        size_t size = SE_PAGE_SIZE;
        sec_info_t sinfo;
        memset(&sinfo, 0, sizeof(sinfo));
        sinfo.flags = last_section->get_si_flags();
        uint64_t rva = ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size());
        if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
            return ret;
    }
    return SGX_SUCCESS;
}
  190. int CLoader::build_partial_page(const uint64_t rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr)
  191. {
  192. // RVA may or may not be aligned.
  193. uint64_t offset = PAGE_OFFSET(rva);
  194. // Initialize the page with '0', this serves as both the padding at the start
  195. // of the page (if it's not aligned) as well as the fill for any unitilized
  196. // bytes at the end of the page, e.g. .bss data.
  197. uint8_t page_data[SE_PAGE_SIZE];
  198. memset(page_data, 0, SE_PAGE_SIZE);
  199. // The amount of raw data may be less than the number of bytes on the page,
  200. // but that portion of page_data has already been filled (see above).
  201. memcpy_s(&page_data[offset], (size_t)(SE_PAGE_SIZE - offset), source, (size_t)size);
  202. // Add the page, trimming the start address to make it page aligned.
  203. return build_pages(TRIM_TO_PAGE(rva), SE_PAGE_SIZE, page_data, sinfo, attr);
  204. }
// EADDs 'size' bytes (a page-aligned count) starting at the page-aligned
// 'start_rva', one page per driver call.
// NOTE: 'source' is deliberately NOT advanced inside the loop -- the same
// single source page (or NULL, meaning a zero page) is added for every page
// in the range. Callers pass either exactly one page of content
// (build_mem_region/build_partial_page), NULL, or a one-page pattern to be
// replicated across the range (build_context).
int CLoader::build_pages(const uint64_t start_rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr)
{
    int ret = SGX_SUCCESS;
    uint64_t offset = 0;
    uint64_t rva = start_rva;

    assert(IS_PAGE_ALIGNED(start_rva) && IS_PAGE_ALIGNED(size));

    while(offset < size)
    {
        //call driver to add page;
        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, GET_PTR(void, source, 0), rva, sinfo, attr)))
        {
            //if add page failed , we should remove enclave somewhere;
            return ret;
        }
        offset += SE_PAGE_SIZE;
        rva += SE_PAGE_SIZE;
    }
    return SGX_SUCCESS;
}
// Materializes one layout entry at enclave offset start_rva + layout->rva.
// Three cases:
//  * content_offset != 0, SI_FLAGS_TCS: the metadata holds a TCS image
//    (assumed to be one page). Its ossa/ofs_base/ogs_base fields are stored
//    as enclave-relative offsets and are rebased by adding the entry's rva
//    before the page is added; the TCS address is recorded in m_tcs_list.
//  * content_offset != 0, other flags: the metadata holds initial page
//    content -- added as an ordinary memory region.
//  * content_offset == 0, si_flags != SI_FLAG_NONE: pages without backing
//    content. If content_size != 0, every 32-bit word of a scratch page is
//    set to that value and the page is replicated over the range
//    (presumably a fill pattern for heap/stack pages -- confirm against the
//    metadata generator); otherwise zero pages are added.
// SI_FLAG_NONE entries without content (guard pages) add nothing.
int CLoader::build_context(const uint64_t start_rva, layout_entry_t *layout)
{
    int ret = SGX_ERROR_UNEXPECTED;
    uint8_t added_page[SE_PAGE_SIZE];
    sec_info_t sinfo;
    memset(&sinfo, 0, sizeof(sinfo));
    uint64_t rva = start_rva + layout->rva;

    assert(IS_PAGE_ALIGNED(rva));

    if (layout->content_offset)
    {
        // assume TCS is only 1 page
        if(layout->si_flags == SI_FLAGS_TCS)
        {
            memset(added_page, 0, SE_PAGE_SIZE);
            memcpy_s(added_page, SE_PAGE_SIZE, GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size);
            // Rebase the relative SSA/FS/GS offsets to this TCS instance.
            tcs_t *ptcs = reinterpret_cast<tcs_t*>(added_page);
            ptcs->ossa += rva;
            ptcs->ofs_base += rva;
            ptcs->ogs_base += rva;
            m_tcs_list.push_back(GET_PTR(tcs_t, m_start_addr, rva));
            sinfo.flags = layout->si_flags;
            if(SGX_SUCCESS != (ret = build_pages(rva, (uint64_t)layout->page_count << SE_PAGE_SHIFT, added_page, sinfo, layout->attributes)))
            {
                return ret;
            }
        }
        else // guard page should not have content_offset != 0
        {
            section_info_t sec_info = {GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size, rva, (uint64_t)layout->page_count << SE_PAGE_SHIFT, layout->si_flags, NULL};
            if(SGX_SUCCESS != (ret = build_mem_region(sec_info)))
            {
                return ret;
            }
        }
    }
    else if (layout->si_flags != SI_FLAG_NONE)
    {
        sinfo.flags = layout->si_flags;
        void *source = NULL;
        if(layout->content_size)
        {
            // Fill a scratch page with the 32-bit pattern; build_pages()
            // replicates it across all page_count pages.
            for(uint32_t *p = (uint32_t *)added_page; p < GET_PTR(uint32_t, added_page, SE_PAGE_SIZE); p++)
            {
                *p = layout->content_size;
            }
            source = added_page;
        }
        if(SGX_SUCCESS != (ret = build_pages(rva, (uint64_t)layout->page_count << SE_PAGE_SHIFT, source, sinfo, layout->attributes)))
        {
            return ret;
        }
    }
    return SGX_SUCCESS;
}
// Walks the layout table in [layout_start, layout_end), adding pages at
// offset 'delta'. Plain entries go through build_context(). A group entry
// replays the preceding 'entry_count' entries 'load_times' times, advancing
// by 'load_step' each round: the recursion passes the accumulated step as
// the new delta and the slice just before the group as the new table.
int CLoader::build_contexts(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
{
    int ret = SGX_ERROR_UNEXPECTED;

    for(layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if (!IS_GROUP_ID(layout->group.id))
        {
            if(SGX_SUCCESS != (ret = build_context(delta, &layout->entry)))
            {
                return ret;
            }
        }
        else
        {
            uint64_t step = 0;
            for(uint32_t j = 0; j < layout->group.load_times; j++)
            {
                step += layout->group.load_step;
                if(SGX_SUCCESS != (ret = build_contexts(&layout[-layout->group.entry_count], layout, step)))
                {
                    return ret;
                }
            }
        }
    }
    return SGX_SUCCESS;
}
  305. int CLoader::build_secs(sgx_attributes_t * const secs_attr, sgx_misc_attribute_t * const misc_attr)
  306. {
  307. memset(&m_secs, 0, sizeof(secs_t)); //should set resvered field of secs as 0.
  308. //create secs structure.
  309. m_secs.base = 0; //base is allocated by driver. set it as 0
  310. m_secs.size = m_metadata->enclave_size;
  311. m_secs.misc_select = misc_attr->misc_select;
  312. memcpy_s(&m_secs.attributes, sizeof(m_secs.attributes), secs_attr, sizeof(m_secs.attributes));
  313. m_secs.ssa_frame_size = m_metadata->ssa_frame_size;
  314. EnclaveCreator *enclave_creator = get_enclave_creator();
  315. if(NULL == enclave_creator)
  316. return SGX_ERROR_UNEXPECTED;
  317. int ret = enclave_creator->create_enclave(&m_secs, &m_enclave_id, &m_start_addr, is_ae(&m_metadata->enclave_css));
  318. if(SGX_SUCCESS == ret)
  319. {
  320. SE_TRACE(SE_TRACE_NOTICE, "enclave start address = %p, size = %x\n", m_start_addr, m_metadata->enclave_size);
  321. }
  322. return ret;
  323. }
  324. int CLoader::build_image(SGXLaunchToken * const lc, sgx_attributes_t * const secs_attr, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t * const misc_attr)
  325. {
  326. int ret = SGX_SUCCESS;
  327. if(SGX_SUCCESS != (ret = build_secs(secs_attr, misc_attr)))
  328. {
  329. SE_TRACE(SE_TRACE_WARNING, "build secs failed\n");
  330. return ret;
  331. };
  332. // read reloc bitmap before patch the enclave file
  333. // If load_enclave_ex try to load the enclave for the 2nd time,
  334. // the enclave image is already patched, and parser cannot read the information.
  335. // For linux, there's no map conflict. We assume load_enclave_ex will not do the retry.
  336. vector<uint8_t> bitmap;
  337. if(!m_parser.get_reloc_bitmap(bitmap))
  338. return SGX_ERROR_INVALID_ENCLAVE;
  339. // patch enclave file
  340. patch_entry_t *patch_start = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset);
  341. patch_entry_t *patch_end = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset + m_metadata->dirs[DIR_PATCH].size);
  342. for(patch_entry_t *patch = patch_start; patch < patch_end; patch++)
  343. {
  344. memcpy_s(GET_PTR(void, m_parser.get_start_addr(), patch->dst), patch->size, GET_PTR(void, m_metadata, patch->src), patch->size);
  345. }
  346. //build sections, copy export function table as well;
  347. if(SGX_SUCCESS != (ret = build_sections(&bitmap)))
  348. {
  349. SE_TRACE(SE_TRACE_WARNING, "build sections failed\n");
  350. goto fail;
  351. }
  352. // build heap/thread context
  353. if (SGX_SUCCESS != (ret = build_contexts(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset),
  354. GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size),
  355. 0)))
  356. {
  357. SE_TRACE(SE_TRACE_WARNING, "build heap/thread context failed\n");
  358. goto fail;
  359. }
  360. //initialize Enclave
  361. ret = get_enclave_creator()->init_enclave(ENCLAVE_ID_IOCTL, const_cast<enclave_css_t *>(&m_metadata->enclave_css), lc, prd_css_file);
  362. if(SGX_SUCCESS != ret)
  363. {
  364. SE_TRACE(SE_TRACE_WARNING, "init_enclave failed\n");
  365. goto fail;
  366. }
  367. return SGX_SUCCESS;
  368. fail:
  369. get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL, m_secs.size);
  370. return ret;
  371. }
  372. bool CLoader::is_metadata_buffer(uint32_t offset, uint32_t size)
  373. {
  374. if((offsetof(metadata_t, data) > offset) || (offset >= m_metadata->size))
  375. {
  376. return false;
  377. }
  378. uint32_t end = offset + size;
  379. if ((end < offset) || (end < size) || (end > m_metadata->size))
  380. {
  381. return false;
  382. }
  383. return true;
  384. }
  385. bool CLoader::is_enclave_buffer(uint64_t offset, uint64_t size)
  386. {
  387. if(offset >= m_metadata->enclave_size)
  388. {
  389. return false;
  390. }
  391. uint64_t end = offset + size;
  392. if ((end < offset) || (end < size) || (end > m_metadata->enclave_size))
  393. {
  394. return false;
  395. }
  396. return true;
  397. }
// Validates the layout directory of the metadata:
//  * collects an (rva, byte-size) pair for every layout entry, expanding
//    each group by its load_times/load_step repetitions;
//  * any entry content must lie within the metadata buffer;
//  * a group may only reference entries that precede it and must not
//    contain nested groups;
//  * finally, every collected range must be page aligned, fall inside the
//    enclave address range, and -- checked on the sorted list -- not
//    overlap its successor.
// Returns SGX_SUCCESS or SGX_ERROR_INVALID_METADATA.
int CLoader::validate_layout_table()
{
    layout_t *layout_start = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset);
    layout_t *layout_end = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size);
    vector<pair<uint64_t, uint64_t>> rva_vector;

    for (layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if(!IS_GROUP_ID(layout->entry.id)) // layout entry
        {
            rva_vector.push_back(make_pair(layout->entry.rva, (uint64_t)layout->entry.page_count << SE_PAGE_SHIFT));
            if(layout->entry.content_offset)
            {
                if(false == is_metadata_buffer(layout->entry.content_offset, layout->entry.content_size))
                {
                    return SGX_ERROR_INVALID_METADATA;
                }
            }
        }
        else // layout group
        {
            // The group can only repeat entries that exist before it.
            if (layout->group.entry_count > (uint32_t)(PTR_DIFF(layout, layout_start)/sizeof(layout_t)))
            {
                return SGX_ERROR_INVALID_METADATA;
            }
            uint64_t load_step = 0;
            for(uint32_t i = 0; i < layout->group.load_times; i++)
            {
                load_step += layout->group.load_step;
                if(load_step > m_metadata->enclave_size)
                {
                    return SGX_ERROR_INVALID_METADATA;
                }
                for(layout_entry_t *entry = &layout[-layout->group.entry_count].entry; entry < &layout->entry; entry++)
                {
                    // Nested groups are not allowed.
                    if(IS_GROUP_ID(entry->id))
                    {
                        return SGX_ERROR_INVALID_METADATA;
                    }
                    rva_vector.push_back(make_pair(entry->rva + load_step, (uint64_t)entry->page_count << SE_PAGE_SHIFT));
                    // no need to check integer overflow for entry->rva + load_step, because
                    // entry->rva and load_step are less than enclave_size, whose size is no more than 37 bit
                }
            }
        }
    }

    // Sort by rva so overlap can be checked between neighbors only.
    sort(rva_vector.begin(), rva_vector.end());
    for (vector<pair<uint64_t, uint64_t>>::iterator it = rva_vector.begin(); it != rva_vector.end(); it++)
    {
        if(!IS_PAGE_ALIGNED(it->first))
        {
            return SGX_ERROR_INVALID_METADATA;
        }
        if(false == is_enclave_buffer(it->first, it->second))
        {
            return SGX_ERROR_INVALID_METADATA;
        }
        // A range must end at or before the start of the next one.
        if((it+1) != rva_vector.end())
        {
            if((it->first+it->second) > (it+1)->first)
            {
                return SGX_ERROR_INVALID_METADATA;
            }
        }
    }
    return SGX_SUCCESS;
}
  464. int CLoader::validate_patch_table()
  465. {
  466. patch_entry_t *patch_start = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset);
  467. patch_entry_t *patch_end = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset + m_metadata->dirs[DIR_PATCH].size);
  468. for(patch_entry_t *patch = patch_start; patch < patch_end; patch++)
  469. {
  470. if(false == is_metadata_buffer(patch->src, patch->size))
  471. {
  472. return SGX_ERROR_INVALID_METADATA;
  473. }
  474. if(false == is_enclave_buffer(patch->dst, patch->size))
  475. {
  476. return SGX_ERROR_INVALID_METADATA;
  477. }
  478. }
  479. return SGX_SUCCESS;
  480. }
// Sanity-checks the metadata before any driver interaction:
//  * version must be one of the two supported metadata versions;
//  * declared size must not exceed sizeof(metadata_t);
//  * tcs_policy and ssa_frame_size must be in range;
//  * enclave_size must not exceed the parser's maximum and must be a
//    power of two;
//  * every directory, the layout table and the patch table must be
//    internally consistent.
// Returns SGX_SUCCESS, SGX_ERROR_INVALID_VERSION, or SGX_ERROR_INVALID_METADATA.
int CLoader::validate_metadata()
{
    if(!m_metadata)
        return SGX_ERROR_INVALID_METADATA;
    uint64_t version2 = META_DATA_MAKE_VERSION(MAJOR_VERSION,MINOR_VERSION );
    uint64_t version1 = META_DATA_MAKE_VERSION(SGX_1_5_MAJOR_VERSION,SGX_1_5_MINOR_VERSION );
    //if the version of metadata does NOT match the version of metadata in urts, we should NOT launch enclave.
    if((m_metadata->version != version1) && (m_metadata->version != version2))
    {
        SE_TRACE(SE_TRACE_WARNING, "Mismatch between the metadata urts required and the metadata in use.\n");
        return SGX_ERROR_INVALID_VERSION;
    }
    if(m_metadata->size > sizeof(metadata_t))
    {
        return SGX_ERROR_INVALID_METADATA;
    }
    if(m_metadata->tcs_policy > TCS_POLICY_UNBIND)
        return SGX_ERROR_INVALID_METADATA;
    if(m_metadata->ssa_frame_size < SSA_FRAME_SIZE_MIN || m_metadata->ssa_frame_size > SSA_FRAME_SIZE_MAX)
        return SGX_ERROR_INVALID_METADATA;
    uint64_t size = m_metadata->enclave_size;
    if(size > m_parser.get_enclave_max_size())
    {
        return SGX_ERROR_INVALID_METADATA;
    }
    // Power-of-two check: strip trailing zero bits; a power of two
    // reduces to exactly 1 (and 0 fails the test).
    while ((size != 0) && ((size & 1) != 1))
    {
        size = size >> 1;
    }
    if(size != 1)
    {
        return SGX_ERROR_INVALID_METADATA;
    }

    // check dirs: each directory must lie inside the metadata buffer
    for(uint32_t i = 0; i < DIR_NUM; i++)
    {
        if(false == is_metadata_buffer(m_metadata->dirs[i].offset, m_metadata->dirs[i].size))
        {
            return SGX_ERROR_INVALID_METADATA;
        }
    }

    // check layout table
    int status = validate_layout_table();
    if(SGX_SUCCESS != status)
    {
        return status;
    }

    // check patch table
    status = validate_patch_table();
    if(SGX_SUCCESS != status)
    {
        return status;
    }

    return SGX_SUCCESS;
}
  536. bool CLoader::is_ae(const enclave_css_t *enclave_css)
  537. {
  538. assert(NULL != enclave_css);
  539. if(INTEL_VENDOR_ID == enclave_css->header.module_vendor
  540. && AE_PRODUCT_ID == enclave_css->body.isv_prod_id)
  541. return true;
  542. return false;
  543. }
// End-to-end load: validates the metadata, queries the SECS/MISC attributes
// required for this platform and token, then builds and initializes the
// enclave image. On success the caller's misc_attr (if non-NULL) receives
// the attributes actually used, with SGX_FLAGS_INITTED set to mirror what
// the EINIT instruction does in hardware.
int CLoader::load_enclave(SGXLaunchToken *lc, int debug, const metadata_t *metadata, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t *misc_attr)
{
    int ret = SGX_SUCCESS;
    sgx_misc_attribute_t sgx_misc_attr;
    memset(&sgx_misc_attr, 0, sizeof(sgx_misc_attribute_t));
    m_metadata = metadata;

    // Reject malformed or incompatible metadata before touching the driver.
    ret = validate_metadata();
    if(SGX_SUCCESS != ret)
    {
        SE_TRACE(SE_TRACE_ERROR, "The metadata setting is not correct\n");
        return ret;
    }

    // Determine SECS attributes / misc select for this platform and token.
    ret = get_enclave_creator()->get_misc_attr(&sgx_misc_attr, const_cast<metadata_t *>(m_metadata), lc, debug);
    if(SGX_SUCCESS != ret)
    {
        return ret;
    }

    ret = build_image(lc, &sgx_misc_attr.secs_attr, prd_css_file, &sgx_misc_attr);
    // Update misc_attr with secs.attr upon success.
    if(SGX_SUCCESS == ret)
    {
        if(misc_attr)
        {
            memcpy_s(misc_attr, sizeof(sgx_misc_attribute_t), &sgx_misc_attr, sizeof(sgx_misc_attribute_t));
            //When run here EINIT success, so SGX_FLAGS_INITTED should be set by ucode. uRTS align it with EINIT instruction.
            misc_attr->secs_attr.flags |= SGX_FLAGS_INITTED;
        }
    }
    return ret;
}
  574. int CLoader::load_enclave_ex(SGXLaunchToken *lc, bool debug, const metadata_t *metadata, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t *misc_attr)
  575. {
  576. unsigned int ret = SGX_SUCCESS, map_conflict_count = 3;
  577. bool retry = true;
  578. while (retry)
  579. {
  580. ret = this->load_enclave(lc, debug, metadata, prd_css_file, misc_attr);
  581. switch(ret)
  582. {
  583. //If CreateEnclave failed due to power transition, we retry it.
  584. case SGX_ERROR_ENCLAVE_LOST: //caused by loading enclave while power transition occurs
  585. break;
  586. //If memroy map conflict occurs, we only retry 3 times.
  587. case SGX_ERROR_MEMORY_MAP_CONFLICT:
  588. if(0 == map_conflict_count)
  589. retry = false;
  590. else
  591. map_conflict_count--;
  592. break;
  593. //We don't re-load enclave due to other error code.
  594. default:
  595. retry = false;
  596. break;
  597. }
  598. }
  599. return ret;
  600. }
  601. int CLoader::destroy_enclave()
  602. {
  603. return get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL, m_secs.size);
  604. }
// Aligns host-side page protections (mprotect) with the enclave's page
// permissions: each section's pages get the R/W/X subset of its SI flags,
// the gaps between consecutive sections are set to no-access, and the
// layout-described context pages are handled by set_context_protection().
// Returns SGX_SUCCESS, or SGX_ERROR_UNEXPECTED on any mprotect failure.
int CLoader::set_memory_protection()
{
    uint64_t rva = 0;
    uint64_t len = 0;
    uint64_t last_section_end = 0;
    unsigned int i = 0;
    int ret = 0;
    //for sections
    std::vector<Section*> sections = m_parser.get_sections();
    for(i = 0; i < sections.size() ; i++)
    {
        //require the sec_info.rva be page aligned, we need handle the first page.
        //the first page;
        uint64_t offset = (sections[i]->get_rva() & (SE_PAGE_SIZE -1));
        uint64_t size = SE_PAGE_SIZE - offset;
        //the raw data may be smaller than the size, we get the min of them
        if(sections[i]->raw_data_size() < size)
            size = sections[i]->raw_data_size();
        // The protected range always covers at least the first page.
        len = SE_PAGE_SIZE;
        //if there are more pages, then calc the next page aligned pages
        if((sections[i]->virtual_size() + offset) > SE_PAGE_SIZE)
        {
            // Raw and virtual bytes remaining after the first page.
            uint64_t raw_data_size = sections[i]->raw_data_size() - size;
            //we need use (SE_PAGE_SIZE - offset), because (SE_PAGE_SIZE - offset) may be larger than size
            uint64_t virtual_size = sections[i]->virtual_size() - (SE_PAGE_SIZE - offset);
            len += ROUND_TO_PAGE(raw_data_size);
            // Extend over the zero-filled (.bss-style) tail, if any.
            if(ROUND_TO_PAGE(virtual_size) > ROUND_TO_PAGE(raw_data_size))
            {
                len += ROUND_TO_PAGE(virtual_size) - ROUND_TO_PAGE(raw_data_size);
            }
        }
        // Absolute, page-aligned start address of the section in the host view.
        rva = TRIM_TO_PAGE(sections[i]->get_rva()) + (uint64_t)m_start_addr;
        ret = mprotect((void*)rva, (size_t)len, (int)(sections[i]->get_si_flags()&SI_MASK_MEM_ATTRIBUTE));
        if(ret != 0)
        {
            SE_TRACE(SE_TRACE_WARNING, "section[%d]:mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%" PRIu64 ") failed\n",
                     i, rva, len, (sections[i]->get_si_flags()));
            return SGX_ERROR_UNEXPECTED;
        }
        //there is a gap between sections, need to set those to NONE access
        if(last_section_end != 0)
        {
            ret = mprotect((void*)last_section_end, (size_t)(rva - last_section_end), (int)(SI_FLAG_NONE & SI_MASK_MEM_ATTRIBUTE));
            if(ret != 0)
            {
                SE_TRACE(SE_TRACE_WARNING, "set protection for gap before section[%d]:mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%" PRIu64 ") failed\n",
                         i, last_section_end, rva - last_section_end, SI_FLAG_NONE);
                return SGX_ERROR_UNEXPECTED;
            }
        }
        last_section_end = rva + len;
    }

    // Protect the heap/thread-context pages described by the layout table.
    ret = set_context_protection(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset),
                                 GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size),
                                 0);
    if (SGX_SUCCESS != ret)
    {
        return ret;
    }
    return SGX_SUCCESS;
}
// Applies host-side protections for the pages described by the layout
// table in [layout_start, layout_end) at offset 'delta': guard pages
// (SI_FLAG_NONE) become no-access, every other entry gets read/write.
// Groups are expanded exactly as in build_contexts() -- the preceding
// entry_count entries are replayed load_times times, advancing by
// load_step each round.
int CLoader::set_context_protection(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
{
    int ret = SGX_ERROR_UNEXPECTED;

    for(layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if (!IS_GROUP_ID(layout->group.id))
        {
            int prot = 0 ;
            if(layout->entry.si_flags == SI_FLAG_NONE)
            {
                // Guard page: no access at all.
                prot = SI_FLAG_NONE & SI_MASK_MEM_ATTRIBUTE;
            }
            else
            {
                // All other context pages are made read/write in the host view.
                prot = SI_FLAGS_RW & SI_MASK_MEM_ATTRIBUTE;
            }
            ret = mprotect(GET_PTR(void, m_start_addr, layout->entry.rva + delta),
                           (size_t)layout->entry.page_count << SE_PAGE_SHIFT,
                           prot);
            if(ret != 0)
            {
                SE_TRACE(SE_TRACE_WARNING, "mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%d) failed\n",
                         (uint64_t)m_start_addr + layout->entry.rva + delta,
                         (uint64_t)layout->entry.page_count << SE_PAGE_SHIFT,
                         prot);
                return SGX_ERROR_UNEXPECTED;
            }
        }
        else
        {
            // Group entry: replay the preceding entries with the stepped delta.
            uint64_t step = 0;
            for(uint32_t j = 0; j < layout->group.load_times; j++)
            {
                step += layout->group.load_step;
                if(SGX_SUCCESS != (ret = set_context_protection(&layout[-layout->group.entry_count], layout, step)))
                {
                    return ret;
                }
            }
        }
    }
    return SGX_SUCCESS;
}