loader.cpp 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. #include "se_wrapper.h"
  32. #include "se_error_internal.h"
  33. #include "arch.h"
  34. #include "util.h"
  35. #include "loader.h"
  36. #include "se_page_attr.h"
  37. #include "enclave.h"
  38. #include "enclave_creator.h"
  39. #include "routine.h"
  40. #include "sgx_attributes.h"
  41. #include "se_vendor.h"
  42. #include "se_detect.h"
  43. #include "binparser.h"
  44. #include <assert.h>
  45. #include <vector>
  46. #include <tuple>
  47. #include <algorithm>
  48. #define __STDC_FORMAT_MACROS
  49. #include <inttypes.h>
  50. #include <sys/mman.h>
  51. // enclave creator instance
  52. extern EnclaveCreator* g_enclave_creator;
  53. EnclaveCreator* get_enclave_creator(void)
  54. {
  55. return g_enclave_creator;
  56. }
  57. CLoader::CLoader(uint8_t *mapped_file_base, BinParser &parser)
  58. : m_mapped_file_base(mapped_file_base)
  59. , m_enclave_id(0)
  60. , m_start_addr(NULL)
  61. , m_metadata(NULL)
  62. , m_parser(parser)
  63. {
  64. memset(&m_secs, 0, sizeof(m_secs));
  65. }
  66. CLoader::~CLoader()
  67. {
  68. }
  69. sgx_enclave_id_t CLoader::get_enclave_id() const
  70. {
  71. return m_enclave_id;
  72. }
  73. const void* CLoader::get_start_addr() const
  74. {
  75. return m_start_addr;
  76. }
  77. const std::vector<std::pair<tcs_t *, bool>>& CLoader::get_tcs_list() const
  78. {
  79. return m_tcs_list;
  80. }
  81. const secs_t& CLoader::get_secs() const
  82. {
  83. return m_secs;
  84. }
  85. void* CLoader::get_symbol_address(const char * const symbol)
  86. {
  87. uint64_t rva = m_parser.get_symbol_rva(symbol);
  88. if(0 == rva)
  89. return NULL;
  90. return GET_PTR(void, m_start_addr, rva);
  91. }
  92. // is_relocation_page returns true if the specified RVA is a writable relocation page based on the bitmap.
  93. bool CLoader::is_relocation_page(const uint64_t rva, vector<uint8_t> *bitmap)
  94. {
  95. uint64_t page_frame = rva >> SE_PAGE_SHIFT;
  96. //NOTE:
  97. // Current enclave size is not beyond 128G, so the type-casting from (uint64>>15) to (size_t) is OK.
  98. // In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
  99. // It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
  100. size_t index = (size_t)(page_frame / 8);
  101. if(bitmap && (index < bitmap->size()))
  102. {
  103. return ((*bitmap)[index] & (1 << (page_frame % 8)));
  104. }
  105. return false;
  106. }
// Adds all pages of one image section to the enclave.
// The initialized (raw-data) part is added page by page, because any single
// page may carry relocation entries and must then be EADDed writable; the
// uninitialized tail (.bss-style) is added afterwards as one zero-filled run.
// Returns SGX_SUCCESS or the first error from mprotect/build_pages.
int CLoader::build_mem_region(const section_info_t &sec_info)
{
    int ret = SGX_SUCCESS;
    uint64_t offset = 0;
    sec_info_t sinfo;
    memset(&sinfo, 0, sizeof(sinfo));
    // Build pages of the section that are contain initialized data. Each page
    // needs to be added individually as the page may hold relocation data, in
    // which case the page needs to be marked writable.
    while(offset < sec_info.raw_data_size)
    {
        uint64_t rva = sec_info.rva + offset;
        // Bytes of raw data available on this page (less than a page either on
        // the unaligned first page or on the section's final partial page).
        uint64_t size = MIN((SE_PAGE_SIZE - PAGE_OFFSET(rva)), (sec_info.raw_data_size - offset));
        sinfo.flags = sec_info.flag;
        if(is_relocation_page(rva, sec_info.bitmap) && !(sec_info.flag & SI_FLAG_W))
        {
            // Relocation page in a read-only section: widen the EPCM flags with
            // SI_FLAG_W so the in-enclave relocation pass can patch it.
            sinfo.flags = sec_info.flag | SI_FLAG_W;
            assert(g_enclave_creator != NULL);
            if(g_enclave_creator->use_se_hw() == true)
            {
                // On real hardware the host mapping must agree with the page's
                // permissions, so widen the mmap protection as well.
                ret = mprotect((void*)(TRIM_TO_PAGE(rva) + (uint64_t)m_start_addr), SE_PAGE_SIZE,
                (int)(sinfo.flags & SI_MASK_MEM_ATTRIBUTE));
                if(ret != 0)
                {
                    SE_TRACE(SE_TRACE_WARNING, "mprotect(rva=0x%llx, len=%d, flags=%d) failed\n",
                    rva, SE_PAGE_SIZE, int(sinfo.flags & SI_MASK_MEM_ATTRIBUTE));
                    return SGX_ERROR_UNEXPECTED;
                }
            }
        }
        if (size == SE_PAGE_SIZE)
            ret = build_pages(rva, size, sec_info.raw_data + offset, sinfo, ADD_EXTEND_PAGE);
        else
            // Partial page: build_partial_page zero-pads around the raw bytes.
            ret = build_partial_page(rva, size, sec_info.raw_data + offset, sinfo, ADD_EXTEND_PAGE);
        if(SGX_SUCCESS != ret)
            return ret;
        // only the first time that rva may be not page aligned
        offset += SE_PAGE_SIZE - PAGE_OFFSET(rva);
    }
    assert(IS_PAGE_ALIGNED(sec_info.rva + offset));
    // Add any remaining uninitialized data. We can call build_pages directly
    // even if there are partial pages since the source is null, i.e. everything
    // is filled with '0'. Uninitialied data cannot be a relocation table, ergo
    // there is no need to check the relocation bitmap.
    if(sec_info.virtual_size > offset)
    {
        uint64_t rva = sec_info.rva + offset;
        size_t size = (size_t)(ROUND_TO_PAGE(sec_info.virtual_size - offset));
        sinfo.flags = sec_info.flag;
        if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
            return ret;
    }
    return SGX_SUCCESS;
}
// Adds every image section to the enclave. For enclaves signed with metadata
// version SGX 1.5, the signing tool measured an extra padding page after a
// section whenever rounding the section's virtual size and RVA separately
// spills into one more page than rounding their sum does; that page must be
// EADDed here too (between sections, and once more after the last section)
// or the enclave measurement would not match the signature.
// NOTE(review): `last_section` tracks the section with the highest RVA seen so
// far, which the padding checks assume is the previous section in layout order.
int CLoader::build_sections(vector<uint8_t> *bitmap)
{
    int ret = SGX_SUCCESS;
    std::vector<Section*> sections = m_parser.get_sections();
    uint64_t max_rva =0;
    Section* last_section = NULL;
    for(unsigned int i = 0; i < sections.size() ; i++)
    {
        // v1.5 compatibility padding between `last_section` and this section,
        // only when there is an un-added page strictly below this section's
        // page-aligned start.
        if((META_DATA_MAKE_VERSION(SGX_1_5_MAJOR_VERSION,SGX_1_5_MINOR_VERSION ) == m_metadata->version) &&
        (last_section != NULL) &&
        (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())) &&
        (ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size()) < (sections[i]->get_rva() & (~(SE_PAGE_SIZE - 1)))))
        {
            size_t size = SE_PAGE_SIZE;
            sec_info_t sinfo;
            memset(&sinfo, 0, sizeof(sinfo));
            // The padding page inherits the previous section's permissions.
            sinfo.flags = last_section->get_si_flags();
            uint64_t rva = ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size());
            if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
                return ret;
        }
        // Remember the highest-RVA section for the padding checks above/below.
        if(sections[i]->get_rva() > max_rva)
        {
            max_rva = sections[i]->get_rva();
            last_section = sections[i];
        }
        section_info_t sec_info = { sections[i]->raw_data(), sections[i]->raw_data_size(), sections[i]->get_rva(), sections[i]->virtual_size(), sections[i]->get_si_flags(), bitmap };
        if(SGX_SUCCESS != (ret = build_mem_region(sec_info)))
            return ret;
    }
    // Same v1.5 compatibility padding after the final (highest-RVA) section.
    if((META_DATA_MAKE_VERSION(SGX_1_5_MAJOR_VERSION,SGX_1_5_MINOR_VERSION ) == m_metadata->version) &&
    (last_section != NULL) &&
    (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())))
    {
        size_t size = SE_PAGE_SIZE;
        sec_info_t sinfo;
        memset(&sinfo, 0, sizeof(sinfo));
        sinfo.flags = last_section->get_si_flags();
        uint64_t rva = ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size());
        if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
            return ret;
    }
    return SGX_SUCCESS;
}
  205. int CLoader::build_partial_page(const uint64_t rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr)
  206. {
  207. // RVA may or may not be aligned.
  208. uint64_t offset = PAGE_OFFSET(rva);
  209. // Initialize the page with '0', this serves as both the padding at the start
  210. // of the page (if it's not aligned) as well as the fill for any unitilized
  211. // bytes at the end of the page, e.g. .bss data.
  212. uint8_t page_data[SE_PAGE_SIZE];
  213. memset(page_data, 0, SE_PAGE_SIZE);
  214. // The amount of raw data may be less than the number of bytes on the page,
  215. // but that portion of page_data has already been filled (see above).
  216. memcpy_s(&page_data[offset], (size_t)(SE_PAGE_SIZE - offset), source, (size_t)size);
  217. // Add the page, trimming the start address to make it page aligned.
  218. return build_pages(TRIM_TO_PAGE(rva), SE_PAGE_SIZE, page_data, sinfo, attr);
  219. }
  220. int CLoader::build_pages(const uint64_t start_rva, const uint64_t size, const void *source, const sec_info_t &sinfo, const uint32_t attr)
  221. {
  222. int ret = SGX_SUCCESS;
  223. uint64_t offset = 0;
  224. uint64_t rva = start_rva;
  225. assert(IS_PAGE_ALIGNED(start_rva) && IS_PAGE_ALIGNED(size));
  226. while(offset < size)
  227. {
  228. //call driver to add page;
  229. if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, GET_PTR(void, source, 0), rva, sinfo, attr)))
  230. {
  231. //if add page failed , we should remove enclave somewhere;
  232. return ret;
  233. }
  234. offset += SE_PAGE_SIZE;
  235. rva += SE_PAGE_SIZE;
  236. }
  237. return SGX_SUCCESS;
  238. }
// First pass of post-EINIT cleanup: for every layout entry flagged
// PAGE_ATTR_POST_REMOVE, asks the creator to trim the entry's page range.
// A layout group entry describes `load_times` replays of the preceding
// `entry_count` entries, each shifted by a further `load_step`; the walk
// recurses into the replayed range with the accumulated offset.
// `delta` is the RVA shift applied by enclosing group replays (0 at top level).
int CLoader::post_init_action(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
{
    int ret = SGX_SUCCESS;
    for(layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if (!IS_GROUP_ID(layout->group.id) && (layout->entry.attributes & PAGE_ATTR_POST_REMOVE))
        {
            // Absolute address range of this entry inside the running enclave.
            uint64_t start_addr = layout->entry.rva + delta + (uint64_t)get_start_addr();
            uint64_t page_count = (uint64_t)layout->entry.page_count;
            if (SGX_SUCCESS != (ret = get_enclave_creator()->trim_range(start_addr, start_addr + (page_count << SE_PAGE_SHIFT))))
                return ret;
        }
        else if (IS_GROUP_ID(layout->group.id))
        {
            // Replay the preceding entry_count entries load_times times,
            // each iteration shifted by one more load_step.
            uint64_t step = 0;
            for(uint32_t j = 0; j < layout->group.load_times; j++)
            {
                step += layout->group.load_step;
                if(SGX_SUCCESS != (ret = post_init_action(&layout[-layout->group.entry_count], layout, step)))
                    return ret;
            }
        }
    }
    return SGX_SUCCESS;
}
// Second pass of post-EINIT cleanup: accepts (EACCEPT) the trim requested by
// post_init_action(), one page at a time, for every PAGE_ATTR_POST_REMOVE
// entry. Mirrors post_init_action()'s recursive walk over layout groups.
// `delta` is the RVA shift applied by enclosing group replays (0 at top level).
int CLoader::post_init_action_commit(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
{
    int ret = SGX_SUCCESS;
    for(layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if (!IS_GROUP_ID(layout->group.id) && (layout->entry.attributes & PAGE_ATTR_POST_REMOVE))
        {
            uint64_t start_addr = layout->entry.rva + delta + (uint64_t)get_start_addr();
            uint64_t page_count = (uint64_t)layout->entry.page_count;
            // trim_accept works per page, unlike trim_range which takes a span.
            for (uint64_t i = 0; i < page_count; i++)
            {
                if (SGX_SUCCESS != (ret = get_enclave_creator()->trim_accept(start_addr + (i << SE_PAGE_SHIFT))))
                    return ret;
            }
        }
        else if (IS_GROUP_ID(layout->group.id))
        {
            // Replay the preceding entry_count entries with increasing offsets.
            uint64_t step = 0;
            for(uint32_t j = 0; j < layout->group.load_times; j++)
            {
                step += layout->group.load_step;
                if(SGX_SUCCESS != (ret = post_init_action_commit(&layout[-layout->group.entry_count], layout, step)))
                    return ret;
            }
        }
    }
    return SGX_SUCCESS;
}
// Materializes a single layout entry (heap, stack, TCS, SSA, guard, ...) at
// start_rva + layout->rva. TCS pages get their ossa/ofs_base/ogs_base fields
// rebased from entry-relative to enclave-relative offsets before being added,
// and are recorded in m_tcs_list (static TCSes with `false`, dynamic
// LAYOUT_ID_TCS_DYN ones with `true`). Guard pages (si_flags == SI_FLAG_NONE)
// are skipped entirely — they are simply never EADDed.
int CLoader::build_context(const uint64_t start_rva, layout_entry_t *layout)
{
    int ret = SGX_ERROR_UNEXPECTED;
    uint8_t added_page[SE_PAGE_SIZE];
    sec_info_t sinfo;
    memset(added_page, 0, SE_PAGE_SIZE);
    memset(&sinfo, 0, sizeof(sinfo));
    uint64_t rva = start_rva + layout->rva;
    //uint64_t start_addr = (uint64_t)get_start_addr();
    assert(IS_PAGE_ALIGNED(rva));
    if (layout->attributes & PAGE_ATTR_EADD)
    {
        uint16_t attributes = layout->attributes;
#ifdef SE_SIM
        // The simulator cannot EREMOVE, so strip that attribute up front.
        attributes = attributes & (uint16_t)(~PAGE_ATTR_EREMOVE);
#endif
        if (layout->content_offset)
        {
            // Entry carries initial content stored inside the metadata blob.
            if(layout->si_flags == SI_FLAGS_TCS)
            {
                memset(added_page, 0, SE_PAGE_SIZE);
                memcpy_s(added_page, SE_PAGE_SIZE, GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size);
                // The template TCS stores SSA/FS/GS offsets relative to the
                // entry; rebase them to enclave-relative offsets.
                tcs_t *ptcs = reinterpret_cast<tcs_t*>(added_page);
                ptcs->ossa += rva;
                ptcs->ofs_base += rva;
                ptcs->ogs_base += rva;
                if(!(attributes & PAGE_ATTR_EREMOVE))
                {
                    // Only TCSes that will survive loading are usable threads.
                    m_tcs_list.push_back(make_pair(GET_PTR(tcs_t, m_start_addr, rva), false));
                }
                sinfo.flags = layout->si_flags;
                if(SGX_SUCCESS != (ret = build_pages(rva, ((uint64_t)layout->page_count) << SE_PAGE_SHIFT, added_page, sinfo, attributes)))
                {
                    return ret;
                }
            }
            else // guard page should not have content_offset != 0
            {
                // Non-TCS content: treat it like a mini-section (no reloc bitmap).
                section_info_t sec_info = {GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size, rva, ((uint64_t)layout->page_count) << SE_PAGE_SHIFT, layout->si_flags, NULL};
                if(SGX_SUCCESS != (ret = build_mem_region(sec_info)))
                {
                    return ret;
                }
            }
        }
        else if (layout->si_flags != SI_FLAG_NONE)
        {
            // No content stored; content_size, when non-zero, is a 32-bit fill
            // pattern replicated across the page (e.g. stack poison value).
            sinfo.flags = layout->si_flags;
            void *source = NULL;
            if(layout->content_size)
            {
                for(uint32_t *p = (uint32_t *)added_page; p < GET_PTR(uint32_t, added_page, SE_PAGE_SIZE); p++)
                {
                    *p = layout->content_size;
                }
                source = added_page;
            }
            if(SGX_SUCCESS != (ret = build_pages(rva, ((uint64_t)layout->page_count) << SE_PAGE_SHIFT, source, sinfo, layout->attributes)))
            {
                return ret;
            }
        }
    }
    if(layout->attributes & PAGE_ATTR_POST_ADD)
    {
#ifndef SE_SIM
        // Dynamic TCS pages are EAUGed later (EDMM); record where they will be.
        if(layout->id == LAYOUT_ID_TCS_DYN)
        {
            m_tcs_list.push_back(make_pair(GET_PTR(tcs_t, m_start_addr, rva), true));
        }
#endif
    }
    return SGX_SUCCESS;
}
// Walks the layout table building every entry via build_context(). Group
// entries replay the preceding `entry_count` entries `load_times` times, each
// replay shifted by a further `load_step`; the recursion carries that shift
// in `delta` (0 at the top level).
int CLoader::build_contexts(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
{
    int ret = SGX_ERROR_UNEXPECTED;
    for(layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if (!IS_GROUP_ID(layout->group.id))
        {
            if(SGX_SUCCESS != (ret = build_context(delta, &layout->entry)))
            {
                return ret;
            }
        }
        else
        {
            // Group: recurse over the preceding entry_count entries once per
            // load_time, accumulating load_step into the RVA shift.
            uint64_t step = 0;
            for(uint32_t j = 0; j < layout->group.load_times; j++)
            {
                step += layout->group.load_step;
                if(SGX_SUCCESS != (ret = build_contexts(&layout[-layout->group.entry_count], layout, step)))
                {
                    return ret;
                }
            }
        }
    }
    return SGX_SUCCESS;
}
// Fills in the SECS from metadata plus the negotiated attributes, then asks
// the enclave creator to ECREATE the enclave. On success m_enclave_id and
// m_start_addr are populated; on hardware the initial (pre-EINIT) page
// protections are applied immediately.
int CLoader::build_secs(sgx_attributes_t * const secs_attr, sgx_misc_attribute_t * const misc_attr)
{
    memset(&m_secs, 0, sizeof(secs_t)); //should set resvered field of secs as 0.
    //create secs structure.
    m_secs.base = 0; //base is allocated by driver. set it as 0
    m_secs.size = m_metadata->enclave_size;
    m_secs.misc_select = misc_attr->misc_select;
    memcpy_s(&m_secs.attributes, sizeof(m_secs.attributes), secs_attr, sizeof(m_secs.attributes));
    m_secs.ssa_frame_size = m_metadata->ssa_frame_size;
    EnclaveCreator *enclave_creator = get_enclave_creator();
    if(NULL == enclave_creator)
        return SGX_ERROR_UNEXPECTED;
    // is_ae() relaxes launch checks for Intel architectural enclaves.
    int ret = enclave_creator->create_enclave(&m_secs, &m_enclave_id, &m_start_addr, is_ae(&m_metadata->enclave_css));
    if(SGX_SUCCESS == ret)
    {
        SE_TRACE(SE_TRACE_NOTICE, "enclave start address = %p, size = 0x%llx\n", m_start_addr, m_metadata->enclave_size);
        if(enclave_creator->use_se_hw() == true)
        {
            // Pre-initialization protections; EDMM tightening happens after EINIT.
            set_memory_protection(false);
        }
    }
    return ret;
}
// Full load sequence: ECREATE (build_secs), patch the mapped image from the
// metadata patch table, EADD all sections and layout contexts, then EINIT.
// Any failure after ECREATE destroys the half-built enclave before returning.
int CLoader::build_image(SGXLaunchToken * const lc, sgx_attributes_t * const secs_attr, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t * const misc_attr)
{
    int ret = SGX_SUCCESS;
    if(SGX_SUCCESS != (ret = build_secs(secs_attr, misc_attr)))
    {
        SE_TRACE(SE_TRACE_WARNING, "build secs failed\n");
        return ret;
    };
    // read reloc bitmap before patch the enclave file
    // If load_enclave_ex try to load the enclave for the 2nd time,
    // the enclave image is already patched, and parser cannot read the information.
    // For linux, there's no map conflict. We assume load_enclave_ex will not do the retry.
    vector<uint8_t> bitmap;
    if(!m_parser.get_reloc_bitmap(bitmap))
        return SGX_ERROR_INVALID_ENCLAVE;
    // patch enclave file
    // (applies metadata-stored byte patches, e.g. global data, onto the mapped image)
    patch_entry_t *patch_start = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset);
    patch_entry_t *patch_end = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset + m_metadata->dirs[DIR_PATCH].size);
    for(patch_entry_t *patch = patch_start; patch < patch_end; patch++)
    {
        memcpy_s(GET_PTR(void, m_parser.get_start_addr(), patch->dst), patch->size, GET_PTR(void, m_metadata, patch->src), patch->size);
    }
    //build sections, copy export function table as well;
    if(SGX_SUCCESS != (ret = build_sections(&bitmap)))
    {
        SE_TRACE(SE_TRACE_WARNING, "build sections failed\n");
        goto fail;
    }
    // build heap/thread context
    if (SGX_SUCCESS != (ret = build_contexts(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset),
    GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size),
    0)))
    {
        SE_TRACE(SE_TRACE_WARNING, "build heap/thread context failed\n");
        goto fail;
    }
    //initialize Enclave
    ret = get_enclave_creator()->init_enclave(ENCLAVE_ID_IOCTL, const_cast<enclave_css_t *>(&m_metadata->enclave_css), lc, prd_css_file);
    if(SGX_SUCCESS != ret)
    {
        SE_TRACE(SE_TRACE_WARNING, "init_enclave failed\n");
        goto fail;
    }
    return SGX_SUCCESS;
fail:
    // Tear down the partially built enclave so the caller can safely retry.
    get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL, m_secs.size);
    return ret;
}
  464. bool CLoader::is_metadata_buffer(uint32_t offset, uint32_t size)
  465. {
  466. if((offsetof(metadata_t, data) > offset) || (offset >= m_metadata->size))
  467. {
  468. return false;
  469. }
  470. uint32_t end = offset + size;
  471. if ((end < offset) || (end < size) || (end > m_metadata->size))
  472. {
  473. return false;
  474. }
  475. return true;
  476. }
  477. bool CLoader::is_enclave_buffer(uint64_t offset, uint64_t size)
  478. {
  479. if(offset >= m_metadata->enclave_size)
  480. {
  481. return false;
  482. }
  483. uint64_t end = offset + size;
  484. if ((end < offset) || (end < size) || (end > m_metadata->enclave_size))
  485. {
  486. return false;
  487. }
  488. return true;
  489. }
// Validates the metadata layout table before it is trusted: collects the RVA
// range of every entry (expanding groups at each replay offset), checks that
// each group only replays plain entries and stays within bounds, then sorts
// the ranges and verifies each is page aligned, inside the enclave, and
// non-overlapping with its successor.
int CLoader::validate_layout_table()
{
    layout_t *layout_start = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset);
    layout_t *layout_end = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size);
    // (rva, size-in-bytes) for every page run the layout will create.
    vector<pair<uint64_t, uint64_t>> rva_vector;
    for (layout_t *layout = layout_start; layout < layout_end; layout++)
    {
        if(!IS_GROUP_ID(layout->entry.id)) // layout entry
        {
            rva_vector.push_back(make_pair(layout->entry.rva, ((uint64_t)layout->entry.page_count) << SE_PAGE_SHIFT));
            if(layout->entry.content_offset)
            {
                // Entry content must live inside the metadata blob.
                if(false == is_metadata_buffer(layout->entry.content_offset, layout->entry.content_size))
                {
                    return SGX_ERROR_INVALID_METADATA;
                }
            }
        }
        else // layout group
        {
            // A group may only reference entries that precede it in the table.
            if (layout->group.entry_count > (uint32_t)(PTR_DIFF(layout, layout_start)/sizeof(layout_t)))
            {
                return SGX_ERROR_INVALID_METADATA;
            }
            uint64_t load_step = 0;
            for(uint32_t i = 0; i < layout->group.load_times; i++)
            {
                load_step += layout->group.load_step;
                // The accumulated shift must stay inside the enclave.
                if(load_step > m_metadata->enclave_size)
                {
                    return SGX_ERROR_INVALID_METADATA;
                }
                for(layout_entry_t *entry = &layout[-layout->group.entry_count].entry; entry < &layout->entry; entry++)
                {
                    // Nested groups are not allowed.
                    if(IS_GROUP_ID(entry->id))
                    {
                        return SGX_ERROR_INVALID_METADATA;
                    }
                    rva_vector.push_back(make_pair(entry->rva + load_step, ((uint64_t)entry->page_count) << SE_PAGE_SHIFT));
                    // no need to check integer overflow for entry->rva + load_step, because
                    // entry->rva and load_step are less than enclave_size, whose size is no more than 37 bit
                }
            }
        }
    }
    // Sort by start RVA so overlap can be checked between neighbors only.
    sort(rva_vector.begin(), rva_vector.end());
    for (vector<pair<uint64_t, uint64_t>>::iterator it = rva_vector.begin(); it != rva_vector.end(); it++)
    {
        if(!IS_PAGE_ALIGNED(it->first))
        {
            return SGX_ERROR_INVALID_METADATA;
        }
        if(false == is_enclave_buffer(it->first, it->second))
        {
            return SGX_ERROR_INVALID_METADATA;
        }
        if((it+1) != rva_vector.end())
        {
            // A range may not extend past the start of the next range.
            if((it->first+it->second) > (it+1)->first)
            {
                return SGX_ERROR_INVALID_METADATA;
            }
        }
    }
    return SGX_SUCCESS;
}
  556. int CLoader::validate_patch_table()
  557. {
  558. patch_entry_t *patch_start = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset);
  559. patch_entry_t *patch_end = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset + m_metadata->dirs[DIR_PATCH].size);
  560. for(patch_entry_t *patch = patch_start; patch < patch_end; patch++)
  561. {
  562. if(false == is_metadata_buffer(patch->src, patch->size))
  563. {
  564. return SGX_ERROR_INVALID_METADATA;
  565. }
  566. if(false == is_enclave_buffer(patch->dst, patch->size))
  567. {
  568. return SGX_ERROR_INVALID_METADATA;
  569. }
  570. }
  571. return SGX_SUCCESS;
  572. }
  573. int CLoader::validate_metadata()
  574. {
  575. if(!m_metadata)
  576. return SGX_ERROR_INVALID_METADATA;
  577. uint64_t urts_version = META_DATA_MAKE_VERSION(MAJOR_VERSION,MINOR_VERSION);
  578. //if the version of metadata does NOT match the version of metadata in urts, we should NOT launch enclave.
  579. if(MAJOR_VERSION_OF_METADATA(urts_version) < MAJOR_VERSION_OF_METADATA(m_metadata->version))
  580. {
  581. SE_TRACE(SE_TRACE_WARNING, "Mismatch between the metadata urts required and the metadata in use.\n");
  582. return SGX_ERROR_INVALID_VERSION;
  583. }
  584. if(m_metadata->tcs_policy > TCS_POLICY_UNBIND)
  585. return SGX_ERROR_INVALID_METADATA;
  586. if(m_metadata->ssa_frame_size < SSA_FRAME_SIZE_MIN || m_metadata->ssa_frame_size > SSA_FRAME_SIZE_MAX)
  587. return SGX_ERROR_INVALID_METADATA;
  588. uint64_t size = m_metadata->enclave_size;
  589. if(size > m_parser.get_enclave_max_size())
  590. {
  591. return SGX_ERROR_INVALID_METADATA;
  592. }
  593. while ((size != 0) && ((size & 1) != 1))
  594. {
  595. size = size >> 1;
  596. }
  597. if(size != 1)
  598. {
  599. return SGX_ERROR_INVALID_METADATA;
  600. }
  601. // check dirs
  602. for(uint32_t i = 0; i < DIR_NUM; i++)
  603. {
  604. if(false == is_metadata_buffer(m_metadata->dirs[i].offset, m_metadata->dirs[i].size))
  605. {
  606. return SGX_ERROR_INVALID_METADATA;
  607. }
  608. }
  609. // check layout table
  610. int status = validate_layout_table();
  611. if(SGX_SUCCESS != status)
  612. {
  613. return status;
  614. }
  615. // check patch table
  616. status = validate_patch_table();
  617. if(SGX_SUCCESS != status)
  618. {
  619. return status;
  620. }
  621. return SGX_SUCCESS;
  622. }
  623. bool CLoader::is_ae(const enclave_css_t *enclave_css)
  624. {
  625. assert(NULL != enclave_css);
  626. if(INTEL_VENDOR_ID == enclave_css->header.module_vendor
  627. && AE_PRODUCT_ID == enclave_css->body.isv_prod_id)
  628. return true;
  629. return false;
  630. }
// Single load attempt: validates metadata, negotiates misc/secs attributes
// with the creator, then builds and initializes the enclave image. On
// success, misc_attr (if provided) receives the negotiated attributes with
// SGX_FLAGS_INITTED set, mirroring what EINIT does in hardware.
int CLoader::load_enclave(SGXLaunchToken *lc, int debug, const metadata_t *metadata, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t *misc_attr)
{
    int ret = SGX_SUCCESS;
    sgx_misc_attribute_t sgx_misc_attr;
    memset(&sgx_misc_attr, 0, sizeof(sgx_misc_attribute_t));
    m_metadata = metadata;
    ret = validate_metadata();
    if(SGX_SUCCESS != ret)
    {
        SE_TRACE(SE_TRACE_ERROR, "The metadata setting is not correct\n");
        return ret;
    }
    // Negotiate the attribute/misc_select values the platform will accept.
    ret = get_enclave_creator()->get_misc_attr(&sgx_misc_attr, const_cast<metadata_t *>(m_metadata), lc, debug);
    if(SGX_SUCCESS != ret)
    {
        return ret;
    }
    ret = build_image(lc, &sgx_misc_attr.secs_attr, prd_css_file, &sgx_misc_attr);
    // Update misc_attr with secs.attr upon success.
    if(SGX_SUCCESS == ret)
    {
        if(misc_attr)
        {
            memcpy_s(misc_attr, sizeof(sgx_misc_attribute_t), &sgx_misc_attr, sizeof(sgx_misc_attribute_t));
            //When run here EINIT success, so SGX_FLAGS_INITTED should be set by ucode. uRTS align it with EINIT instruction.
            misc_attr->secs_attr.flags |= SGX_FLAGS_INITTED;
        }
    }
    return ret;
}
  661. int CLoader::load_enclave_ex(SGXLaunchToken *lc, bool debug, const metadata_t *metadata, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t *misc_attr)
  662. {
  663. unsigned int ret = SGX_SUCCESS, map_conflict_count = 3;
  664. bool retry = true;
  665. while (retry)
  666. {
  667. ret = this->load_enclave(lc, debug, metadata, prd_css_file, misc_attr);
  668. switch(ret)
  669. {
  670. //If CreateEnclave failed due to power transition, we retry it.
  671. case SGX_ERROR_ENCLAVE_LOST: //caused by loading enclave while power transition occurs
  672. break;
  673. //If memroy map conflict occurs, we only retry 3 times.
  674. case SGX_ERROR_MEMORY_MAP_CONFLICT:
  675. if(0 == map_conflict_count)
  676. retry = false;
  677. else
  678. map_conflict_count--;
  679. break;
  680. //We don't re-load enclave due to other error code.
  681. default:
  682. retry = false;
  683. break;
  684. }
  685. }
  686. return ret;
  687. }
  688. int CLoader::destroy_enclave()
  689. {
  690. return get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL, m_secs.size);
  691. }
// Applies host-side (and, post-EINIT on EDMM platforms, enclave-side via
// EMODPR) memory protections. Called once before EINIT (false) with the
// looser load-time protections and once after (true) to tighten segments and
// layout contexts to their final permissions.
int CLoader::set_memory_protection(bool is_after_initialization)
{
    int ret = 0;
    //set memory protection for segments
    if(m_parser.set_memory_protection((uint64_t)m_start_addr, is_after_initialization) != true)
    {
        return SGX_ERROR_UNEXPECTED;
    }
    // On EDMM-capable platforms with current-format metadata, restrict EPCM
    // permissions of the parser-reported pages via EMODPR after EINIT.
    if (is_after_initialization &&
    (META_DATA_MAKE_VERSION(MAJOR_VERSION,MINOR_VERSION) <= m_metadata->version) &&
    get_enclave_creator()->is_EDMM_supported(get_enclave_id()))
    {
        // Each tuple is (start address, length, permission bits).
        std::vector<std::tuple<uint64_t, uint64_t, uint32_t>> pages_to_protect;
        m_parser.get_pages_to_protect((uint64_t)m_start_addr, pages_to_protect);
        for (auto page : pages_to_protect)
        {
            uint64_t start = 0, len = 0;
            uint32_t perm = 0;
            std::tie(start, len, perm) = page;
            ret = get_enclave_creator()->emodpr(start, len, (uint64_t)perm);
            if (ret != SGX_SUCCESS)
                return SGX_ERROR_UNEXPECTED;
        }
    }
    //set memory protection for context
    ret = set_context_protection(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset),
    GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size),
    0);
    if (SGX_SUCCESS != ret)
    {
        return ret;
    }
    return SGX_SUCCESS;
}
  725. int CLoader::set_context_protection(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
  726. {
  727. int ret = SGX_ERROR_UNEXPECTED;
  728. for(layout_t *layout = layout_start; layout < layout_end; layout++)
  729. {
  730. if (!IS_GROUP_ID(layout->group.id))
  731. {
  732. int prot = 0 ;
  733. if(layout->entry.si_flags == SI_FLAG_NONE)
  734. {
  735. prot = SI_FLAG_NONE & SI_MASK_MEM_ATTRIBUTE;
  736. }
  737. else
  738. {
  739. prot = SI_FLAGS_RW & SI_MASK_MEM_ATTRIBUTE;
  740. #ifndef SE_SIM
  741. //when a page is eremoved when loading, we should set this page to none access.
  742. //if this page is accessed, a sigbus exception will be raised.
  743. uint16_t attributes = layout->entry.attributes;
  744. if(attributes & PAGE_ATTR_EADD && attributes & PAGE_ATTR_EREMOVE)
  745. {
  746. if(attributes & PAGE_ATTR_EREMOVE)
  747. {
  748. prot = SI_FLAG_NONE & SI_MASK_MEM_ATTRIBUTE;
  749. }
  750. }
  751. #endif
  752. }
  753. ret = mprotect(GET_PTR(void, m_start_addr, layout->entry.rva + delta),
  754. (size_t)layout->entry.page_count << SE_PAGE_SHIFT,
  755. prot);
  756. if(ret != 0)
  757. {
  758. SE_TRACE(SE_TRACE_WARNING, "mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%d) failed\n",
  759. (uint64_t)m_start_addr + layout->entry.rva + delta,
  760. (uint64_t)layout->entry.page_count << SE_PAGE_SHIFT,
  761. prot);
  762. return SGX_ERROR_UNEXPECTED;
  763. }
  764. }
  765. else
  766. {
  767. uint64_t step = 0;
  768. for(uint32_t j = 0; j < layout->group.load_times; j++)
  769. {
  770. step += layout->group.load_step;
  771. if(SGX_SUCCESS != (ret = set_context_protection(&layout[-layout->group.entry_count], layout, step)))
  772. {
  773. return ret;
  774. }
  775. }
  776. }
  777. }
  778. return SGX_SUCCESS;
  779. }