loader.cpp 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808
  1. /*
  2. * Copyright (C) 2011-2016 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. #include "se_wrapper.h"
  32. #include "se_error_internal.h"
  33. #include "arch.h"
  34. #include "util.h"
  35. #include "loader.h"
  36. #include "se_page_attr.h"
  37. #include "enclave.h"
  38. #include "enclave_creator.h"
  39. #include "routine.h"
  40. #include "sgx_attributes.h"
  41. #include "se_vendor.h"
  42. #include "se_detect.h"
  43. #include "binparser.h"
  44. #include <assert.h>
  45. #include <vector>
  46. #include <algorithm>
  47. #define __STDC_FORMAT_MACROS
  48. #include <inttypes.h>
  49. #include <sys/mman.h>
// enclave creator instance
extern EnclaveCreator* g_enclave_creator;

// Returns the global EnclaveCreator used for all enclave/driver operations
// (page adds, create, init, destroy). Defined elsewhere in the uRTS.
EnclaveCreator* get_enclave_creator(void)
{
    return g_enclave_creator;
}
// Constructs a loader over an already-mapped enclave file.
// mapped_file_base: base address of the mapped enclave image.
// parser: binary parser for the image; held by reference, so it must
//         outlive this CLoader instance.
CLoader::CLoader(uint8_t *mapped_file_base, BinParser &parser)
    : m_mapped_file_base(mapped_file_base)
    , m_enclave_id(0)
    , m_start_addr(NULL)
    , m_metadata(NULL)
    , m_parser(parser)
{
    // Zero the SECS so reserved fields are deterministic before build_secs().
    memset(&m_secs, 0, sizeof(m_secs));
}
// No owned resources to release here; enclave teardown is explicit via
// destroy_enclave().
CLoader::~CLoader()
{
}
// Returns the enclave id assigned at creation time (0 before build_secs()
// succeeds).
sgx_enclave_id_t CLoader::get_enclave_id() const
{
    return m_enclave_id;
}
// Returns the enclave base address (NULL before build_secs() succeeds).
const void* CLoader::get_start_addr() const
{
    return m_start_addr;
}
// Returns the TCS pages collected while building layout contexts
// (populated by build_context()).
const std::vector<tcs_t *>& CLoader::get_tcs_list() const
{
    return m_tcs_list;
}
// Returns the SECS structure filled in by build_secs().
const secs_t& CLoader::get_secs() const
{
    return m_secs;
}
  84. void* CLoader::get_symbol_address(const char * const symbol)
  85. {
  86. uint64_t rva = m_parser.get_symbol_rva(symbol);
  87. if(0 == rva)
  88. return NULL;
  89. return GET_PTR(void, m_start_addr, rva);
  90. }
// Adds one memory region (a page-aligned slice of a section, or of metadata
// content) to the enclave, page by page.
//
// sec_info->rva must be page aligned (callers arrange this; see
// build_sections()). Pages whose bit is set in sec_info->bitmap (the
// relocation bitmap) get SI_FLAG_W added so relocations can be applied at
// runtime. The partial tail of the raw data is copied into a zeroed page,
// and any virtual size beyond the raw data is added as zero pages.
//
// Returns SGX_SUCCESS, or the first error from the enclave creator.
int CLoader::build_mem_region(const section_info_t * const sec_info)
{
    int ret = SGX_SUCCESS;
    uint8_t added_page[SE_PAGE_SIZE];
    uint64_t offset = 0;
    uint8_t *raw_ptr = NULL;
    uint64_t rva = 0;
    sec_info_t sinfo;
    memset(&sinfo, 0, sizeof(sinfo));

    // 1) Add every whole page of raw data.
    rva = sec_info->rva + offset;
    while(offset < TRIM_TO_PAGE(sec_info->raw_data_size))
    {
        raw_ptr = sec_info->raw_data + offset;
        sinfo.flags = sec_info->flag;
        //check if the page is writable.
        if(sec_info->bitmap && sec_info->bitmap->size())
        {
            uint64_t page_frame = rva >> SE_PAGE_SHIFT;
            //NOTE:
            //  Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
            //  In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
            //  It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
            if((*sec_info->bitmap)[(size_t)(page_frame / 8)] & (1 << (page_frame % 8)))
                sinfo.flags = sec_info->flag | SI_FLAG_W;
        }
        //call driver API to add page; raw_ptr needn't be page align, driver will handle page align;
        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, raw_ptr, rva, sinfo, ADD_EXTEND_PAGE)))
        {
            //if add page failed , we should remove enclave somewhere;
            return ret;
        }
        offset += SE_PAGE_SIZE;
        rva = sec_info->rva + offset;
    }

    // 2) Add the remainder of the last (partial) page of raw data,
    //    zero-padded to a full page.
    if(!IS_PAGE_ALIGNED(sec_info->raw_data_size))
    {
        sinfo.flags = sec_info->flag;
        //the padding be 0
        memset(added_page, 0, SE_PAGE_SIZE);
        raw_ptr = sec_info->raw_data + offset;
        rva = sec_info->rva + offset;
        memcpy_s(added_page, SE_PAGE_SIZE, raw_ptr, sec_info->raw_data_size & (SE_PAGE_SIZE-1));
        //check if the page is writable (same relocation-bitmap test and
        //size_t-cast caveat as above).
        if(sec_info->bitmap && sec_info->bitmap->size())
        {
            uint64_t page_frame = rva >> SE_PAGE_SHIFT;
            if((*sec_info->bitmap)[(size_t)(page_frame / 8)] & (1 << (page_frame % 8)))
                sinfo.flags = sec_info->flag | SI_FLAG_W;
        }
        //call driver to add page;
        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, added_page, rva, sinfo, ADD_EXTEND_PAGE)))
        {
            //if add page failed , we should remove enclave somewhere;
            return ret;
        }
        rva += SE_PAGE_SIZE;
    }

    // 3) Add uninitialized (zero-source) pages for the rest of the virtual
    //    size. If the section has no raw data, offset stays 0 and rva still
    //    points at the start of the region.
    if(ROUND_TO_PAGE(sec_info->virtual_size) > ROUND_TO_PAGE(sec_info->raw_data_size))
    {
        size_t size = (size_t)(ROUND_TO_PAGE(sec_info->virtual_size) - ROUND_TO_PAGE(sec_info->raw_data_size));
        sinfo.flags = sec_info->flag;
        if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
            return ret;
    }
    return SGX_SUCCESS;
}
// Adds all sections of the parsed image to the enclave.
//
// bitmap: relocation bitmap (one bit per enclave page; a set bit marks a
//         page containing relocations, which build_mem_region() then adds
//         as writable).
// Returns SGX_SUCCESS, or the first error from build_pages()/build_mem_region().
int CLoader::build_sections(vector<uint8_t> *bitmap)
{
    int ret = SGX_SUCCESS;
    std::vector<Section*> sections = m_parser.get_sections();
    uint64_t max_rva = 0;
    Section* last_section = NULL;

    for(unsigned int i = 0; i < sections.size() ; i++)
    {
        // If the highest section seen so far spills into one extra page when
        // its virtual size is rounded up (rounded(size)+rva crosses a page
        // boundary that rounded(size+rva) does not), and that extra page lies
        // strictly before the current section's first page, add a single
        // zero page to cover the spill.
        if((last_section != NULL) &&
           (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())) &&
           (ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size()) < (sections[i]->get_rva() & (~(SE_PAGE_SIZE - 1)))))
        {
            size_t size = SE_PAGE_SIZE;
            sec_info_t sinfo;
            memset(&sinfo, 0, sizeof(sinfo));
            sinfo.flags = last_section->get_si_flags();
            uint64_t rva = ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size());
            if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
                return ret;
        }

        // Track the section with the highest rva; it is the candidate for
        // the spill checks above and after the loop.
        if(sections[i]->get_rva() > max_rva)
        {
            max_rva = sections[i]->get_rva();
            last_section = sections[i];
        }

        //since build_mem_region require the sec_info.rva be page aligned, we need handle the first page.
        //build the first page;
        uint64_t offset = (sections[i]->get_rva() & (SE_PAGE_SIZE -1));
        uint64_t size = SE_PAGE_SIZE - offset;
        uint8_t first_page[SE_PAGE_SIZE];
        //the raw data may be smaller than the size, we get the min of them
        if(sections[i]->raw_data_size() < size)
            size = sections[i]->raw_data_size();
        //the padding is '0'
        memset(first_page, 0, SE_PAGE_SIZE);
        memcpy_s(&first_page[offset], (size_t)size, sections[i]->raw_data(), (size_t)size);
        section_info_t sec_info = { first_page, SE_PAGE_SIZE, sections[i]->get_rva() & (~(SE_PAGE_SIZE - 1)), SE_PAGE_SIZE, sections[i]->get_si_flags(), bitmap };
        if(SGX_SUCCESS != (ret = build_mem_region(&sec_info)))
        {
            return ret;
        }

        //if there is more pages, then build the next paged aligned pages
        if((sections[i]->virtual_size() + offset) > SE_PAGE_SIZE)
        {
            sec_info.raw_data = GET_PTR(uint8_t, sections[i]->raw_data(), size);
            sec_info.raw_data_size = sections[i]->raw_data_size() - size;
            sec_info.rva = sections[i]->get_rva() + (SE_PAGE_SIZE - offset);
            assert(0 == (sec_info.rva & (SE_PAGE_SIZE - 1)));
            //we need use (SE_PAGE_SIZE - offset), because (SE_PAGE_SIZE - offset) may larger than size
            sec_info.virtual_size = sections[i]->virtual_size() - (SE_PAGE_SIZE - offset);
            sec_info.flag = sections[i]->get_si_flags();
            sec_info.bitmap = bitmap;
            if(SGX_SUCCESS != (ret = build_mem_region(&sec_info)))
            {
                return ret;
            }
        }
    }

    // Same rounding-spill check for the final (highest-rva) section, which
    // has no following section to bound it.
    if((last_section != NULL) &&
       (ROUND_TO_PAGE(last_section->virtual_size() + last_section->get_rva()) < ROUND_TO_PAGE(ROUND_TO_PAGE(last_section->virtual_size()) + last_section->get_rva())))
    {
        size_t size = SE_PAGE_SIZE;
        sec_info_t sinfo;
        memset(&sinfo, 0, sizeof(sinfo));
        sinfo.flags = last_section->get_si_flags();
        uint64_t rva = ROUND_TO_PAGE(last_section->get_rva() + last_section->virtual_size());
        if(SGX_SUCCESS != (ret = build_pages(rva, size, 0, sinfo, ADD_EXTEND_PAGE)))
            return ret;
    }
    return SGX_SUCCESS;
}
  234. int CLoader::build_pages(const uint64_t start_rva, const uint64_t size, void *source, const sec_info_t &sinfo, const uint32_t attr)
  235. {
  236. int ret = SGX_SUCCESS;
  237. uint64_t offset = 0;
  238. uint64_t rva = start_rva;
  239. while(offset < size)
  240. {
  241. //call driver to add page;
  242. if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, source, rva, sinfo, attr)))
  243. {
  244. //if add page failed , we should remove enclave somewhere;
  245. return ret;
  246. }
  247. offset += SE_PAGE_SIZE;
  248. rva += SE_PAGE_SIZE;
  249. }
  250. return SGX_SUCCESS;
  251. }
  252. int CLoader::build_context(const uint64_t start_rva, layout_entry_t *layout)
  253. {
  254. int ret = SGX_ERROR_UNEXPECTED;
  255. uint8_t added_page[SE_PAGE_SIZE];
  256. sec_info_t sinfo;
  257. memset(&sinfo, 0, sizeof(sinfo));
  258. uint64_t rva = start_rva + layout->rva;
  259. if (layout->content_offset)
  260. {
  261. // assume TCS is only 1 page
  262. if(layout->si_flags == SI_FLAGS_TCS)
  263. {
  264. memset(added_page, 0, SE_PAGE_SIZE);
  265. memcpy_s(added_page, SE_PAGE_SIZE, GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size);
  266. tcs_t *ptcs = reinterpret_cast<tcs_t*>(added_page);
  267. ptcs->ossa += rva;
  268. ptcs->ofs_base += rva;
  269. ptcs->ogs_base += rva;
  270. m_tcs_list.push_back(GET_PTR(tcs_t, m_start_addr, rva));
  271. sinfo.flags = layout->si_flags;
  272. if(SGX_SUCCESS != (ret = build_pages(rva, layout->page_count << SE_PAGE_SHIFT, added_page, sinfo, layout->attributes)))
  273. {
  274. return ret;
  275. }
  276. }
  277. else // guard page should not have content_offset != 0
  278. {
  279. section_info_t sec_info = {GET_PTR(uint8_t, m_metadata, layout->content_offset), layout->content_size, rva, layout->page_count << SE_PAGE_SHIFT, layout->si_flags, NULL};
  280. if(SGX_SUCCESS != (ret = build_mem_region(&sec_info)))
  281. {
  282. return ret;
  283. }
  284. }
  285. }
  286. else if (layout->si_flags != SI_FLAG_NONE)
  287. {
  288. sinfo.flags = layout->si_flags;
  289. void *source = NULL;
  290. if(layout->content_size)
  291. {
  292. for(uint32_t *p = (uint32_t *)added_page; p < GET_PTR(uint32_t, added_page, SE_PAGE_SIZE); p++)
  293. {
  294. *p = layout->content_size;
  295. }
  296. source = added_page;
  297. }
  298. if(SGX_SUCCESS != (ret = build_pages(rva, layout->page_count << SE_PAGE_SHIFT, source, sinfo, layout->attributes)))
  299. {
  300. return ret;
  301. }
  302. }
  303. return SGX_SUCCESS;
  304. }
  305. int CLoader::build_contexts(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
  306. {
  307. int ret = SGX_ERROR_UNEXPECTED;
  308. for(layout_t *layout = layout_start; layout < layout_end; layout++)
  309. {
  310. if (!IS_GROUP_ID(layout->group.id))
  311. {
  312. if(SGX_SUCCESS != (ret = build_context(delta, &layout->entry)))
  313. {
  314. return ret;
  315. }
  316. }
  317. else
  318. {
  319. uint64_t step = 0;
  320. for(uint32_t j = 0; j < layout->group.load_times; j++)
  321. {
  322. step += layout->group.load_step;
  323. if(SGX_SUCCESS != (ret = build_contexts(&layout[-layout->group.entry_count], layout, step)))
  324. {
  325. return ret;
  326. }
  327. }
  328. }
  329. }
  330. return SGX_SUCCESS;
  331. }
// Creates the enclave: fills the SECS from metadata (size, SSA frame size)
// and from the negotiated attributes, then asks the enclave creator to
// create it. On success the driver fills m_enclave_id and m_start_addr.
//
// secs_attr: attributes to place in the SECS.
// misc_attr: supplies misc_select for the SECS.
// Returns SGX_SUCCESS, or the creator's error code.
int CLoader::build_secs(sgx_attributes_t * const secs_attr, sgx_misc_attribute_t * const misc_attr)
{
    memset(&m_secs, 0, sizeof(secs_t)); //should set resvered field of secs as 0.
    //create secs structure.
    m_secs.base = 0; //base is allocated by driver. set it as 0
    m_secs.size = m_metadata->enclave_size;
    m_secs.misc_select = misc_attr->misc_select;
    memcpy_s(&m_secs.attributes, sizeof(m_secs.attributes), secs_attr, sizeof(m_secs.attributes));
    m_secs.ssa_frame_size = m_metadata->ssa_frame_size;

    EnclaveCreator *enclave_creator = get_enclave_creator();
    if(NULL == enclave_creator)
        return SGX_ERROR_UNEXPECTED;
    // is_ae() selects architectural-enclave handling in the creator.
    int ret = enclave_creator->create_enclave(&m_secs, &m_enclave_id, &m_start_addr, is_ae(&m_metadata->enclave_css));
    if(SGX_SUCCESS == ret)
    {
        SE_TRACE(SE_TRACE_NOTICE, "enclave start address = %p, size = %x\n", m_start_addr, m_metadata->enclave_size);
    }
    return ret;
}
  351. int CLoader::build_image(SGXLaunchToken * const lc, sgx_attributes_t * const secs_attr, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t * const misc_attr)
  352. {
  353. int ret = SGX_SUCCESS;
  354. if(SGX_SUCCESS != (ret = build_secs(secs_attr, misc_attr)))
  355. {
  356. SE_TRACE(SE_TRACE_WARNING, "build secs failed\n");
  357. return ret;
  358. };
  359. // read reloc bitmap before patch the enclave file
  360. // If load_enclave_ex try to load the enclave for the 2nd time,
  361. // the enclave image is already patched, and parser cannot read the information.
  362. // For linux, there's no map conflict. We assume load_enclave_ex will not do the retry.
  363. vector<uint8_t> bitmap;
  364. if(!m_parser.get_reloc_bitmap(bitmap))
  365. return SGX_ERROR_INVALID_ENCLAVE;
  366. // patch enclave file
  367. patch_entry_t *patch_start = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset);
  368. patch_entry_t *patch_end = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset + m_metadata->dirs[DIR_PATCH].size);
  369. for(patch_entry_t *patch = patch_start; patch < patch_end; patch++)
  370. {
  371. memcpy_s(GET_PTR(void, m_parser.get_start_addr(), patch->dst), patch->size, GET_PTR(void, m_metadata, patch->src), patch->size);
  372. }
  373. //build sections, copy export function table as well;
  374. if(SGX_SUCCESS != (ret = build_sections(&bitmap)))
  375. {
  376. SE_TRACE(SE_TRACE_WARNING, "build sections failed\n");
  377. goto fail;
  378. }
  379. // build heap/thread context
  380. if (SGX_SUCCESS != (ret = build_contexts(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset),
  381. GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size),
  382. 0)))
  383. {
  384. SE_TRACE(SE_TRACE_WARNING, "build heap/thread context failed\n");
  385. goto fail;
  386. }
  387. //initialize Enclave
  388. ret = get_enclave_creator()->init_enclave(ENCLAVE_ID_IOCTL, const_cast<enclave_css_t *>(&m_metadata->enclave_css), lc, prd_css_file);
  389. if(SGX_SUCCESS != ret)
  390. {
  391. SE_TRACE(SE_TRACE_WARNING, "init_enclave failed\n");
  392. goto fail;
  393. }
  394. return SGX_SUCCESS;
  395. fail:
  396. get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL);
  397. return ret;
  398. }
  399. bool CLoader::is_metadata_buffer(uint32_t offset, uint32_t size)
  400. {
  401. if((offsetof(metadata_t, data) > offset) || (offset >= m_metadata->size))
  402. {
  403. return false;
  404. }
  405. uint32_t end = offset + size;
  406. if ((end < offset) || (end < size) || (end > m_metadata->size))
  407. {
  408. return false;
  409. }
  410. return true;
  411. }
  412. bool CLoader::is_enclave_buffer(uint64_t offset, uint64_t size)
  413. {
  414. if(offset >= m_metadata->enclave_size)
  415. {
  416. return false;
  417. }
  418. uint64_t end = offset + size;
  419. if ((end < offset) || (end < size) || (end > m_metadata->enclave_size))
  420. {
  421. return false;
  422. }
  423. return true;
  424. }
  425. int CLoader::validate_layout_table()
  426. {
  427. layout_t *layout_start = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset);
  428. layout_t *layout_end = GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size);
  429. vector<pair<uint64_t, uint64_t>> rva_vector;
  430. for (layout_t *layout = layout_start; layout < layout_end; layout++)
  431. {
  432. if(!IS_GROUP_ID(layout->entry.id)) // layout entry
  433. {
  434. rva_vector.push_back(make_pair(layout->entry.rva, layout->entry.page_count << SE_PAGE_SHIFT));
  435. if(layout->entry.content_offset)
  436. {
  437. if(false == is_metadata_buffer(layout->entry.content_offset, layout->entry.content_size))
  438. {
  439. return SGX_ERROR_INVALID_METADATA;
  440. }
  441. }
  442. }
  443. else // layout group
  444. {
  445. if (layout->group.entry_count > (uint32_t)(PTR_DIFF(layout, layout_start)/sizeof(layout_t)))
  446. {
  447. return SGX_ERROR_INVALID_METADATA;
  448. }
  449. uint64_t load_step = 0;
  450. for(uint32_t i = 0; i < layout->group.load_times; i++)
  451. {
  452. load_step += layout->group.load_step;
  453. if(load_step > m_metadata->enclave_size)
  454. {
  455. return SGX_ERROR_INVALID_METADATA;
  456. }
  457. for(layout_entry_t *entry = &layout[-layout->group.entry_count].entry; entry < &layout->entry; entry++)
  458. {
  459. if(IS_GROUP_ID(entry->id))
  460. {
  461. return SGX_ERROR_INVALID_METADATA;
  462. }
  463. rva_vector.push_back(make_pair(entry->rva + load_step, entry->page_count << SE_PAGE_SHIFT));
  464. // no need to check integer overflow for entry->rva + load_step, because
  465. // entry->rva and load_step are less than enclave_size, whose size is no more than 37 bit
  466. }
  467. }
  468. }
  469. }
  470. sort(rva_vector.begin(), rva_vector.end());
  471. for (vector<pair<uint64_t, uint64_t>>::iterator it = rva_vector.begin(); it != rva_vector.end(); it++)
  472. {
  473. if(!IS_PAGE_ALIGNED(it->first))
  474. {
  475. return SGX_ERROR_INVALID_METADATA;
  476. }
  477. if(false == is_enclave_buffer(it->first, it->second))
  478. {
  479. return SGX_ERROR_INVALID_METADATA;
  480. }
  481. if((it+1) != rva_vector.end())
  482. {
  483. if((it->first+it->second) > (it+1)->first)
  484. {
  485. return SGX_ERROR_INVALID_METADATA;
  486. }
  487. }
  488. }
  489. return SGX_SUCCESS;
  490. }
  491. int CLoader::validate_patch_table()
  492. {
  493. patch_entry_t *patch_start = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset);
  494. patch_entry_t *patch_end = GET_PTR(patch_entry_t, m_metadata, m_metadata->dirs[DIR_PATCH].offset + m_metadata->dirs[DIR_PATCH].size);
  495. for(patch_entry_t *patch = patch_start; patch < patch_end; patch++)
  496. {
  497. if(false == is_metadata_buffer(patch->src, patch->size))
  498. {
  499. return SGX_ERROR_INVALID_METADATA;
  500. }
  501. if(false == is_enclave_buffer(patch->dst, patch->size))
  502. {
  503. return SGX_ERROR_INVALID_METADATA;
  504. }
  505. }
  506. return SGX_SUCCESS;
  507. }
  508. int CLoader::validate_metadata()
  509. {
  510. if(!m_metadata)
  511. return SGX_ERROR_INVALID_METADATA;
  512. uint64_t version = META_DATA_MAKE_VERSION(MAJOR_VERSION,MINOR_VERSION );
  513. //if the version of metadata does NOT match the version of metadata in urts, we should NOT launch enclave.
  514. if(m_metadata->version != version)
  515. {
  516. SE_TRACE(SE_TRACE_WARNING, "Mismatch between the metadata urts required and the metadata in use.\n");
  517. return SGX_ERROR_INVALID_VERSION;
  518. }
  519. if(m_metadata->size > sizeof(metadata_t))
  520. {
  521. return SGX_ERROR_INVALID_METADATA;
  522. }
  523. if(m_metadata->tcs_policy > TCS_POLICY_UNBIND)
  524. return SGX_ERROR_INVALID_METADATA;
  525. if(m_metadata->ssa_frame_size < SSA_FRAME_SIZE_MIN || m_metadata->ssa_frame_size > SSA_FRAME_SIZE_MAX)
  526. return SGX_ERROR_INVALID_METADATA;
  527. uint64_t size = m_metadata->enclave_size;
  528. if(size > m_parser.get_enclave_max_size())
  529. {
  530. return SGX_ERROR_INVALID_METADATA;
  531. }
  532. while ((size != 0) && ((size & 1) != 1))
  533. {
  534. size = size >> 1;
  535. }
  536. if(size != 1)
  537. {
  538. return SGX_ERROR_INVALID_METADATA;
  539. }
  540. // check dirs
  541. for(uint32_t i = 0; i < DIR_NUM; i++)
  542. {
  543. if(false == is_metadata_buffer(m_metadata->dirs[i].offset, m_metadata->dirs[i].size))
  544. {
  545. return SGX_ERROR_INVALID_METADATA;
  546. }
  547. }
  548. // check layout table
  549. int status = validate_layout_table();
  550. if(SGX_SUCCESS != status)
  551. {
  552. return status;
  553. }
  554. // check patch table
  555. status = validate_patch_table();
  556. if(SGX_SUCCESS != status)
  557. {
  558. return status;
  559. }
  560. return SGX_SUCCESS;
  561. }
  562. bool CLoader::is_ae(const enclave_css_t *enclave_css)
  563. {
  564. assert(NULL != enclave_css);
  565. if(INTEL_VENDOR_ID == enclave_css->header.module_vendor
  566. && AE_PRODUCT_ID == enclave_css->body.isv_prod_id)
  567. return true;
  568. return false;
  569. }
// Loads the enclave in one pass: validates metadata, obtains the negotiated
// misc/secs attributes from the creator, and builds + initializes the image.
//
// lc:           launch token.
// debug:        debug flag, forwarded to get_misc_attr().
// metadata:     metadata blob from the enclave file (kept in m_metadata).
// prd_css_file: optional production css file.
// misc_attr:    out (optional): receives the final attributes on success.
// Returns SGX_SUCCESS or the first error encountered.
int CLoader::load_enclave(SGXLaunchToken *lc, int debug, const metadata_t *metadata, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t *misc_attr)
{
    int ret = SGX_SUCCESS;
    sgx_misc_attribute_t sgx_misc_attr;
    memset(&sgx_misc_attr, 0, sizeof(sgx_misc_attribute_t));
    m_metadata = metadata;

    ret = validate_metadata();
    if(SGX_SUCCESS != ret)
    {
        SE_TRACE(SE_TRACE_ERROR, "The metadata setting is not correct\n");
        return ret;
    }

    // Negotiate the attributes/misc_select the enclave will be created with.
    ret = get_enclave_creator()->get_misc_attr(&sgx_misc_attr, const_cast<metadata_t *>(m_metadata), lc, debug);
    if(SGX_SUCCESS != ret)
    {
        return ret;
    }

    ret = build_image(lc, &sgx_misc_attr.secs_attr, prd_css_file, &sgx_misc_attr);
    //Update misc_attr with secs.attr upon success.
    if(SGX_SUCCESS == ret)
    {
        if(misc_attr)
        {
            memcpy_s(misc_attr, sizeof(sgx_misc_attribute_t), &sgx_misc_attr, sizeof(sgx_misc_attribute_t));
            //When run here EINIT success, so SGX_FLAGS_INITTED should be set by ucode. uRTS align it with EINIT instruction.
            misc_attr->secs_attr.flags |= SGX_FLAGS_INITTED;
        }
    }
    return ret;
}
  600. int CLoader::load_enclave_ex(SGXLaunchToken *lc, bool debug, const metadata_t *metadata, le_prd_css_file_t *prd_css_file, sgx_misc_attribute_t *misc_attr)
  601. {
  602. unsigned int ret = SGX_SUCCESS, map_conflict_count = 3;
  603. bool retry = true;
  604. while (retry)
  605. {
  606. ret = this->load_enclave(lc, debug, metadata, prd_css_file, misc_attr);
  607. switch(ret)
  608. {
  609. //If CreateEnclave failed due to power transition, we retry it.
  610. case SGX_ERROR_ENCLAVE_LOST: //caused by loading enclave while power transition occurs
  611. break;
  612. //If memroy map conflict occurs, we only retry 3 times.
  613. case SGX_ERROR_MEMORY_MAP_CONFLICT:
  614. if(0 == map_conflict_count)
  615. retry = false;
  616. else
  617. map_conflict_count--;
  618. break;
  619. //We don't re-load enclave due to other error code.
  620. default:
  621. retry = false;
  622. break;
  623. }
  624. }
  625. return ret;
  626. }
// Destroys the enclave previously created by build_secs()/build_image()
// via the enclave creator.
int CLoader::destroy_enclave()
{
    return get_enclave_creator()->destroy_enclave(ENCLAVE_ID_IOCTL);
}
// Applies the final page protections to the loaded enclave mapping:
// mprotect()s each section with its SI flags, marks the gaps between
// sections inaccessible, and then delegates layout-described regions
// (heap/stacks/TCS) to set_context_protection().
//
// Returns SGX_SUCCESS, or SGX_ERROR_UNEXPECTED when an mprotect() fails.
int CLoader::set_memory_protection()
{
    uint64_t rva = 0;
    uint64_t len = 0;
    uint64_t last_section_end = 0;
    unsigned int i = 0;
    int ret = 0;
    //for sections
    std::vector<Section*> sections = m_parser.get_sections();
    for(i = 0; i < sections.size() ; i++)
    {
        //require the sec_info.rva be page aligned, we need handle the first page.
        //the first page;
        uint64_t offset = (sections[i]->get_rva() & (SE_PAGE_SIZE -1));
        uint64_t size = SE_PAGE_SIZE - offset;
        //the raw data may be smaller than the size, we get the min of them
        if(sections[i]->raw_data_size() < size)
            size = sections[i]->raw_data_size();
        // Start with the first (possibly partially filled) page...
        len = SE_PAGE_SIZE;
        //if there is more pages, then calc the next paged aligned pages
        // ...and extend by the remaining raw data and virtual (zero) pages,
        // mirroring how build_sections() laid the section out.
        if((sections[i]->virtual_size() + offset) > SE_PAGE_SIZE)
        {
            uint64_t raw_data_size = sections[i]->raw_data_size() - size;
            //we need use (SE_PAGE_SIZE - offset), because (SE_PAGE_SIZE - offset) may larger than size
            uint64_t virtual_size = sections[i]->virtual_size() - (SE_PAGE_SIZE - offset);
            len += ROUND_TO_PAGE(raw_data_size);
            if(ROUND_TO_PAGE(virtual_size) > ROUND_TO_PAGE(raw_data_size))
            {
                len += ROUND_TO_PAGE(virtual_size) - ROUND_TO_PAGE(raw_data_size);
            }
        }
        // Absolute, page-aligned start address of this section in the mapping.
        rva = TRIM_TO_PAGE(sections[i]->get_rva()) + (uint64_t)m_start_addr;
        ret = mprotect((void*)rva, (size_t)len, (int)(sections[i]->get_si_flags()&SI_MASK_MEM_ATTRIBUTE));
        if(ret != 0)
        {
            SE_TRACE(SE_TRACE_WARNING, "section[%d]:mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%" PRIu64 ") failed\n",
                     i, rva, len, (sections[i]->get_si_flags()));
            return SGX_ERROR_UNEXPECTED;
        }
        //there is a gap between sections, need to set those to NONE access
        if(last_section_end != 0)
        {
            ret = mprotect((void*)last_section_end, (size_t)(rva - last_section_end), (int)(SI_FLAG_NONE & SI_MASK_MEM_ATTRIBUTE));
            if(ret != 0)
            {
                SE_TRACE(SE_TRACE_WARNING, "set protection for gap before section[%d]:mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%" PRIu64 ") failed\n",
                         i, last_section_end, rva - last_section_end, SI_FLAG_NONE);
                return SGX_ERROR_UNEXPECTED;
            }
        }
        last_section_end = rva + len;
    }

    // Now protect the metadata-layout regions (heap, stacks, TCS, guards).
    ret = set_context_protection(GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset),
                                 GET_PTR(layout_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size),
                                 0);
    if (SGX_SUCCESS != ret)
    {
        return ret;
    }
    return SGX_SUCCESS;
}
  692. int CLoader::set_context_protection(layout_t *layout_start, layout_t *layout_end, uint64_t delta)
  693. {
  694. int ret = SGX_ERROR_UNEXPECTED;
  695. for(layout_t *layout = layout_start; layout < layout_end; layout++)
  696. {
  697. if (!IS_GROUP_ID(layout->group.id))
  698. {
  699. int prot = 0 ;
  700. if(layout->entry.attributes == SI_FLAG_NONE)
  701. {
  702. prot = SI_FLAG_NONE & SI_MASK_MEM_ATTRIBUTE;
  703. }
  704. else
  705. {
  706. prot = SI_FLAGS_RW & SI_MASK_MEM_ATTRIBUTE;
  707. }
  708. ret = mprotect(GET_PTR(void, m_start_addr, layout->entry.rva + delta),
  709. (size_t)(layout->entry.page_count << SE_PAGE_SHIFT),
  710. prot);
  711. if(ret != 0)
  712. {
  713. SE_TRACE(SE_TRACE_WARNING, "mprotect(rva=%" PRIu64 ", len=%" PRIu64 ", flags=%d) failed\n",
  714. (uint64_t)m_start_addr + layout->entry.rva + delta,
  715. (uint64_t)(layout->entry.page_count << SE_PAGE_SHIFT),
  716. prot);
  717. return SGX_ERROR_UNEXPECTED;
  718. }
  719. }
  720. else
  721. {
  722. uint64_t step = 0;
  723. for(uint32_t j = 0; j < layout->group.load_times; j++)
  724. {
  725. step += layout->group.load_step;
  726. if(SGX_SUCCESS != (ret = set_context_protection(&layout[-layout->group.entry_count], layout, step)))
  727. {
  728. return ret;
  729. }
  730. }
  731. }
  732. }
  733. return SGX_SUCCESS;
  734. }