/*
 * Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/**
 * File:
 *     manage_metadata.cpp
 * Description:
 *     Parse the xml file to get the metadata and generate the output DLL
 *     with metadata.
 */

#include "metadata.h"
#include "tinyxml2.h"
#include "manage_metadata.h"
#include "se_trace.h"
#include "util_st.h"
#include "section.h"
#include "se_page_attr.h"
#include "elf_util.h"

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <iostream>

using namespace tinyxml2;

#define ALIGN_SIZE 0x1000

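/* traverser_parameter():
 *     Validate one XML element against the table of known configuration
 *     parameters. The value must be a non-negative integer within the
 *     [min_value, max_value] range of the matching entry, and each element
 *     may appear only once. On success the parsed value is stored in the
 *     matching xml_parameter_t entry.
 */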
static bool traverser_parameter(const char *temp_name, const char *temp_text, xml_parameter_t *parameter, int parameter_count)
{
    assert(temp_name != NULL && parameter != NULL);
    uint64_t temp_value = 0;
    if(temp_text == NULL)
    {
        se_trace(SE_TRACE_ERROR, LACK_VALUE_FOR_ELEMENT_ERROR, temp_name);
        return false;
    }
    else
    {
        if(strchr(temp_text, '-'))
        {
            se_trace(SE_TRACE_ERROR, INVALID_VALUE_FOR_ELEMENT_ERROR, temp_name);
            return false;
        }
        errno = 0;
        char *endptr = NULL;
        temp_value = (uint64_t)strtoull(temp_text, &endptr, 0);
        if(*endptr != '\0' || errno != 0) // invalid value, or a valid value out of the representable range
        {
            se_trace(SE_TRACE_ERROR, INVALID_VALUE_FOR_ELEMENT_ERROR, temp_name);
            return false;
        }
    }

    // Look for the matching parameter
    int i = 0;
    for(; i < parameter_count && STRCMP(temp_name, parameter[i].name); i++);
    if(i >= parameter_count) // no match found, return false
    {
        se_trace(SE_TRACE_ERROR, UNREC_ELEMENT_ERROR, temp_name);
        return false;
    }
    // Found a match
    if(parameter[i].flag == 1) // repeated definition of an XML element, return false
    {
        se_trace(SE_TRACE_ERROR, REPEATED_DEFINE_ERROR, temp_name);
        return false;
    }
    parameter[i].flag = 1;
    if((temp_value < parameter[i].min_value) ||
       (temp_value > parameter[i].max_value)) // the value is out of range, return false
    {
        se_trace(SE_TRACE_ERROR, VALUE_OUT_OF_RANGE_ERROR, temp_name);
        return false;
    }
    parameter[i].value = temp_value;
    return true;
}

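/* parse_metadata_file():
 *     Load the enclave configuration XML file (if one was given) and walk
 *     every child element of <EnclaveConfiguration>, overriding the default
 *     values in the caller-supplied parameter table. Returns false on any
 *     I/O, format, or range error; returns true immediately (keeping the
 *     defaults) when xmlpath is NULL.
 *
 *     Illustrative call pattern only -- the real parameter table lives in
 *     the caller (the signing tool) and its exact contents are not shown here:
 *
 *         xml_parameter_t params[] = { ... };   // one entry per supported XML element
 *         if(!parse_metadata_file(config_path, params,
 *                                 (int)(sizeof(params) / sizeof(params[0]))))
 *             return false;
 */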
bool parse_metadata_file(const char *xmlpath, xml_parameter_t *parameter, int parameter_count)
{
    const char *temp_name = NULL;
    assert(parameter != NULL);

    if(xmlpath == NULL) // the user didn't provide a metadata xml file
    {
        se_trace(SE_TRACE_NOTICE, "Use default metadata...\n");
        return true;
    }

    // Use the metadata file given by the user: parse the xml file
    tinyxml2::XMLDocument doc;
    XMLError loadOkay = doc.LoadFile(xmlpath);
    if(loadOkay != XML_SUCCESS)
    {
        if(doc.ErrorID() == XML_ERROR_FILE_COULD_NOT_BE_OPENED)
        {
            se_trace(SE_TRACE_ERROR, OPEN_FILE_ERROR, xmlpath);
        }
        else
        {
            se_trace(SE_TRACE_ERROR, XML_FORMAT_ERROR);
        }
        return false;
    }
    doc.Print(); // Write the document to standard out using formatted printing ("pretty print")

    XMLElement *pmetadata_element = doc.FirstChildElement("EnclaveConfiguration");
    if(!pmetadata_element || pmetadata_element->GetText() != NULL)
    {
        se_trace(SE_TRACE_ERROR, XML_FORMAT_ERROR);
        return false;
    }
    XMLElement *sub_element = NULL;
    sub_element = pmetadata_element->FirstChildElement();
    const char *temp_text = NULL;

    while(sub_element) // parse each xml node
    {
        if(sub_element->FirstAttribute() != NULL)
        {
            se_trace(SE_TRACE_ERROR, XML_FORMAT_ERROR);
            return false;
        }
        temp_name = sub_element->Value();
        temp_text = sub_element->GetText();

        // Traverse every node and compare against the default values
        if(traverser_parameter(temp_name, temp_text, parameter, parameter_count) == false)
        {
            se_trace(SE_TRACE_ERROR, XML_FORMAT_ERROR);
            return false;
        }
        sub_element = sub_element->NextSiblingElement();
    }
    return true;
}

CMetadata::CMetadata(metadata_t *metadata, BinParser *parser)
    : m_metadata(metadata)
    , m_parser(parser)
{
    memset(m_metadata, 0, sizeof(metadata_t));
    memset(&m_create_param, 0, sizeof(m_create_param));
}

CMetadata::~CMetadata()
{
}

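/* build_metadata():
 *     Top-level driver: fill in the fixed metadata fields from the XML
 *     parameters, then build the layout table and the patch table.
 */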
bool CMetadata::build_metadata(const xml_parameter_t *parameter)
{
    if(!modify_metadata(parameter))
    {
        return false;
    }
    // layout table
    if(!build_layout_table())
    {
        return false;
    }
    // patch table
    if(!build_patch_table())
    {
        return false;
    }
    return true;
}

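/* modify_metadata():
 *     Copy the user-configurable values (TCS policy, stack/heap sizes,
 *     TCS number, MiscSelect/MiscMask, ...) into the metadata structure and
 *     the enclave creation parameters, validating page alignment and the
 *     HW/LaunchKey combination on the way.
 */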
bool CMetadata::modify_metadata(const xml_parameter_t *parameter)
{
    assert(parameter != NULL);
    m_metadata->version = META_DATA_MAKE_VERSION(MAJOR_VERSION, MINOR_VERSION);
    m_metadata->size = offsetof(metadata_t, data);
    m_metadata->tcs_policy = (uint32_t)parameter[TCSPOLICY].value;
    m_metadata->ssa_frame_size = SSA_FRAME_SIZE;

    // stack/heap sizes must be page-aligned
    if(parameter[STACKMAXSIZE].value % ALIGN_SIZE)
    {
        se_trace(SE_TRACE_ERROR, SET_STACK_SIZE_ERROR);
        return false;
    }
    if(parameter[HEAPMAXSIZE].value % ALIGN_SIZE)
    {
        se_trace(SE_TRACE_ERROR, SET_HEAP_SIZE_ERROR);
        return false;
    }

    // LE setting: HW != 0, LaunchKey != 0
    // Other enclave setting: HW == 0, LaunchKey == 0
    if((parameter[HW].value == 0 && parameter[LAUNCHKEY].value != 0) ||
       (parameter[HW].value != 0 && parameter[LAUNCHKEY].value == 0))
    {
        se_trace(SE_TRACE_ERROR, SET_HW_LE_ERROR);
        return false;
    }
    m_metadata->max_save_buffer_size = MAX_SAVE_BUF_SIZE;
    m_metadata->magic_num = METADATA_MAGIC;
    m_metadata->desired_misc_select = 0;
    m_metadata->enclave_css.body.misc_select = (uint32_t)parameter[MISCSELECT].value;
    m_metadata->enclave_css.body.misc_mask = (uint32_t)parameter[MISCMASK].value;

    m_create_param.heap_max_size = parameter[HEAPMAXSIZE].value;
    m_create_param.ssa_frame_size = SSA_FRAME_SIZE;
    m_create_param.stack_max_size = parameter[STACKMAXSIZE].value;
    m_create_param.tcs_max_num = (uint32_t)parameter[TCSNUM].value;
    m_create_param.tcs_policy = m_metadata->tcs_policy;
    return true;
}

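/* alloc_buffer_from_metadata():
 *     Reserve 'size' bytes inside the metadata blob, right after the data
 *     consumed so far, and return a pointer to it; returns NULL if the
 *     request overflows or exceeds METADATA_SIZE.
 */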
void *CMetadata::alloc_buffer_from_metadata(uint32_t size)
{
    void *addr = GET_PTR(void, m_metadata, m_metadata->size);
    m_metadata->size += size;
    if((m_metadata->size < size) || (m_metadata->size > METADATA_SIZE))
    {
        return NULL;
    }
    return addr;
}

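/* build_layout_entries():
 *     Copy the prepared layout entries into the metadata layout directory,
 *     assigning an RVA to each non-group entry (starting right after the
 *     image sections) and computing the load_step for group entries. A
 *     trailing guard-page entry pads the enclave up to its final size.
 */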
bool CMetadata::build_layout_entries(vector<layout_t> &layouts)
{
    uint32_t size = (uint32_t)(layouts.size() * sizeof(layout_t));
    layout_t *layout_table = (layout_t *)alloc_buffer_from_metadata(size);
    if(layout_table == NULL)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    m_metadata->dirs[DIR_LAYOUT].offset = (uint32_t)PTR_DIFF(layout_table, m_metadata);
    m_metadata->dirs[DIR_LAYOUT].size = size;

    uint64_t rva = calculate_sections_size();
    if(rva == 0)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    for(uint32_t i = 0; i < layouts.size(); i++)
    {
        memcpy_s(layout_table, sizeof(layout_t), &layouts[i], sizeof(layout_t));
        if(!IS_GROUP_ID(layouts[i].entry.id))
        {
            layout_table->entry.rva = rva;
            rva += (uint64_t)layouts[i].entry.page_count << SE_PAGE_SHIFT;
        }
        else
        {
            for(uint32_t j = 0; j < layouts[i].group.entry_count; j++)
            {
                layout_table->group.load_step += (uint64_t)layouts[i-j-1].entry.page_count << SE_PAGE_SHIFT;
            }
            rva += layouts[i].group.load_times * layout_table->group.load_step;
        }
        layout_table++;
    }

    // enclave virtual size
    m_metadata->enclave_size = calculate_enclave_size(rva);
    if(m_metadata->enclave_size == (uint64_t)-1)
    {
        se_trace(SE_TRACE_ERROR, OUT_OF_EPC_ERROR);
        return false;
    }
    // the last guard page entry rounds the enclave size up to a power of 2
    if(m_metadata->enclave_size - rva > 0)
    {
        layout_table = (layout_t *)alloc_buffer_from_metadata(sizeof(layout_t));
        if(layout_table == NULL)
        {
            se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
            return false;
        }
        layout_table->entry.id = LAYOUT_ID_GUARD;
        layout_table->entry.rva = rva;
        layout_table->entry.page_count = (uint32_t)((m_metadata->enclave_size - rva) >> SE_PAGE_SHIFT);
        m_metadata->dirs[DIR_LAYOUT].size += (uint32_t)sizeof(layout_t);
    }
    return true;
}

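/* build_layout_table():
 *     Assemble the layout entries for the enclave: the heap, followed by the
 *     per-thread context (guard page, stack, guard page, TCS, SSA, guard
 *     page, TD/TLS) and, when more than one TCS is requested, a group entry
 *     that replicates the thread context. Also reserves the TCS template
 *     inside the metadata and fills it in afterwards.
 */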
bool CMetadata::build_layout_table()
{
    vector<layout_t> layouts;
    layout_t layout;
    memset(&layout, 0, sizeof(layout));

    layout_t guard_page;
    memset(&guard_page, 0, sizeof(guard_page));
    guard_page.entry.id = LAYOUT_ID_GUARD;
    guard_page.entry.page_count = SE_GUARD_PAGE_SIZE >> SE_PAGE_SHIFT;

    // heap
    layout.entry.id = LAYOUT_ID_HEAP;
    layout.entry.page_count = (uint32_t)(m_create_param.heap_max_size >> SE_PAGE_SHIFT);
    layout.entry.attributes = ADD_PAGE_ONLY;
    layout.entry.si_flags = SI_FLAGS_RW;
    layouts.push_back(layout);

    // thread context memory layout:
    // guard page | stack | guard page | TCS | SSA | guard page | TLS

    // guard page
    layouts.push_back(guard_page);

    // stack
    layout.entry.id = LAYOUT_ID_STACK;
    layout.entry.page_count = (uint32_t)(m_create_param.stack_max_size >> SE_PAGE_SHIFT);
    layout.entry.attributes = ADD_EXTEND_PAGE;
    layout.entry.si_flags = SI_FLAGS_RW;
    layout.entry.content_size = 0xCCCCCCCC;
    layouts.push_back(layout);

    // guard page
    layouts.push_back(guard_page);

    // tcs
    layout.entry.id = LAYOUT_ID_TCS;
    layout.entry.page_count = TCS_SIZE >> SE_PAGE_SHIFT;
    layout.entry.attributes = ADD_EXTEND_PAGE;
    layout.entry.si_flags = SI_FLAGS_TCS;
    tcs_t *tcs_template = (tcs_t *)alloc_buffer_from_metadata(TCS_TEMPLATE_SIZE);
    if(tcs_template == NULL)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    layout.entry.content_offset = (uint32_t)PTR_DIFF(tcs_template, m_metadata);
    layout.entry.content_size = TCS_TEMPLATE_SIZE;
    layouts.push_back(layout);
    memset(&layout, 0, sizeof(layout));

    // ssa
    layout.entry.id = LAYOUT_ID_SSA;
    layout.entry.page_count = SSA_FRAME_SIZE * SSA_NUM;
    layout.entry.attributes = ADD_EXTEND_PAGE;
    layout.entry.si_flags = SI_FLAGS_RW;
    layouts.push_back(layout);

    // guard page
    layouts.push_back(guard_page);

    // td
    layout.entry.id = LAYOUT_ID_TD;
    layout.entry.page_count = 1;
    const Section *section = m_parser->get_tls_section();
    if(section)
    {
        layout.entry.page_count += (uint32_t)(ROUND_TO_PAGE(section->virtual_size()) >> SE_PAGE_SHIFT);
    }
    layout.entry.attributes = ADD_EXTEND_PAGE;
    layout.entry.si_flags = SI_FLAGS_RW;
    layouts.push_back(layout);

    // group entry for the additional thread contexts
    if(m_create_param.tcs_max_num > 1)
    {
        memset(&layout, 0, sizeof(layout));
        layout.group.id = LAYOUT_ID_THREAD_GROUP;
        layout.group.entry_count = (uint16_t)(layouts.size() - 1);
        layout.group.load_times = m_create_param.tcs_max_num - 1;
        layouts.push_back(layout);
    }

    // build layout table
    if(false == build_layout_entries(layouts))
    {
        return false;
    }

    // tcs template
    if(false == build_tcs_template(tcs_template))
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    return true;
}

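/* build_patch_entries():
 *     Copy the collected patch entries into the metadata patch directory.
 */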
bool CMetadata::build_patch_entries(vector<patch_entry_t> &patches)
{
    uint32_t size = (uint32_t)(patches.size() * sizeof(patch_entry_t));
    patch_entry_t *patch_table = (patch_entry_t *)alloc_buffer_from_metadata(size);
    if(patch_table == NULL)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    m_metadata->dirs[DIR_PATCH].offset = (uint32_t)PTR_DIFF(patch_table, m_metadata);
    m_metadata->dirs[DIR_PATCH].size = size;

    for(uint32_t i = 0; i < patches.size(); i++)
    {
        memcpy_s(patch_table, sizeof(patch_entry_t), &patches[i], sizeof(patch_entry_t));
        patch_table++;
    }
    return true;
}

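/* build_patch_table():
 *     Build the list of patches applied to the image before measurement:
 *     the global data template patched over g_global_data, and zero-patches
 *     for the ELF section-header fields (plus the PT_GNU_RELRO program
 *     header on ELF32) so they do not affect the enclave measurement.
 */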
bool CMetadata::build_patch_table()
{
    const uint8_t *base_addr = (const uint8_t *)m_parser->get_start_addr();
    vector<patch_entry_t> patches;
    patch_entry_t patch;
    memset(&patch, 0, sizeof(patch));

    // global data template
    uint8_t buf[200];
    uint32_t size = 200;
    memset(buf, 0, size);
    if(false == build_gd_template(buf, &size))
    {
        return false;
    }
    uint8_t *gd_template = (uint8_t *)alloc_buffer_from_metadata(size);
    if(gd_template == NULL)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    memcpy_s(gd_template, size, buf, size);

    uint64_t rva = m_parser->get_symbol_rva("g_global_data");
    if(0 == rva)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    patch.dst = (uint64_t)PTR_DIFF(get_rawdata_by_rva(rva), base_addr);
    patch.src = (uint32_t)PTR_DIFF(gd_template, m_metadata);
    patch.size = size;
    patches.push_back(patch);

    // patch the image header
    uint64_t *zero = (uint64_t *)alloc_buffer_from_metadata(sizeof(*zero));
    if(zero == NULL)
    {
        se_trace(SE_TRACE_ERROR, INVALID_ENCLAVE_ERROR);
        return false;
    }
    *zero = 0;

    bin_fmt_t bf = m_parser->get_bin_format();
    if(bf == BF_ELF32)
    {
        Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *)base_addr;
        patch.dst = (uint64_t)PTR_DIFF(&elf_hdr->e_shnum, base_addr);
        patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
        patch.size = (uint32_t)sizeof(elf_hdr->e_shnum);
        patches.push_back(patch);

        patch.dst = (uint64_t)PTR_DIFF(&elf_hdr->e_shoff, base_addr);
        patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
        patch.size = (uint32_t)sizeof(elf_hdr->e_shoff);
        patches.push_back(patch);

        patch.dst = (uint64_t)PTR_DIFF(&elf_hdr->e_shstrndx, base_addr);
        patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
        patch.size = (uint32_t)sizeof(elf_hdr->e_shstrndx);
        patches.push_back(patch);

        // Zero the GNU_RELRO program header to eliminate its impact on the enclave measurement.
        Elf32_Phdr *prg_hdr = GET_PTR(Elf32_Phdr, base_addr, elf_hdr->e_phoff);
        for(unsigned idx = 0; idx < elf_hdr->e_phnum; ++idx, ++prg_hdr)
        {
            if(prg_hdr->p_type == PT_GNU_RELRO)
            {
                patch.dst = (uint64_t)PTR_DIFF(prg_hdr, base_addr);
                patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
                patch.size = (uint32_t)sizeof(Elf32_Phdr);
                patches.push_back(patch);
                break;
            }
        }
    }
    else if(bf == BF_ELF64)
    {
        Elf64_Ehdr *elf_hdr = (Elf64_Ehdr *)base_addr;
        patch.dst = (uint64_t)PTR_DIFF(&elf_hdr->e_shnum, base_addr);
        patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
        patch.size = (uint32_t)sizeof(elf_hdr->e_shnum);
        patches.push_back(patch);

        patch.dst = (uint64_t)PTR_DIFF(&elf_hdr->e_shoff, base_addr);
        patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
        patch.size = (uint32_t)sizeof(elf_hdr->e_shoff);
        patches.push_back(patch);

        patch.dst = (uint64_t)PTR_DIFF(&elf_hdr->e_shstrndx, base_addr);
        patch.src = (uint32_t)PTR_DIFF(zero, m_metadata);
        patch.size = (uint32_t)sizeof(elf_hdr->e_shstrndx);
        patches.push_back(patch);
    }

    if(false == build_patch_entries(patches))
    {
        return false;
    }
    return true;
}

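/* get_entry_by_id():
 *     Return the first layout entry with the given id from the layout
 *     directory built above; the entry is expected to exist.
 */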
layout_entry_t *CMetadata::get_entry_by_id(uint16_t id)
{
    layout_entry_t *layout_start = GET_PTR(layout_entry_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset);
    layout_entry_t *layout_end = GET_PTR(layout_entry_t, m_metadata, m_metadata->dirs[DIR_LAYOUT].offset + m_metadata->dirs[DIR_LAYOUT].size);

    for(layout_entry_t *layout = layout_start; layout < layout_end; layout++)
    {
        if(layout->id == id)
            return layout;
    }
    assert(false);
    return NULL;
}

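/* build_gd_template():
 *     Derive the enclave creation parameters that are relative to the TCS
 *     (stack limit/base, first SSA GPR, TD/TLS addresses, heap offset) from
 *     the layout table, then ask the parser to serialize the global data
 *     template into 'data'.
 */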
bool CMetadata::build_gd_template(uint8_t *data, uint32_t *data_size)
{
    m_create_param.stack_limit_addr = get_entry_by_id(LAYOUT_ID_STACK)->rva - get_entry_by_id(LAYOUT_ID_TCS)->rva;
    m_create_param.stack_base_addr = ((uint64_t)get_entry_by_id(LAYOUT_ID_STACK)->page_count << SE_PAGE_SHIFT) + m_create_param.stack_limit_addr;
    m_create_param.first_ssa_gpr = get_entry_by_id(LAYOUT_ID_SSA)->rva - get_entry_by_id(LAYOUT_ID_TCS)->rva
                                   + SSA_FRAME_SIZE * SE_PAGE_SIZE - (uint64_t)sizeof(ssa_gpr_t);
    m_create_param.enclave_size = m_metadata->enclave_size;
    m_create_param.heap_offset = get_entry_by_id(LAYOUT_ID_HEAP)->rva;

    uint64_t tmp_tls_addr = get_entry_by_id(LAYOUT_ID_TD)->rva - get_entry_by_id(LAYOUT_ID_TCS)->rva;
    m_create_param.td_addr = tmp_tls_addr + (((uint64_t)get_entry_by_id(LAYOUT_ID_TD)->page_count - 1) << SE_PAGE_SHIFT);

    const Section *section = m_parser->get_tls_section();
    if(section)
    {
        /* adjust the tls_addr to be the pointer to the actual TLS data area */
        m_create_param.tls_addr = m_create_param.td_addr - section->virtual_size();
        assert(TRIM_TO_PAGE(m_create_param.tls_addr) == tmp_tls_addr);
    }
    else
        m_create_param.tls_addr = tmp_tls_addr;

    if(false == m_parser->update_global_data(&m_create_param, data, data_size))
    {
        se_trace(SE_TRACE_ERROR, NO_MEMORY_ERROR); // the metadata structure does not have enough memory for the global_data template
        return false;
    }
    return true;
}

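/* build_tcs_template():
 *     Fill in the TCS template stored in the metadata: entry point, number
 *     of SSA frames, and the SSA/FS/GS offsets relative to the TCS page.
 */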
bool CMetadata::build_tcs_template(tcs_t *tcs)
{
    tcs->oentry = m_parser->get_symbol_rva("enclave_entry");
    if(tcs->oentry == 0)
    {
        return false;
    }
    tcs->nssa = SSA_NUM;
    tcs->cssa = 0;
    tcs->ossa = get_entry_by_id(LAYOUT_ID_SSA)->rva - get_entry_by_id(LAYOUT_ID_TCS)->rva;
    // fs/gs pointer at TLS/TD
    tcs->ofs_base = tcs->ogs_base = get_entry_by_id(LAYOUT_ID_TD)->rva - get_entry_by_id(LAYOUT_ID_TCS)->rva + (((uint64_t)get_entry_by_id(LAYOUT_ID_TD)->page_count - 1) << SE_PAGE_SHIFT);
    tcs->ofs_limit = tcs->ogs_limit = (uint32_t)-1;
    return true;
}

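/* get_rawdata_by_rva():
 *     Map an RVA back to a pointer into the raw data of the section that
 *     contains it, or return 0 if the RVA falls outside every section's
 *     raw data.
 */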
void *CMetadata::get_rawdata_by_rva(uint64_t rva)
{
    std::vector<Section*> sections = m_parser->get_sections();

    for(unsigned int i = 0; i < sections.size(); i++)
    {
        uint64_t start_rva = TRIM_TO_PAGE(sections[i]->get_rva());
        uint64_t end_rva = ROUND_TO_PAGE(sections[i]->get_rva() + sections[i]->virtual_size());
        if(start_rva <= rva && rva < end_rva)
        {
            uint64_t offset = rva - sections[i]->get_rva();
            if(offset > sections[i]->raw_data_size())
            {
                return 0;
            }
            return GET_PTR(void, sections[i]->raw_data(), offset);
        }
    }
    return 0;
}

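/* calculate_sections_size():
 *     Compute the page-aligned size of the loaded image: the end RVA of the
 *     highest section rounded up to a page, plus one extra page when
 *     rounding the last section's virtual size separately would push its
 *     end past that boundary.
 */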
uint64_t CMetadata::calculate_sections_size()
{
    std::vector<Section*> sections = m_parser->get_sections();
    uint64_t max_rva = 0;
    Section *last_section = NULL;

    for(unsigned int i = 0; i < sections.size(); i++)
    {
        if(sections[i]->get_rva() > max_rva)
        {
            max_rva = sections[i]->get_rva();
            last_section = sections[i];
        }
    }
    uint64_t size = (NULL == last_section) ? (0) : (last_section->get_rva() + last_section->virtual_size());
    size = ROUND_TO_PAGE(size);

    if(last_section != NULL && size < ROUND_TO_PAGE(last_section->get_rva() + ROUND_TO_PAGE(last_section->virtual_size())))
    {
        size += SE_PAGE_SIZE;
    }
    return size;
}

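/* calculate_enclave_size():
 *     Round the required size up to the next power of two and check it
 *     against the parser's maximum enclave size; returns (uint64_t)-1 on
 *     overflow or if the limit is exceeded.
 */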
uint64_t CMetadata::calculate_enclave_size(uint64_t size)
{
    uint64_t enclave_max_size = m_parser->get_enclave_max_size();
    if(size > enclave_max_size)
        return (uint64_t)-1;

    uint64_t round_size = 1;
    while(round_size < size)
    {
        round_size <<= 1;
        if(!round_size)
            return (uint64_t)-1;
    }
    if(round_size > enclave_max_size)
        return (uint64_t)-1;
    return round_size;
}

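/* update_metadata():
 *     Write the finished metadata blob back into the enclave file at the
 *     given offset.
 */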
bool update_metadata(const char *path, const metadata_t *metadata, uint64_t meta_offset)
{
    assert(path != NULL && metadata != NULL);

    return write_data_to_file(path, std::ios::in | std::ios::binary | std::ios::out,
        reinterpret_cast<uint8_t *>(const_cast<metadata_t *>(metadata)), metadata->size, (long)meta_offset);
}