elfparser.cpp

/*
 * Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include "elfparser.h"
#include "cpputil.h"
#include "se_trace.h"
#include "se_memcpy.h"
#include "global_data.h"

#define META_SECTION_ALIGNMENT 1 /* the metadata section requires no alignment */
namespace {
/** The callback type used to filter a section.
 *
 * @shstrtab:  the section header string table
 * @shdr:      the current section header to be examined
 * @user_data: user-supplied data for the callback
 *
 * @return: true if the current section header is the one being looked for.
 */
typedef bool (* section_filter_f)(const char* shstrtab,
                                  const ElfW(Shdr)* shdr,
                                  const void* user_data);
bool compare_section_name(const char* shstrtab,
                          const ElfW(Shdr)* shdr,
                          const void* user_data)
{
    // `shstrtab + shdr->sh_name' is the section name.
    return (!strcmp(shstrtab + shdr->sh_name, (const char*)user_data));
}
bool compare_section_addr(const char* shstrtab,
                          const ElfW(Shdr)* shdr,
                          const void* user_data)
{
    UNUSED(shstrtab);
    return (shdr->sh_addr == (ElfW(Addr))(size_t)user_data);
}
const ElfW(Shdr)* get_section(const ElfW(Ehdr) *elf_hdr,
                              section_filter_f f,
                              const void* user_data)
{
    const ElfW(Shdr) *shdr = GET_PTR(ElfW(Shdr), elf_hdr, elf_hdr->e_shoff);
    assert(sizeof(ElfW(Shdr)) == elf_hdr->e_shentsize);

    // section header string table
    const char *shstrtab = GET_PTR(char, elf_hdr, shdr[elf_hdr->e_shstrndx].sh_offset);

    for (unsigned idx = 0; idx < elf_hdr->e_shnum; ++idx, ++shdr)
    {
        SE_TRACE(SE_TRACE_DEBUG, "section [%u] %s: sh_addr = %x, sh_size = %x, sh_offset = %x, sh_name = %x\n",
                 idx, shstrtab + shdr->sh_name, shdr->sh_addr, shdr->sh_size, shdr->sh_offset, shdr->sh_name);
        if (f(shstrtab, shdr, user_data))
            return shdr;
    }

    return NULL;
}
const ElfW(Shdr)* get_section_by_name(const ElfW(Ehdr) *elf_hdr, const char *name)
{
    return get_section(elf_hdr, compare_section_name, name);
}

const ElfW(Shdr)* get_section_by_addr(const ElfW(Ehdr) *elf_hdr, ElfW(Addr) start_addr)
{
    return get_section(elf_hdr, compare_section_addr, (const void*)(size_t)start_addr);
}
template <typename T>
const T* get_section_raw_data(const ElfW(Ehdr) *elf_hdr, ElfW(Addr) start_addr)
{
    const ElfW(Shdr)* shdr = get_section_by_addr(elf_hdr, start_addr);
    if (shdr == NULL)
        return NULL;

    return GET_PTR(T, elf_hdr, shdr->sh_offset);
}
bool validate_elf_header(const ElfW(Ehdr) *elf_hdr)
{
    // validate the magic number
    if (memcmp(&elf_hdr->e_ident, ELFMAG, SELFMAG))
        return false;

#if RTS_SYSTEM_WORDSIZE == 64
    if (ELFCLASS64 != elf_hdr->e_ident[EI_CLASS])
        return false;
#else
    if (ELFCLASS32 != elf_hdr->e_ident[EI_CLASS])
        return false;
#endif

    if (ELFDATA2LSB != elf_hdr->e_ident[EI_DATA])
        return false;

    if (EV_CURRENT != elf_hdr->e_ident[EI_VERSION])
        return false;

    if (ET_DYN != elf_hdr->e_type)
        return false;

    if (sizeof(ElfW(Phdr)) != elf_hdr->e_phentsize)
        return false;

    return true;
}
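// Note: the ET_DYN check above reflects that enclave images are built as
// position-independent shared objects, and the EI_CLASS/EI_DATA checks
// restrict loading to little-endian x86/x86-64 images that match the
// loader's word size.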
bool parse_dyn(const ElfW(Ehdr) *elf_hdr, ElfW(Dyn)* dyn_info)
{
    const ElfW(Phdr) *prg_hdr = GET_PTR(ElfW(Phdr), elf_hdr, elf_hdr->e_phoff);
    bool has_dyn = false;

    for (unsigned idx = 0; idx < elf_hdr->e_phnum; ++idx, ++prg_hdr)
    {
        if (PT_DYNAMIC == prg_hdr->p_type)
        {
            const ElfW(Dyn) *dyn_entry = GET_PTR(ElfW(Dyn), elf_hdr, prg_hdr->p_offset);

            // Parse the dynamic segment.
            // An entry with a DT_NULL tag marks the end.
            while (dyn_entry->d_tag != DT_NULL)
            {
                SE_TRACE(SE_TRACE_DEBUG, "dynamic tag = %x, ptr = %x\n", dyn_entry->d_tag, dyn_entry->d_un.d_ptr);

                if (dyn_entry->d_tag < DT_NUM)
                {
                    memcpy_s(&dyn_info[dyn_entry->d_tag], sizeof(ElfW(Dyn)),
                             dyn_entry, sizeof(ElfW(Dyn)));
                }
                else if (dyn_entry->d_tag > DT_ADDRRNGLO && dyn_entry->d_tag <= DT_ADDRRNGHI)
                {
                    memcpy_s(&dyn_info[DT_ADDRTAGIDX(dyn_entry->d_tag) + DT_NUM], sizeof(ElfW(Dyn)),
                             dyn_entry, sizeof(ElfW(Dyn)));
                }

                dyn_entry++;
                has_dyn = true;
            }

            return has_dyn;
        }
    }

    return false;
}
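// `dyn_info' is indexed by dynamic tag: tags below DT_NUM map directly, while
// tags in the DT_ADDRRNGLO..DT_ADDRRNGHI range are folded in after them via
// DT_ADDRTAGIDX. For example (assuming glibc's <elf.h> definitions, where
// DT_ADDRTAGIDX(tag) == DT_ADDRRNGHI - (tag)), DT_GNU_HASH lands at index
// DT_ADDRTAGIDX(DT_GNU_HASH) + DT_NUM.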
/** Check whether there are undefined symbols, and save the addresses
 * of a few reserved symbols.
 *
 * The ELF format defines two symbol tables, `.symtab' and `.dynsym'.
 *
 * `.symtab' is non-allocable, and might be stripped.
 * `.dynsym' is allocable, and only contains global symbols.
 *
 * We only need to search `.dynsym' for undefined symbols.
 */
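// (For inspection, `.dynsym' can be dumped with binutils, e.g.
// `readelf --dyn-syms enclave.so'; the file name is just a placeholder.)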
bool check_symbol_table(const ElfW(Ehdr) *elf_hdr, const ElfW(Dyn) *dyn_info,
                        map<string, uint64_t>& sym_table)
{
    const ElfW(Shdr) *sh_symtab = get_section_by_addr(elf_hdr, dyn_info[DT_SYMTAB].d_un.d_ptr);

    if (sh_symtab == NULL)
    {
        // We must at least have "enclave_entry"
        SE_TRACE(SE_TRACE_WARNING, "There is no .dynsym section\n");
        return false;
    }

    if (sh_symtab->sh_entsize == 0)
    {
        SE_TRACE(SE_TRACE_WARNING, "In section .dynsym, sh_entsize is 0.\n");
        return false;
    }

    const ElfW(Sym) *symtab = GET_PTR(ElfW(Sym), elf_hdr, sh_symtab->sh_offset);
    uint32_t sym_num = (uint32_t)(sh_symtab->sh_size/sh_symtab->sh_entsize);
    const char *strtab = get_section_raw_data<char>(elf_hdr, dyn_info[DT_STRTAB].d_un.d_ptr);

    // We only store "enclave_entry", "g_global_data_sim", "g_peak_heap_used"
    // and "g_global_data". To export new symbols, add them here.
    //
    // "g_global_data_sim" is needed so that we can check whether a
    // simulation-mode enclave is being loaded by the HW loader.
    const char* str[] = { "enclave_entry", "g_global_data_sim", "g_peak_heap_used", "g_global_data" };

    // The first entry is reserved, and must be all zeros
    for (uint32_t idx = 1; idx < sym_num; ++idx)
    {
        // st_name == 0 indicates the symbol table entry has no name.
        if (symtab[idx].st_name == 0) continue;

        const char* sym = strtab + symtab[idx].st_name;
        if (sym == NULL)
        {
            SE_TRACE(SE_TRACE_WARNING, "Malformed enclave with NULL symbol name\n");
            return false;
        }

        if (SHN_UNDEF == symtab[idx].st_shndx
            && STB_WEAK != ELFW(ST_BIND)(symtab[idx].st_info))
        {
            SE_TRACE(SE_TRACE_WARNING, "symbol '%s' is undefined\n", sym);
            return false;
        }

#define SYMBOL_NUM (ARRAY_LENGTH(str))
        for (size_t i = 0; i < SYMBOL_NUM; ++i)
        {
            if (0 == strcmp(str[i], sym))
            {
                sym_table[sym] = (uint64_t)symtab[idx].st_value;
            }
        }
    }

    // If the enclave is compiled/linked with -fpie/-pie and the enclave entry
    // is set to `enclave_entry', the `st_name' for `enclave_entry' will be 0
    // in `.dynsym'. Fall back to `e_entry' from the ELF header in that case.
    map<string, uint64_t>::const_iterator it = sym_table.find("enclave_entry");
    if (it == sym_table.end())
    {
        sym_table["enclave_entry"] = (uint64_t)elf_hdr->e_entry;
    }

    return true;
}
bool do_validate_reltab(const ElfW(Rel) *reltab, size_t nr_rel)
{
    if (reltab == NULL && nr_rel != 0) return false;

#if RTS_SYSTEM_WORDSIZE == 64
    const ElfW(Rel) *rela = reltab;

    for (unsigned idx = 0; idx < nr_rel; idx++, rela++)
    {
        switch (ELF64_R_TYPE(rela->r_info))
        {
        case R_X86_64_RELATIVE:
            break;

        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
        case R_X86_64_64:
            break;

        case R_X86_64_NONE:
            break;

        case R_X86_64_DTPMOD64:
        case R_X86_64_DTPOFF64:
        case R_X86_64_TPOFF64:
            break;
#else
    const ElfW(Rel) *rel = reltab;

    for (unsigned idx = 0; idx < nr_rel; idx++, rel++)
    {
        switch (ELF32_R_TYPE(rel->r_info))
        {
        case R_386_RELATIVE:      /* B+A */
            break;

        case R_386_GLOB_DAT:
        case R_386_JMP_SLOT:      /* S */
            break;

        case R_386_32:            /* S+A */
            break;

        case R_386_PC32:          /* S+A-P */
            break;

        case R_386_NONE:
            break;

        case R_386_TLS_DTPMOD32:
            break;

        case R_386_TLS_DTPOFF32:
            break;

        case R_386_TLS_TPOFF:
            break;

        case R_386_TLS_TPOFF32:
            break;
#endif

        default:    /* unsupported relocation types */
            SE_TRACE(SE_TRACE_WARNING, "unsupported relocation type detected\n");
            return false;
        }
    }

    return true;
}
bool validate_reltabs(const ElfW(Ehdr) *elf_hdr, const ElfW(Dyn) *dyn_info)
{
#if RTS_SYSTEM_WORDSIZE == 64
    // The relocation struct must be rela on x86-64.
    if (dyn_info[DT_REL].d_un.d_ptr)
    {
        SE_TRACE(SE_TRACE_WARNING, "Rel struct detected on x64\n");
        return false;
    }
#else
    // The relocation struct must be rel on x86.
    if (dyn_info[DT_RELA].d_un.d_ptr)
    {
        SE_TRACE(SE_TRACE_WARNING, "Rela struct detected on x86\n");
        return false;
    }
#endif

    const ElfW(Rel) *reltab = get_section_raw_data<ElfW(Rel)>(elf_hdr, dyn_info[RTS_DT_REL].d_un.d_ptr);
    const ElfW(Word) reltab_sz = (ElfW(Word))dyn_info[RTS_DT_RELSZ].d_un.d_val;

    const ElfW(Rel) *jmpreltab = get_section_raw_data<ElfW(Rel)>(elf_hdr, dyn_info[DT_JMPREL].d_un.d_ptr);
    const ElfW(Word) jmpreltab_sz = (ElfW(Word))dyn_info[DT_PLTRELSZ].d_un.d_val;

    return (do_validate_reltab(reltab, reltab_sz / sizeof(ElfW(Rel)))
            && do_validate_reltab(jmpreltab, jmpreltab_sz / sizeof(ElfW(Rel))));
}
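// RTS_DT_REL / RTS_DT_RELSZ are presumably word-size aliases for the native
// relocation tags (DT_RELA/DT_RELASZ on x86-64, DT_REL/DT_RELSZ on x86), so
// the same code path handles both ElfW(Rel) layouts.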
bool has_ctor_section(const ElfW(Ehdr) *elf_hdr)
{
    const ElfW(Shdr) *shdr = get_section_by_name(elf_hdr, ".ctors");
    if (NULL == shdr) return false;

    se_trace(SE_TRACE_ERROR, "ERROR: .ctors section is found, global initializers will not be invoked correctly!\n");
    return true;
}
inline bool is_tls_segment(const ElfW(Phdr)* prg_hdr)
{
    return (PT_TLS == prg_hdr->p_type);
}
bool get_meta_property(const uint8_t *start_addr, const ElfW(Ehdr) *elf_hdr, uint64_t &meta_offset, uint64_t &meta_block_size)
{
    const ElfW(Shdr)* shdr = get_section_by_name(elf_hdr, ".note.sgxmeta");
    if (shdr == NULL)
    {
        se_trace(SE_TRACE_ERROR, "ERROR: The enclave image should have '.note.sgxmeta' section\n");
        return false;
    }

    /* We require enclaves to have a .note.sgxmeta section to store the metadata.
     * This section is reserved for the metadata only; ISVs should not extend it.
     *
     * .note.sgxmeta layout:
     *
     * |  namesz         |
     * |  metadata size  |
     * |  type           |
     * |  name           |
     * |  metadata       |
     */
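    /* Worked example, assuming the standard ELF note header of three 32-bit
     * words (namesz, descsz, type): with the name "sgx_metadata" (12 chars
     * plus NUL, so namesz == 13), the note is a 12-byte header, followed by
     * the 13-byte name, followed by descsz bytes of metadata. Since
     * META_SECTION_ALIGNMENT is 1, the ROUND_TO below is effectively a no-op.
     */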
    if (shdr->sh_addralign != META_SECTION_ALIGNMENT)
    {
        se_trace(SE_TRACE_ERROR, "ERROR: The '.note.sgxmeta' section must have sh_addralign == META_SECTION_ALIGNMENT\n");
        return false;
    }

    const ElfW(Note) *note = GET_PTR(ElfW(Note), start_addr, shdr->sh_offset);
    assert(note != NULL);

    if (shdr->sh_size != ROUND_TO(sizeof(ElfW(Note)) + note->namesz + note->descsz, META_SECTION_ALIGNMENT))
    {
        se_trace(SE_TRACE_ERROR, "ERROR: The '.note.sgxmeta' section size is not correct.\n");
        return false;
    }

    if (memcmp(GET_PTR(void, start_addr, shdr->sh_offset + sizeof(ElfW(Note))), "sgx_metadata", note->namesz))
    {
        se_trace(SE_TRACE_ERROR, "ERROR: The note in the '.note.sgxmeta' section must be named \"sgx_metadata\"\n");
        return false;
    }

    meta_offset = static_cast<uint64_t>(shdr->sh_offset + sizeof(ElfW(Note)) + note->namesz);
    meta_block_size = note->descsz;

    return true;
}
bool validate_segment(const ElfW(Ehdr) *elf_hdr, uint64_t len)
{
    const ElfW(Phdr) *prg_hdr = GET_PTR(ElfW(Phdr), elf_hdr, elf_hdr->e_phoff);
    assert(sizeof(ElfW(Phdr)) == elf_hdr->e_phentsize);

    std::vector< std::pair<ElfW(Addr), ElfW(Addr)> > load_seg(elf_hdr->e_phnum, std::make_pair(0, 0));
    int k = 0;

    for (int idx = 0; idx < elf_hdr->e_phnum; idx++, prg_hdr++)
    {
        /* Validate the size of the buffer */
        if (len < (uint64_t)prg_hdr->p_offset + prg_hdr->p_filesz)
            return false;

        if (PT_LOAD == prg_hdr->p_type)
        {
            // The default alignment is the maximum page size. On x86-64 the
            // maximum page size is 2M while the EPC page size is 4K, so we
            // only require EPC page alignment. The remaining (2M - 4K) range
            // is not EADDed; we leave it as a hole.
            if (!IS_PAGE_ALIGNED(prg_hdr->p_align))
            {
                SE_TRACE(SE_TRACE_WARNING, "A segment is not PAGE aligned, alignment = %x\n", prg_hdr->p_align);
                return false;
            }

            // Check that this segment does not overlap any load segment seen so far.
            load_seg[k].first = prg_hdr->p_vaddr;
            load_seg[k].second = prg_hdr->p_vaddr + ROUND_TO(prg_hdr->p_memsz, prg_hdr->p_align) - 1;

            for (int j = 0; j < k; j++)
            {
                if (is_overlap(load_seg[k], load_seg[j]))
                {
                    SE_TRACE(SE_TRACE_WARNING, "overlapping segments detected: [%x : %x] and [%x : %x]\n",
                             load_seg[k].first, load_seg[k].second, load_seg[j].first, load_seg[j].second);
                    return false;
                }
            }

            k++;
        }
    }

    return true;
}
bool get_bin_fmt(const ElfW(Ehdr) *elf_hdr, bin_fmt_t& bf)
{
    switch (elf_hdr->e_machine)
    {
#if RTS_SYSTEM_WORDSIZE == 32
    case EM_386:
        bf = BF_ELF32;
        return true;
#endif

#if RTS_SYSTEM_WORDSIZE == 64
    case EM_X86_64:
        bf = BF_ELF64;
        return true;
#endif
    }

    return false;
}
si_flags_t page_attr_to_si_flags(uint32_t page_attr)
{
    si_flags_t res = SI_FLAG_REG;

    if (page_attr & PF_R)
        res |= SI_FLAG_R;
    if (page_attr & PF_W)
        res |= SI_FLAG_W;
    if (page_attr & PF_X)
        res |= SI_FLAG_X;

    return res;
}
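// For example, a segment with p_flags == (PF_R | PF_X) maps to
// (SI_FLAG_REG | SI_FLAG_R | SI_FLAG_X), while p_flags == 0 maps to the bare
// SI_FLAG_REG, which build_section() below rejects.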
Section* build_section(const uint8_t* raw_data, uint64_t size, uint64_t virtual_size,
                       uint64_t rva, uint32_t page_attr)
{
    si_flags_t sf = page_attr_to_si_flags(page_attr);
    if (sf != SI_FLAG_REG)
        return new Section(raw_data, size, virtual_size, rva, sf);

    return NULL;
}
bool build_regular_sections(const uint8_t* start_addr,
                            vector<Section *>& sections,
                            const Section*& tls_sec,
                            uint64_t& metadata_offset,
                            uint64_t& metadata_block_size)
{
    const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)start_addr;
    const ElfW(Phdr) *prg_hdr = GET_PTR(ElfW(Phdr), start_addr, elf_hdr->e_phoff);
    uint64_t virtual_size = 0, alignment = 0, aligned_virtual_size = 0;

    if (get_meta_property(start_addr, elf_hdr, metadata_offset, metadata_block_size) == false)
        return false;

    for (unsigned idx = 0; idx < elf_hdr->e_phnum; ++idx, ++prg_hdr)
    {
        Section* sec = NULL;

        switch (prg_hdr->p_type)
        {
        case PT_LOAD:
            sec = build_section(GET_PTR(uint8_t, start_addr, prg_hdr->p_offset),
                                (uint64_t)prg_hdr->p_filesz, (uint64_t)prg_hdr->p_memsz,
                                (uint64_t)prg_hdr->p_vaddr, (uint32_t)prg_hdr->p_flags);
            break;

        case PT_TLS:
            virtual_size = (uint64_t)prg_hdr->p_memsz;
            alignment = (uint64_t)prg_hdr->p_align;

            /* Per the ELF spec, an alignment of zero or one means no alignment requirement. */
            if (alignment == 0 || alignment == 1)
                aligned_virtual_size = virtual_size;
            else
                aligned_virtual_size = (virtual_size + alignment - 1) & (~(alignment - 1));
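            /* The rounding above assumes a power-of-two alignment: e.g. with
             * alignment == 0x10, a virtual_size of 0x31 rounds up to 0x40. */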
            sec = build_section(GET_PTR(uint8_t, start_addr, prg_hdr->p_offset),
                                (uint64_t)prg_hdr->p_filesz, aligned_virtual_size,
                                (uint64_t)prg_hdr->p_vaddr, (uint32_t)prg_hdr->p_flags);
            break;

        default:
            continue;
        }

        if (sec == NULL)
            return false;

        /* Segments other than PT_LOAD and PT_TLS were skipped above. */
        if (!is_tls_segment(prg_hdr))
        {
            /* A PT_LOAD segment. */
            sections.push_back(sec);
            continue;
        }

        /* It is a TLS segment. */
        tls_sec = sec;
    }

    return true;
}
const Section* get_max_rva_section(const vector<Section*>& sections)
{
    size_t sec_size = sections.size();

    if (sec_size == 0)
        return NULL;

    const Section* psec = sections[0];
    for (size_t idx = 1; idx < sec_size; ++idx)
    {
        if (sections[idx]->get_rva() > psec->get_rva())
            psec = sections[idx];
    }

    return psec;
}
} // anonymous namespace
ElfParser::ElfParser(const uint8_t* start_addr, uint64_t len)
    : m_start_addr(start_addr), m_len(len), m_bin_fmt(BF_UNKNOWN),
      m_tls_section(NULL), m_metadata_offset(0), m_metadata_block_size(0)
{
    memset(&m_dyn_info, 0, sizeof(m_dyn_info));
}
sgx_status_t ElfParser::run_parser()
{
    /* We only need to run the parser once. */
    if (m_sections.size() != 0) return SGX_SUCCESS;

    const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)m_start_addr;
    if (elf_hdr == NULL || m_len < sizeof(ElfW(Ehdr)))
        return SGX_ERROR_INVALID_ENCLAVE;

    /* Check the ELF header */
    if (!validate_elf_header(elf_hdr))
        return SGX_ERROR_INVALID_ENCLAVE;

    /* Get and check the machine mode */
    if (!get_bin_fmt(elf_hdr, m_bin_fmt))
        return SGX_ERROR_MODE_INCOMPATIBLE;

    /* Check that no segments overlap and that each loadable segment is page aligned */
    if (!validate_segment(elf_hdr, m_len))
        return SGX_ERROR_INVALID_ENCLAVE;

    if (!parse_dyn(elf_hdr, &m_dyn_info[0]))
        return SGX_ERROR_INVALID_ENCLAVE;

    /* Check whether there is any undefined symbol */
    if (!check_symbol_table(elf_hdr, m_dyn_info, m_sym_table))
    {
        return SGX_ERROR_UNDEFINED_SYMBOL;
    }

    /* Check whether there is any unexpected relocation type */
    if (!validate_reltabs(elf_hdr, m_dyn_info))
        return SGX_ERROR_INVALID_ENCLAVE;

    /* Check whether there is a .ctors section */
    if (has_ctor_section(elf_hdr))
        return SGX_ERROR_INVALID_ENCLAVE;

    /* Build the regular sections */
    if (build_regular_sections(m_start_addr, m_sections, m_tls_section, m_metadata_offset, m_metadata_block_size))
        return SGX_SUCCESS;
    else
        return SGX_ERROR_INVALID_ENCLAVE;
}
ElfParser::~ElfParser()
{
    delete_ptrs_from_container(m_sections);
    if (m_tls_section) delete m_tls_section;
}
bin_fmt_t ElfParser::get_bin_format() const
{
    return m_bin_fmt;
}

uint64_t ElfParser::get_enclave_max_size() const
{
    if (m_bin_fmt == BF_ELF64)
        return ENCLAVE_MAX_SIZE_64;
    else
        return ENCLAVE_MAX_SIZE_32;
}
uint64_t ElfParser::get_metadata_offset() const
{
    return m_metadata_offset;
}

uint64_t ElfParser::get_metadata_block_size() const
{
    return m_metadata_block_size;
}

const uint8_t* ElfParser::get_start_addr() const
{
    return m_start_addr;
}

const vector<Section *>& ElfParser::get_sections() const
{
    return m_sections;
}

const Section* ElfParser::get_tls_section() const
{
    return m_tls_section;
}

uint64_t ElfParser::get_symbol_rva(const char* name) const
{
    map<string, uint64_t>::const_iterator it = m_sym_table.find(name);
    if (it != m_sym_table.end())
        return it->second;
    else
        return 0;
}
bool ElfParser::get_reloc_bitmap(vector<uint8_t>& bitmap)
{
    // Clear the `bitmap' so that it is in a known state
    bitmap.clear();

    if (!m_dyn_info[DT_TEXTREL].d_tag)
        return true;

    const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)m_start_addr;
    const ElfW(Rel) *rel[4] = { NULL, NULL, NULL, NULL };

    if (m_dyn_info[DT_JMPREL].d_tag)
    {
        rel[2] = get_section_raw_data<ElfW(Rel)>(elf_hdr, m_dyn_info[DT_JMPREL].d_un.d_ptr);
        rel[3] = GET_PTR(const ElfW(Rel), rel[2], m_dyn_info[DT_PLTRELSZ].d_un.d_val);
    }

    if (m_dyn_info[RTS_DT_REL].d_tag)
    {
        rel[0] = get_section_raw_data<ElfW(Rel)>(elf_hdr, m_dyn_info[RTS_DT_REL].d_un.d_ptr);
        rel[1] = GET_PTR(const ElfW(Rel), rel[0], m_dyn_info[RTS_DT_RELSZ].d_un.d_val);
        assert(sizeof(ElfW(Rel)) == m_dyn_info[RTS_DT_RELENT].d_un.d_val);
    }

    // The enclave size mapped in memory is calculated as
    //   sec->get_rva() + sec->virtual_size();
    // where `sec' is the section with the maximum RVA value.
    uint64_t image_size = 0;
    const Section* max_rva_sec = get_max_rva_section(this->m_sections);
    if (max_rva_sec == NULL)
        return false;

    image_size = max_rva_sec->get_rva() + max_rva_sec->virtual_size();

    // NOTE:
    //   The current enclave size does not exceed 64G, so the type-cast from
    //   (uint64 >> 15) to (size_t) is safe. If the maximum enclave size is
    //   ever extended beyond 1 << 49, this cast will no longer work. It only
    //   impacts the enclave signing process (a 32-bit signing tool signing
    //   64-bit enclaves).

    // Allocate the bitmap: one bit per enclave page.
    bitmap.resize((size_t)((((image_size + (SE_PAGE_SIZE - 1)) >> SE_PAGE_SHIFT) + 7) / 8));
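    // Sizing example (assuming SE_PAGE_SIZE == 4096 and SE_PAGE_SHIFT == 12):
    // a 1 MiB image rounds up to 256 pages, which needs (256 + 7) / 8 = 32
    // bytes of bitmap.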
    for (unsigned idx = 0; idx < ARRAY_LENGTH(rel); idx += 2)
    {
        const ElfW(Rel) *rel_entry = rel[idx], *rel_end = rel[idx+1];

        if (NULL == rel_entry)
            continue;

        for (; rel_entry < rel_end; rel_entry++)
        {
#if RTS_SYSTEM_WORDSIZE == 64
            if (ELF64_R_TYPE(rel_entry->r_info) == R_X86_64_NONE)
#else
            if (ELF32_R_TYPE(rel_entry->r_info) == R_386_NONE)
#endif
                continue;

            ElfW(Addr) reloc_addr = rel_entry->r_offset;
            uint64_t page_frame = (uint64_t)(reloc_addr >> SE_PAGE_SHIFT);

            // Set the bit for this page. If there is more than one relocation
            // on the same page, OR-ing the bit in behaves exactly as if there
            // were only one.
            bitmap[(size_t)(page_frame/8)] = (uint8_t)(bitmap[(size_t)(page_frame/8)] | (uint8_t)(1 << (page_frame % 8)));
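            // A relocation rewrites sizeof(sys_word_t) bytes at r_offset, so
            // if it starts within the last word of a page it spills into the
            // next page. For example, with a 4 KiB page and an 8-byte word
            // (both assumptions of this sketch), a relocation at page offset
            // 0xFFC touches both that page and the one after it.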
            // Check whether the relocation crosses a page boundary.
            if ((reloc_addr & (SE_PAGE_SIZE - 1)) > (SE_PAGE_SIZE - sizeof(sys_word_t)))
            {
                page_frame++;
                bitmap[(size_t)(page_frame/8)] = (uint8_t)(bitmap[(size_t)(page_frame/8)] | (uint8_t)(1 << (page_frame % 8)));
            }
        }
    }

    return true;
}
void ElfParser::get_reloc_entry_offset(const char* sec_name, vector<uint64_t>& offsets)
{
    if (sec_name == NULL)
        return;

    const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr) *)m_start_addr;
    const ElfW(Shdr) *shdr = get_section_by_name(ehdr, sec_name);

    if (shdr == NULL)
        return;

    /* Find the start and end addresses of the target section */
    const uint64_t start = shdr->sh_addr;
    const uint64_t end = start + shdr->sh_size;

    offsets.clear();
    SE_TRACE(SE_TRACE_DEBUG, "found section '%s' - offset %#lx, size %#lx\n",
             sec_name, (long)start, (long)shdr->sh_size);

    /* Iterate over the sections to find the relocs */
    shdr = GET_PTR(ElfW(Shdr), m_start_addr, ehdr->e_shoff);

    for (unsigned idx = 0; idx < ehdr->e_shnum; ++idx, ++shdr)
    {
        if (shdr->sh_type != SHT_RELA &&
            shdr->sh_type != SHT_REL)
            continue;

        uint64_t rel_size = shdr->sh_size;
        uint64_t rel_offset = shdr->sh_offset;
        uint64_t nr_rel = rel_size / shdr->sh_entsize;

        /* For each reloc, check whether its target falls inside the section */
        const ElfW(Rel) *rel = GET_PTR(ElfW(Rel), m_start_addr, rel_offset);

        for (; nr_rel > 0; --nr_rel, ++rel)
        {
            if (rel->r_offset >= start && rel->r_offset < end)
            {
                uint64_t offset = DIFF64(rel, m_start_addr);
                SE_TRACE(SE_TRACE_DEBUG, "found one reloc at offset %#lx\n", offset);
                offsets.push_back(offset);
            }
        }
    }
}
#include "update_global_data.hxx"

bool ElfParser::update_global_data(const create_param_t* const create_param,
                                   uint8_t *data,
                                   uint32_t *data_size)
{
    if (*data_size < sizeof(global_data_t))
    {
        *data_size = sizeof(global_data_t);
        return false;
    }

    do_update_global_data(create_param, (global_data_t *)data);
    *data_size = sizeof(global_data_t);
    return true;
}
sgx_status_t ElfParser::modify_info(enclave_diff_info_t *enclave_diff_info)
{
    UNUSED(enclave_diff_info);
    return SGX_SUCCESS;
}

sgx_status_t ElfParser::get_info(enclave_diff_info_t *enclave_diff_info)
{
    UNUSED(enclave_diff_info);
    return SGX_SUCCESS;
}
void ElfParser::get_executable_sections(vector<const char *>& xsec_names) const
{
    xsec_names.clear();

    const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)m_start_addr;
    const ElfW(Shdr) *shdr = GET_PTR(ElfW(Shdr), elf_hdr, elf_hdr->e_shoff);
    const char *shstrtab = GET_PTR(char, elf_hdr, shdr[elf_hdr->e_shstrndx].sh_offset);

    for (unsigned idx = 0; idx < elf_hdr->e_shnum; ++idx, ++shdr)
    {
        if ((shdr->sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR)
            xsec_names.push_back(shstrtab + shdr->sh_name);
    }

    return;
}