elfparser.cpp 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858
  1. /*
  2. * Copyright (C) 2011-2017 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. #include "elfparser.h"
  32. #include "cpputil.h"
  33. #include "se_trace.h"
  34. #include "se_memcpy.h"
  35. #include "global_data.h"
  36. namespace {
/** the callback function to filter a section.
 *
 * @shstrtab: the section header string table (base of all section names)
 * @shdr: the current section header to be examined
 * @user_data: user supplied data for the callback (its meaning is
 *             defined by the concrete filter implementation)
 *
 * @return: true if current section header is what we are looking for.
 */
typedef bool (* section_filter_f)(const char* shstrtab,
                                  const ElfW(Shdr)* shdr,
                                  const void* user_data);
  48. bool compare_section_name(const char* shstrtab,
  49. const ElfW(Shdr)* shdr,
  50. const void* user_data)
  51. {
  52. // `shstrtab + shdr->sh_name' is the section name.
  53. return (!strcmp(shstrtab + shdr->sh_name, (const char*)user_data));
  54. }
  55. bool compare_section_addr(const char* shstrtab,
  56. const ElfW(Shdr)* shdr,
  57. const void* user_data)
  58. {
  59. UNUSED(shstrtab);
  60. return (shdr->sh_addr == (ElfW(Addr))(size_t)user_data);
  61. }
  62. const ElfW(Shdr)* get_section(const ElfW(Ehdr) *elf_hdr,
  63. section_filter_f f,
  64. const void* user_data)
  65. {
  66. const ElfW(Shdr) *shdr = GET_PTR(ElfW(Shdr), elf_hdr, elf_hdr->e_shoff);
  67. assert(sizeof(ElfW(Shdr)) == elf_hdr->e_shentsize);
  68. // section header string table
  69. const char *shstrtab = GET_PTR(char, elf_hdr, shdr[elf_hdr->e_shstrndx].sh_offset);
  70. for (unsigned idx = 0; idx < elf_hdr->e_shnum; ++idx, ++shdr)
  71. {
  72. SE_TRACE(SE_TRACE_DEBUG, "section [%u] %s: sh_addr = %x, sh_size = %x, sh_offset = %x, sh_name = %x\n",
  73. idx, shstrtab + shdr->sh_name, shdr->sh_addr, shdr->sh_size, shdr->sh_offset, shdr->sh_name);
  74. if (f(shstrtab, shdr, user_data))
  75. return shdr;
  76. }
  77. return NULL;
  78. }
/* Convenience wrapper: look up a section by name, e.g. ".note.sgxmeta".
 * Returns NULL when no section with that name exists. */
const ElfW(Shdr)* get_section_by_name(const ElfW(Ehdr) *elf_hdr, const char *name)
{
    return get_section(elf_hdr, compare_section_name, name);
}
/* Convenience wrapper: look up a section by its load address (sh_addr).
 * Returns NULL when no section starts at `start_addr'. */
const ElfW(Shdr)* get_section_by_addr(const ElfW(Ehdr) *elf_hdr, ElfW(Addr) start_addr)
{
    return get_section(elf_hdr, compare_section_addr, (const void*)(size_t)start_addr);
}
  87. template <typename T>
  88. const T* get_section_raw_data(const ElfW(Ehdr) *elf_hdr, ElfW(Addr) start_addr)
  89. {
  90. const ElfW(Shdr)* shdr = get_section_by_addr(elf_hdr, start_addr);
  91. if (shdr == NULL)
  92. return NULL;
  93. return GET_PTR(T, elf_hdr, shdr->sh_offset);
  94. }
  95. bool validate_elf_header(const ElfW(Ehdr) *elf_hdr)
  96. {
  97. // validate magic number
  98. if (memcmp(&elf_hdr->e_ident, ELFMAG, SELFMAG))
  99. return false;
  100. #if RTS_SYSTEM_WORDSIZE == 64
  101. if (ELFCLASS64 != elf_hdr->e_ident[EI_CLASS])
  102. return false;
  103. #else
  104. if (ELFCLASS32 != elf_hdr->e_ident[EI_CLASS])
  105. return false;
  106. #endif
  107. if (ELFDATA2LSB!= elf_hdr->e_ident[EI_DATA])
  108. return false;
  109. if (EV_CURRENT != elf_hdr->e_ident[EI_VERSION])
  110. return false;
  111. if (ET_DYN != elf_hdr->e_type)
  112. return false;
  113. if (sizeof(ElfW(Phdr)) != elf_hdr->e_phentsize)
  114. return false;
  115. return true;
  116. }
  117. bool parse_dyn(const ElfW(Ehdr) *elf_hdr, ElfW(Dyn)* dyn_info)
  118. {
  119. const ElfW(Phdr) *prg_hdr = GET_PTR(ElfW(Phdr), elf_hdr, elf_hdr->e_phoff);
  120. bool has_dyn = false;
  121. for (unsigned idx = 0; idx < elf_hdr->e_phnum; ++idx, ++prg_hdr)
  122. {
  123. if (PT_DYNAMIC == prg_hdr->p_type)
  124. {
  125. const ElfW(Dyn) *dyn_entry = GET_PTR(ElfW(Dyn), elf_hdr, prg_hdr->p_offset);
  126. // parse dynamic segment
  127. // An entry with a DT_NULL tag marks the end.
  128. while (dyn_entry->d_tag != DT_NULL)
  129. {
  130. SE_TRACE(SE_TRACE_DEBUG, "dynamic tag = %x, ptr = %x\n", dyn_entry->d_tag, dyn_entry->d_un.d_ptr);
  131. if (dyn_entry->d_tag < DT_NUM)
  132. {
  133. memcpy_s(&dyn_info[dyn_entry->d_tag], sizeof(ElfW(Dyn)), dyn_entry, sizeof(ElfW(Dyn)));
  134. }
  135. else if (dyn_entry->d_tag > DT_ADDRRNGLO && dyn_entry->d_tag <= DT_ADDRRNGHI)
  136. {
  137. memcpy_s(&dyn_info[DT_ADDRTAGIDX(dyn_entry->d_tag) + DT_NUM], sizeof(ElfW(Dyn)), dyn_entry, sizeof(ElfW(Dyn)));
  138. }
  139. dyn_entry++;
  140. has_dyn = true;
  141. }
  142. return has_dyn;
  143. }
  144. }
  145. return false;
  146. }
  147. /** Check whether there are undefined symbols and save the address
  148. * for a few reserved symbols.
  149. *
  150. * ELF format defined two symbol tables, `.symtab' and `.dynsym'.
  151. *
  152. * `.symtab' is non-allocable, and might be stripped.
  153. * `.dynsym' is allocable, and only contains global symbols.
  154. *
  155. * We only need to search `.dynsym' for undefined symbols.
  156. */
  157. bool check_symbol_table(const ElfW(Ehdr) *elf_hdr, const ElfW(Dyn) *dyn_info,
  158. map<string, uint64_t>& sym_table)
  159. {
  160. const ElfW(Shdr) *sh_symtab = get_section_by_addr(elf_hdr, dyn_info[DT_SYMTAB].d_un.d_ptr);
  161. if (sh_symtab == NULL)
  162. {
  163. // We must at least have "enclave_entry"
  164. SE_TRACE(SE_TRACE_WARNING, "There is no .dynsym section");
  165. return false;
  166. }
  167. if (sh_symtab->sh_entsize == 0)
  168. {
  169. SE_TRACE(SE_TRACE_WARNING, "In section .dynsym, sh_entsize is 0.");
  170. return false;
  171. }
  172. const ElfW(Sym) *symtab = GET_PTR(ElfW(Sym), elf_hdr, sh_symtab->sh_offset);
  173. uint32_t sym_num = (uint32_t)(sh_symtab->sh_size/sh_symtab->sh_entsize);
  174. const char *strtab = get_section_raw_data<char>(elf_hdr, dyn_info[DT_STRTAB].d_un.d_ptr);
  175. // We only store "enclave_entry", "g_global_data_sim" and "g_peak_heap_used".
  176. // To export new symbols, add them here.
  177. //
  178. // "g_global_data_sim" is needed so that we can check that whether
  179. // an simulated enclave is given when running an HW loader.
  180. const char* str[] = { "enclave_entry", "g_global_data_sim", "g_peak_heap_used", "g_global_data" };
  181. // The first entry is reserved, and must be all zeros
  182. for (uint32_t idx = 1; idx < sym_num; ++idx)
  183. {
  184. // st_name == 0 indicates the symble table entry has no name.
  185. if (symtab[idx].st_name == 0) continue;
  186. const char* sym = strtab + symtab[idx].st_name;
  187. if (sym == NULL)
  188. {
  189. SE_TRACE(SE_TRACE_WARNING, "Malformed enclave with NULL symbol name\n");
  190. return false;
  191. }
  192. if (SHN_UNDEF == symtab[idx].st_shndx
  193. && STB_WEAK != ELFW(ST_BIND)(symtab[idx].st_info))
  194. {
  195. SE_TRACE(SE_TRACE_WARNING, "symbol '%s' is undefined\n", sym);
  196. return false;
  197. }
  198. #define SYMBOL_NUM (ARRAY_LENGTH(str))
  199. for (size_t i = 0; i < SYMBOL_NUM; ++i)
  200. {
  201. if (0 == strcmp(str[i], sym))
  202. {
  203. sym_table[sym] = (uint64_t)symtab[idx].st_value;
  204. }
  205. }
  206. }
  207. // If the enclave if compiled/linked with -fpie/-pie, and setting the
  208. // enclave entry to `enclave_entry', the `st_name' for `enclave_entry'
  209. // will be 0 in `.dynsym'.
  210. map<string, uint64_t>::const_iterator it = sym_table.find("enclave_entry");
  211. if (it == sym_table.end())
  212. {
  213. sym_table["enclave_entry"] = (uint64_t)elf_hdr->e_entry;
  214. }
  215. return true;
  216. }
/* Verify that every relocation in `reltab' (an array of `nr_rel' entries)
 * is of a type this loader knows how to apply; any other relocation type
 * makes the whole image invalid.
 *
 * NOTE(structure): the for-loop and switch are OPENED inside both halves
 * of the #if/#else below, and both halves share the single `default' arm
 * and the closing braces after #endif.  Only one half is ever compiled. */
bool do_validate_reltab(const ElfW(Rel) *reltab, size_t nr_rel)
{
    // A missing table is only acceptable when no entries are expected.
    if (reltab == NULL && nr_rel != 0) return false;
#if RTS_SYSTEM_WORDSIZE == 64
    const ElfW(Rel) *rela = reltab;

    for (unsigned idx = 0; idx < nr_rel; idx++, rela++)
    {
        switch (ELF64_R_TYPE(rela->r_info))
        {
        case R_X86_64_RELATIVE:
            break;

        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
        case R_X86_64_64:
            break;

        case R_X86_64_NONE:
            break;

        case R_X86_64_DTPMOD64:
        case R_X86_64_DTPOFF64:
        case R_X86_64_TPOFF64:
            break;
#else
    const ElfW(Rel) *rel = reltab;

    for (unsigned idx = 0; idx < nr_rel; idx++, rel++)
    {
        switch (ELF32_R_TYPE(rel->r_info))
        {
        case R_386_RELATIVE:    /* B+A */
            break;

        case R_386_GLOB_DAT:
        case R_386_JMP_SLOT:    /* S */
            break;

        case R_386_32:          /* S+A */
            break;

        case R_386_PC32:        /* S+A-P */
            break;

        case R_386_NONE:
            break;

        case R_386_TLS_DTPMOD32:
            break;

        case R_386_TLS_DTPOFF32:
            break;

        case R_386_TLS_TPOFF:
            break;

        case R_386_TLS_TPOFF32:
            break;
#endif

        default:    /* unsupported relocs */
            SE_TRACE(SE_TRACE_WARNING, "unsupported relocation type detected\n");
            return false;
        }
    }
    return true;
}
  271. bool validate_reltabs(const ElfW(Ehdr) *elf_hdr, const ElfW(Dyn) *dyn_info)
  272. {
  273. #if RTS_SYSTEM_WORDSIZE == 64
  274. // The relocation struct must be rela on x64.
  275. if (dyn_info[DT_REL].d_un.d_ptr)
  276. {
  277. SE_TRACE(SE_TRACE_WARNING, "Rel struct detected on x64\n");
  278. return false;
  279. }
  280. #else
  281. // The relocation struct must be rel on x86.
  282. if (dyn_info[DT_RELA].d_un.d_ptr)
  283. {
  284. SE_TRACE(SE_TRACE_WARNING, "Rela struct detected on x86\n");
  285. return false;
  286. }
  287. #endif
  288. const ElfW(Rel) *reltab = get_section_raw_data<ElfW(Rel)>(elf_hdr, dyn_info[RTS_DT_REL].d_un.d_ptr);
  289. const ElfW(Word) reltab_sz = (ElfW(Word))dyn_info[RTS_DT_RELSZ].d_un.d_val;
  290. const ElfW(Rel) *jmpreltab = get_section_raw_data<ElfW(Rel)>(elf_hdr, dyn_info[DT_JMPREL].d_un.d_ptr);
  291. const ElfW(Word) jmpreltab_sz = (ElfW(Word))dyn_info[DT_PLTRELSZ].d_un.d_val;
  292. return (do_validate_reltab(reltab, reltab_sz / sizeof(ElfW(Rel)))
  293. && do_validate_reltab(jmpreltab, jmpreltab_sz / sizeof(ElfW(Rel))));
  294. }
  295. bool has_ctor_section(const ElfW(Ehdr) *elf_hdr)
  296. {
  297. const ElfW(Shdr) *shdr = get_section_by_name(elf_hdr, ".ctors");
  298. if (NULL == shdr) return false;
  299. se_trace(SE_TRACE_ERROR, "ERROR: .ctors section is found, global initializers will not be invoked correctly!\n");
  300. return true;
  301. }
  302. inline bool is_tls_segment(const ElfW(Phdr)* prg_hdr)
  303. {
  304. return (PT_TLS == prg_hdr->p_type);
  305. }
  306. bool get_meta_property(const uint8_t *start_addr, const ElfW(Ehdr) *elf_hdr, uint64_t &meta_offset, uint64_t &meta_block_size)
  307. {
  308. const ElfW(Shdr)* shdr = get_section_by_name(elf_hdr, ".note.sgxmeta");
  309. if (shdr == NULL)
  310. {
  311. se_trace(SE_TRACE_ERROR, "ERROR: The enclave image should have '.note.sgxmeta' section\n");
  312. return false;
  313. }
  314. /* We require that enclaves should have .note.sgxmeta section to store the metadata information
  315. * We limit this section is used for metadata only and ISV should not extend this section.
  316. *
  317. * .note.sgxmeta layout:
  318. *
  319. * | namesz |
  320. * | metadata size |
  321. * | type |
  322. * | name |
  323. * | metadata |
  324. */
  325. const ElfW(Note) *note = GET_PTR(ElfW(Note), start_addr, shdr->sh_offset);
  326. assert(note != NULL);
  327. if (shdr->sh_size != ROUND_TO(sizeof(ElfW(Note)) + note->namesz + note->descsz, shdr->sh_addralign))
  328. {
  329. se_trace(SE_TRACE_ERROR, "ERROR: The '.note.sgxmeta' section size is not correct.\n");
  330. return false;
  331. }
  332. if (memcmp(GET_PTR(void, start_addr, shdr->sh_offset + sizeof(ElfW(Note))), "sgx_metadata", note->namesz))
  333. {
  334. se_trace(SE_TRACE_ERROR, "ERROR: The note in the '.note.sgxmeta' section must be named as \"sgx_metadata\"\n");
  335. return false;
  336. }
  337. meta_offset = static_cast<uint64_t>(shdr->sh_offset + sizeof(ElfW(Note)) + note->namesz);
  338. meta_block_size = note->descsz;
  339. return true;
  340. }
  341. bool validate_segment(const ElfW(Ehdr) *elf_hdr, uint64_t len)
  342. {
  343. const ElfW(Phdr) *prg_hdr = GET_PTR(ElfW(Phdr), elf_hdr, elf_hdr->e_phoff);
  344. assert(sizeof(ElfW(Phdr)) == elf_hdr->e_phentsize);
  345. std::vector< std::pair<ElfW(Addr), ElfW(Addr)> > load_seg(elf_hdr->e_phnum, std::make_pair(0, 0));
  346. int k = 0;
  347. for (int idx = 0; idx < elf_hdr->e_phnum; idx++, prg_hdr++)
  348. {
  349. /* Validate the size of the buffer */
  350. if (len < (uint64_t)prg_hdr->p_offset + prg_hdr->p_filesz)
  351. return false;
  352. if (PT_LOAD == prg_hdr->p_type)
  353. {
  354. // The default align is max page size. On x86-64, the max page size is 2M, but EPC page size is 4K,
  355. // so in x86-64, we just treat it as EPC page size. The (2M - 4K) size is not eadded. We leave it
  356. // as a hole.
  357. if (!IS_PAGE_ALIGNED(prg_hdr->p_align))
  358. {
  359. SE_TRACE(SE_TRACE_WARNING, "A segment is not PAGE aligned, alignment = %x\n", prg_hdr->p_align);
  360. return false;
  361. }
  362. // Verify the overlap of segment. we don't verify here, because a well compiled file has no overlapped segment.
  363. load_seg[k].first = prg_hdr->p_vaddr;
  364. load_seg[k].second = prg_hdr->p_vaddr + ROUND_TO(prg_hdr->p_memsz, prg_hdr->p_align) - 1;
  365. for (int j = 0; j < k; j++)
  366. {
  367. if (is_overlap(load_seg[k], load_seg[j]))
  368. {
  369. SE_TRACE(SE_TRACE_WARNING, "there is overlap segment [%x : %x] [%x : %x]\n",
  370. load_seg[k].first, load_seg[k].second, load_seg[j].first, load_seg[j].second);
  371. return false;
  372. }
  373. }
  374. k++;
  375. }
  376. }
  377. return true;
  378. }
  379. bool get_bin_fmt(const ElfW(Ehdr) *elf_hdr, bin_fmt_t& bf)
  380. {
  381. switch(elf_hdr->e_machine)
  382. {
  383. #if RTS_SYSTEM_WORDSIZE == 32
  384. case EM_386:
  385. bf = BF_ELF32;
  386. return true;
  387. #endif
  388. #if RTS_SYSTEM_WORDSIZE == 64
  389. case EM_X86_64:
  390. bf = BF_ELF64;
  391. return true;
  392. #endif
  393. }
  394. return false;
  395. }
  396. si_flags_t page_attr_to_si_flags(uint32_t page_attr)
  397. {
  398. si_flags_t res = SI_FLAG_REG;
  399. if (page_attr & PF_R)
  400. res |= SI_FLAG_R;
  401. if (page_attr & PF_W)
  402. res |= SI_FLAG_W;
  403. if (page_attr & PF_X)
  404. res |= SI_FLAG_X;
  405. return res;
  406. }
  407. Section* build_section(const uint8_t* raw_data, uint64_t size, uint64_t virtual_size,
  408. uint64_t rva, uint32_t page_attr)
  409. {
  410. si_flags_t sf = page_attr_to_si_flags(page_attr);
  411. if (sf != SI_FLAG_REG)
  412. return new Section(raw_data, size, virtual_size, rva, sf);
  413. return NULL;
  414. }
  415. bool build_regular_sections(const uint8_t* start_addr,
  416. vector<Section *>& sections,
  417. const Section*& tls_sec,
  418. uint64_t& metadata_offset,
  419. uint64_t& metadata_block_size)
  420. {
  421. const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)start_addr;
  422. const ElfW(Phdr) *prg_hdr = GET_PTR(ElfW(Phdr), start_addr, elf_hdr->e_phoff);
  423. uint64_t virtual_size = 0, alignment = 0, aligned_virtual_size = 0;
  424. if (get_meta_property(start_addr, elf_hdr, metadata_offset, metadata_block_size) == false)
  425. return false;
  426. for (unsigned idx = 0; idx < elf_hdr->e_phnum; ++idx, ++prg_hdr)
  427. {
  428. Section* sec = NULL;
  429. switch (prg_hdr->p_type)
  430. {
  431. case PT_LOAD:
  432. sec = build_section(GET_PTR(uint8_t, start_addr, prg_hdr->p_offset),
  433. (uint64_t)prg_hdr->p_filesz, (uint64_t)prg_hdr->p_memsz,
  434. (uint64_t)prg_hdr->p_vaddr, (uint32_t) prg_hdr->p_flags);
  435. break;
  436. case PT_TLS:
  437. virtual_size = (uint64_t)prg_hdr->p_memsz;
  438. alignment = (uint64_t)prg_hdr->p_align;
  439. /* according to ELF spec, alignment equals zero or one means no align requirement */
  440. if (alignment == 0 || alignment == 1)
  441. aligned_virtual_size = virtual_size;
  442. else
  443. aligned_virtual_size = (virtual_size + alignment - 1) & (~(alignment - 1));
  444. sec = build_section(GET_PTR(uint8_t, start_addr, prg_hdr->p_offset),
  445. (uint64_t)prg_hdr->p_filesz, aligned_virtual_size,
  446. (uint64_t)prg_hdr->p_vaddr, (uint32_t) prg_hdr->p_flags);
  447. break;
  448. default:
  449. continue;
  450. }
  451. if (sec == NULL)
  452. return false;
  453. /* We've filtered segments that are not of PT_LOAD or PT_TLS type. */
  454. if (!is_tls_segment(prg_hdr))
  455. {
  456. /* A PT_LOAD segment. */
  457. sections.push_back(sec);
  458. continue;
  459. }
  460. /* It is a TLS segment. */
  461. tls_sec = sec;
  462. }
  463. return true;
  464. }
  465. const Section* get_max_rva_section(const vector<Section*> sections)
  466. {
  467. size_t sec_size = sections.size();
  468. if (sec_size == 0)
  469. return NULL;
  470. const Section* psec = sections[0];
  471. for (size_t idx = 1; idx < sec_size; ++idx)
  472. {
  473. if (sections[idx]->get_rva() > psec->get_rva())
  474. psec = sections[idx];
  475. }
  476. return psec;
  477. }
  478. }
/* Construct a parser over the enclave image mapped at [start_addr,
 * start_addr + len).  No parsing happens here -- call run_parser().
 * The dynamic-entry cache is zeroed so absent tags read as all-zero. */
ElfParser::ElfParser (const uint8_t* start_addr, uint64_t len)
    :m_start_addr(start_addr), m_len(len), m_bin_fmt(BF_UNKNOWN),
     m_tls_section(NULL), m_metadata_offset(0), m_metadata_block_size(0)
{
    memset(&m_dyn_info, 0, sizeof(m_dyn_info));
}
  485. sgx_status_t ElfParser::run_parser()
  486. {
  487. /* We only need to run the parser once. */
  488. if (m_sections.size() != 0) return SGX_SUCCESS;
  489. const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)m_start_addr;
  490. if (elf_hdr == NULL || m_len < sizeof(ElfW(Ehdr)))
  491. return SGX_ERROR_INVALID_ENCLAVE;
  492. /* Check elf header*/
  493. if (!validate_elf_header(elf_hdr))
  494. return SGX_ERROR_INVALID_ENCLAVE;
  495. /* Get and check machine mode */
  496. if (!get_bin_fmt(elf_hdr, m_bin_fmt))
  497. return SGX_ERROR_MODE_INCOMPATIBLE;
  498. /* Check if there is any overlap segment, and make sure the segment is 1 page aligned;
  499. * TLS segment must exist.
  500. */
  501. if (!validate_segment(elf_hdr, m_len))
  502. return SGX_ERROR_INVALID_ENCLAVE;
  503. if (!parse_dyn(elf_hdr, &m_dyn_info[0]))
  504. return SGX_ERROR_INVALID_ENCLAVE;
  505. /* Check if there is any undefined symbol */
  506. if (!check_symbol_table(elf_hdr, m_dyn_info, m_sym_table))
  507. {
  508. return SGX_ERROR_UNDEFINED_SYMBOL;
  509. }
  510. /* Check if there is unexpected relocation type */
  511. if (!validate_reltabs(elf_hdr, m_dyn_info))
  512. return SGX_ERROR_INVALID_ENCLAVE;
  513. /* Check if there is .ctor section */
  514. if (has_ctor_section(elf_hdr))
  515. return SGX_ERROR_INVALID_ENCLAVE;
  516. /* build regular sections */
  517. if (build_regular_sections(m_start_addr, m_sections, m_tls_section, m_metadata_offset, m_metadata_block_size))
  518. return SGX_SUCCESS;
  519. else
  520. return SGX_ERROR_INVALID_ENCLAVE;
  521. }
  522. ElfParser::~ElfParser()
  523. {
  524. delete_ptrs_from_container(m_sections);
  525. if (m_tls_section) delete m_tls_section;
  526. }
/* Binary format detected by run_parser(); BF_UNKNOWN before a successful run. */
bin_fmt_t ElfParser::get_bin_format() const
{
    return m_bin_fmt;
}
  531. uint64_t ElfParser::get_enclave_max_size() const
  532. {
  533. if(m_bin_fmt == BF_ELF64)
  534. return ENCLAVE_MAX_SIZE_64;
  535. else
  536. return ENCLAVE_MAX_SIZE_32;
  537. }
/* File offset of the metadata payload in '.note.sgxmeta'; 0 until run_parser() succeeds. */
uint64_t ElfParser::get_metadata_offset() const
{
    return m_metadata_offset;
}

/* Size in bytes of the metadata payload; 0 until run_parser() succeeds. */
uint64_t ElfParser::get_metadata_block_size() const
{
    return m_metadata_block_size;
}

/* Base address of the mapped enclave file this parser was constructed with. */
const uint8_t* ElfParser::get_start_addr() const
{
    return m_start_addr;
}

/* Loadable (PT_LOAD) sections collected by run_parser(); owned by this parser. */
const vector<Section *>& ElfParser::get_sections() const
{
    return m_sections;
}

/* TLS template section, or NULL when the image has no PT_TLS segment. */
const Section* ElfParser::get_tls_section() const
{
    return m_tls_section;
}
  558. uint64_t ElfParser::get_symbol_rva(const char* name) const
  559. {
  560. map<string, uint64_t>::const_iterator it = m_sym_table.find(name);
  561. if (it != m_sym_table.end())
  562. return it->second;
  563. else
  564. return 0;
  565. }
/* Build a bitmap with one bit per EPC page of the mapped image, where a set
 * bit marks a page touched by at least one text relocation.  Returns true
 * with an empty bitmap when the image has no DT_TEXTREL entry. */
bool ElfParser::get_reloc_bitmap(vector<uint8_t>& bitmap)
{
    // Clear the `bitmap' so that it is in a known state
    bitmap.clear();

    // No text relocations at all: nothing to record.
    if (!m_dyn_info[DT_TEXTREL].d_tag)
        return true;

    const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)m_start_addr;
    // rel[0]/rel[1] bound the .rel(a) table, rel[2]/rel[3] bound the PLT table.
    const ElfW(Rel) *rel[4] = { NULL, NULL, NULL, NULL };

    if (m_dyn_info[DT_JMPREL].d_tag)
    {
        rel[2] = get_section_raw_data<ElfW(Rel)>(elf_hdr, m_dyn_info[DT_JMPREL].d_un.d_ptr);
        rel[3] = GET_PTR(const ElfW(Rel), rel[2], m_dyn_info[DT_PLTRELSZ].d_un.d_val);
    }

    if (m_dyn_info[RTS_DT_REL].d_tag)
    {
        rel[0] = get_section_raw_data<ElfW(Rel)>(elf_hdr, m_dyn_info[RTS_DT_REL].d_un.d_ptr);
        rel[1] = GET_PTR(const ElfW(Rel), rel[0], m_dyn_info[RTS_DT_RELSZ].d_un.d_val);
        assert(sizeof(ElfW(Rel)) == m_dyn_info[RTS_DT_RELENT].d_un.d_val);
    }

    // The enclave size mapped in memory is calculated by
    //   sec->get_rva() + sec->virtual_size();
    // where the `sec' is the section with maximum RVA value.
    uint64_t image_size = 0;
    const Section* max_rva_sec = get_max_rva_section(this->m_sections);
    if (max_rva_sec == NULL)
        return false;

    image_size = max_rva_sec->get_rva() + max_rva_sec->virtual_size();

    // NOTE:
    //  Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
    //  In the future, if the max enclave size is extended to beyond 1<<49, this type-casting will not work.
    //  It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)

    // allocate bitmap: one bit per page, rounded up to whole bytes
    bitmap.resize((size_t)((((image_size + (SE_PAGE_SIZE - 1)) >> SE_PAGE_SHIFT) + 7) / 8));

    // Walk the two [begin, end) relocation ranges recorded above.
    for (unsigned idx = 0; idx < ARRAY_LENGTH(rel); idx += 2)
    {
        const ElfW(Rel) *rel_entry = rel[idx], *rel_end = rel[idx+1];

        if (NULL == rel_entry)
            continue;

        for (; rel_entry < rel_end; rel_entry++)
        {
            // R_*_NONE entries relocate nothing; skip them.
#if RTS_SYSTEM_WORDSIZE == 64
            if (ELF64_R_TYPE(rel_entry->r_info) == R_X86_64_NONE)
#else
            if (ELF32_R_TYPE(rel_entry->r_info) == R_386_NONE)
#endif
                continue;

            ElfW(Addr) reloc_addr = rel_entry->r_offset;
            uint64_t page_frame = (uint64_t)(reloc_addr >> SE_PAGE_SHIFT);

            // NOTE:
            //  Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
            //  In the future, if the max enclave size is extended to beyond 1<<49, this type-casting will not work.
            //  It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)

            // If there is more than one relocation in one page, then "|" works as there
            // is only one relocation in one page.
            bitmap[(size_t)(page_frame/8)] = (uint8_t)(bitmap[(size_t)(page_frame/8)] | (uint8_t)(1 << (page_frame % 8)));

            // Check if the relocation across boundary: a relocation target that
            // straddles a page boundary also dirties the following page.
            if ((reloc_addr & (SE_PAGE_SIZE - 1)) > (SE_PAGE_SIZE - sizeof(sys_word_t)))
            {
                page_frame++;
                bitmap[(size_t)(page_frame/8)] = (uint8_t)(bitmap[(size_t)(page_frame/8)] | (uint8_t)(1 << (page_frame % 8)));
            }
        }
    }
    return true;
}
  631. void ElfParser::get_reloc_entry_offset(const char* sec_name, vector<uint64_t>& offsets)
  632. {
  633. if (sec_name == NULL)
  634. return;
  635. const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr) *)m_start_addr;
  636. const ElfW(Shdr) *shdr = get_section_by_name(ehdr, sec_name);
  637. if (shdr == NULL)
  638. return;
  639. /* find the start and end offset of the target section */
  640. const uint64_t start = shdr->sh_addr;
  641. const uint64_t end = start + shdr->sh_size;
  642. offsets.clear();
  643. SE_TRACE(SE_TRACE_DEBUG, "found section '%s' - offset %#lx, size %#lx\n",
  644. sec_name, (long)start, (long)shdr->sh_size);
  645. /* iterate sections to find the relocs */
  646. shdr = GET_PTR(ElfW(Shdr), m_start_addr, ehdr->e_shoff);
  647. for (unsigned idx = 0; idx < ehdr->e_shnum; ++idx, ++shdr)
  648. {
  649. if (shdr->sh_type != SHT_RELA &&
  650. shdr->sh_type != SHT_REL)
  651. continue;
  652. uint64_t rel_size = shdr->sh_size;
  653. uint64_t rel_offset = shdr->sh_offset;
  654. uint64_t nr_rel = rel_size / shdr->sh_entsize;
  655. /* for each reloc, check its target address */
  656. const ElfW(Rel) *rel = GET_PTR(ElfW(Rel), m_start_addr, rel_offset);
  657. for (; nr_rel > 0; --nr_rel, ++rel)
  658. {
  659. if (rel->r_offset >= start && rel->r_offset < end)
  660. {
  661. uint64_t offset = DIFF64(rel, m_start_addr);
  662. SE_TRACE(SE_TRACE_DEBUG, "found one reloc at offset %#lx\n", offset);
  663. offsets.push_back(offset);
  664. }
  665. }
  666. }
  667. }
  668. #include "update_global_data.hxx"
  669. bool ElfParser::update_global_data(const create_param_t* const create_param,
  670. uint8_t *data,
  671. uint32_t *data_size)
  672. {
  673. if(*data_size < sizeof(global_data_t))
  674. {
  675. *data_size = sizeof(global_data_t);
  676. return false;
  677. }
  678. do_update_global_data(create_param, (global_data_t *)data);
  679. *data_size = sizeof(global_data_t);
  680. return true;
  681. }
  682. sgx_status_t ElfParser::modify_info(enclave_diff_info_t *enclave_diff_info)
  683. {
  684. UNUSED(enclave_diff_info);
  685. return SGX_SUCCESS;
  686. }
  687. sgx_status_t ElfParser::get_info(enclave_diff_info_t *enclave_diff_info)
  688. {
  689. UNUSED(enclave_diff_info);
  690. return SGX_SUCCESS;
  691. }
  692. void ElfParser::get_executable_sections(vector<const char *>& xsec_names) const
  693. {
  694. xsec_names.clear();
  695. const ElfW(Ehdr) *elf_hdr = (const ElfW(Ehdr) *)m_start_addr;
  696. const ElfW(Shdr) *shdr = GET_PTR(ElfW(Shdr), elf_hdr, elf_hdr->e_shoff);
  697. const char *shstrtab = GET_PTR(char, elf_hdr, shdr[elf_hdr->e_shstrndx].sh_offset);
  698. for (unsigned idx = 0; idx < elf_hdr->e_shnum; ++idx, ++shdr)
  699. {
  700. if ((shdr->sh_flags & SHF_EXECINSTR) == SHF_EXECINSTR)
  701. xsec_names.push_back(shstrtab + shdr->sh_name);
  702. }
  703. return;
  704. }