elf_parser.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538
  1. /*
  2. * Copyright (C) 2011-2018 Intel Corporation. All rights reserved.
  3. *
  4. * Redistribution and use in source and binary forms, with or without
  5. * modification, are permitted provided that the following conditions
  6. * are met:
  7. *
  8. * * Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * * Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in
  12. * the documentation and/or other materials provided with the
  13. * distribution.
  14. * * Neither the name of Intel Corporation nor the names of its
  15. * contributors may be used to endorse or promote products derived
  16. * from this software without specific prior written permission.
  17. *
  18. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  19. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  20. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  21. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  22. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  23. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  24. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  25. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  26. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  27. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  28. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  29. *
  30. */
  31. /*
  32. * This file is part of trusted loader for tRTS.
  33. */
  34. #include "elf_parser.h"
  35. #include "rts.h"
  36. #include "util.h"
  37. #include "elf_util.h"
  38. #include "global_data.h"
  39. #include "../trts_emodpr.h"
  40. #include "trts_inst.h"
  41. static int elf_tls_aligned_virtual_size(const void *enclave_base,
  42. size_t *aligned_virtual_size);
  43. static ElfW(Phdr)* get_phdr(const ElfW(Ehdr)* ehdr)
  44. {
  45. if (ehdr == NULL)
  46. return NULL; /* Invalid image. */
  47. /* Check the ElfW Magic number. */
  48. if ((ehdr->e_ident[EI_MAG0] != ELFMAG0) ||
  49. (ehdr->e_ident[EI_MAG1] != ELFMAG1) ||
  50. (ehdr->e_ident[EI_MAG2] != ELFMAG2) ||
  51. (ehdr->e_ident[EI_MAG3] != ELFMAG3))
  52. return NULL;
  53. /* Enclave image should be a shared object file. */
  54. if (ehdr->e_type != ET_DYN)
  55. return NULL;
  56. return GET_PTR(ElfW(Phdr), ehdr, ehdr->e_phoff);
  57. }
  58. static ElfW(Sym)* get_sym(ElfW(Sym)* symtab, size_t idx)
  59. {
  60. if(STB_WEAK == ELFW(ST_BIND)(symtab[idx].st_info)
  61. && 0 == symtab[idx].st_value)
  62. {
  63. return NULL;
  64. }
  65. return &symtab[idx];
  66. }
  67. #ifdef __x86_64__
/* Relocation for x64 (with addend).
 *
 * Applies nr_relocs Elf64_Rela entries located at enclave_base+rela_offset,
 * resolving symbols against the table at enclave_base+sym_offset, and
 * patches each target slot in place.  Returns 0 on success, -1 on an
 * unsupported relocation type or when a TPOFF64 reloc is present but the
 * TLS segment size cannot be determined. */
static int do_relocs(const ElfW(Addr) enclave_base,
                     ElfW(Addr) rela_offset,
                     ElfW(Addr) sym_offset,
                     size_t nr_relocs)
{
    ElfW(Rela)* rela = GET_PTR(ElfW(Rela), enclave_base, rela_offset);
    ElfW(Sym)* symtab = GET_PTR(ElfW(Sym), enclave_base, sym_offset);
    ElfW(Sym)* sym;
    size_t i;
    size_t aligned_virtual_size = 0;

    for (i = 0; i < nr_relocs; ++i, ++rela)
    {
        /* Slot to patch; r_offset is relative to the load base. */
        ElfW(Addr)* reloc_addr = GET_PTR(ElfW(Addr), enclave_base, rela->r_offset);

        switch (ELF64_R_TYPE(rela->r_info))
        {
        case R_X86_64_RELATIVE:
            /* B + A: base-relative, no symbol lookup needed. */
            *reloc_addr = enclave_base + (uintptr_t)rela->r_addend;
            break;

        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
        case R_X86_64_64:
            /* S + A (+ base): symbol value plus addend. */
            sym = get_sym(symtab, ELF64_R_SYM(rela->r_info));
            if(!sym)
                break; /* unresolved weak symbol: leave the slot untouched */
            *reloc_addr = enclave_base + sym->st_value + (uintptr_t)rela->r_addend;
            break;

        case R_X86_64_DTPMOD64:
            /* The enclave is a single module; its TLS module ID is 1. */
            *reloc_addr = 1;
            break;

        case R_X86_64_DTPOFF64:
            /* Symbol's offset within the module's TLS block. */
            sym = get_sym(symtab, ELF64_R_SYM(rela->r_info));
            if(!sym)
                break;
            *reloc_addr = sym->st_value + (uintptr_t)rela->r_addend;
            break;

        case R_X86_64_TPOFF64:
            /* Static TLS: negative offset from the thread pointer, i.e.
             * symbol offset minus the aligned TLS segment size. */
            sym = get_sym(symtab, ELF64_R_SYM(rela->r_info));
            if(!sym)
                break;
            if ((0 == elf_tls_aligned_virtual_size((void *)enclave_base, &aligned_virtual_size)) && (aligned_virtual_size))
            {
                *reloc_addr = sym->st_value + (uintptr_t)rela->r_addend - aligned_virtual_size;
                break;
            }
            else
                return -1; /* TPOFF64 reloc but no/invalid TLS segment */

        case R_X86_64_NONE:
            break;

        default: /* unsupported relocs */
            return -1;
        }
    }
    return 0;
}
  123. #elif defined(__i386__)
/* Relocation for x86 (without addend): the implicit addend, where one is
 * used, is the value already stored in the target slot.
 *
 * Applies nr_relocs Elf32_Rel entries located at enclave_base+rel_offset,
 * resolving symbols against the table at enclave_base+sym_offset.
 * Returns 0 on success, -1 on an unsupported relocation type or when a
 * TLS TPOFF reloc is present but the TLS segment size cannot be found. */
static int do_relocs(const ElfW(Addr) enclave_base,
                     ElfW(Addr) rel_offset,
                     ElfW(Addr) sym_offset,
                     size_t nr_relocs)
{
    ElfW(Rel)* rel = GET_PTR(ElfW(Rel), enclave_base, rel_offset);
    ElfW(Sym)* symtab = GET_PTR(ElfW(Sym), enclave_base, sym_offset);
    ElfW(Sym)* sym = NULL;
    size_t i;
    size_t aligned_virtual_size = 0;

    for (i = 0; i < nr_relocs; ++i, ++rel)
    {
        /* Slot to patch; r_offset is relative to the load base. */
        ElfW(Addr)* reloc_addr = GET_PTR(ElfW(Addr), enclave_base, rel->r_offset);

        /* R_386_RELATIVE needs no symbol, so handle it before the lookup. */
        if(R_386_RELATIVE == ELF32_R_TYPE(rel->r_info))
        {
            *reloc_addr += enclave_base; /* B+A */
            continue;
        }

        sym = get_sym(symtab, ELF32_R_SYM(rel->r_info));
        if(!sym) /* when the weak symbol is not implemented, sym is NULL */
            continue;

        switch (ELF32_R_TYPE(rel->r_info))
        {
        case R_386_GLOB_DAT:
        case R_386_JMP_SLOT: /* S */
            *reloc_addr = enclave_base + sym->st_value;
            break;

        case R_386_32: /* S+A */
            *reloc_addr += enclave_base + sym->st_value;
            break;

        case R_386_PC32: /* S+A-P */
            *reloc_addr += (enclave_base + sym->st_value - (ElfW(Addr))reloc_addr);
            break;

        case R_386_NONE:
            break;

        case R_386_TLS_DTPMOD32:
            /* Single-module enclave: TLS module ID is always 1. */
            *reloc_addr = 1;
            break;

        case R_386_TLS_DTPOFF32:
            /* Symbol's offset within the module's TLS block. */
            *reloc_addr = sym->st_value;
            break;

        case R_386_TLS_TPOFF:
            /* Negative static-TLS offset from the thread pointer. */
            if ((0 == elf_tls_aligned_virtual_size((void *)enclave_base, &aligned_virtual_size)) && (aligned_virtual_size))
            {
                *reloc_addr += sym->st_value - aligned_virtual_size;
                break;
            }
            else
                return -1; /* TLS reloc but no/invalid TLS segment */

        case R_386_TLS_TPOFF32:
            /* Positive variant: distance back from the thread pointer. */
            if ((0 == elf_tls_aligned_virtual_size((void *)enclave_base, &aligned_virtual_size)) && (aligned_virtual_size))
            {
                *reloc_addr += aligned_virtual_size - sym->st_value;
                break;
            }
            else
                return -1;

        default: /* unsupported relocs */
            return -1;
        }
    }
    return 0;
}
  188. #endif
/* Apply one relocation table from within a function returning int.
 * base_addr    - enclave load base
 * rel_offset   - table offset from the base; 0 means "no table", a no-op
 * sym_offset   - symbol table offset from the base
 * total_sz     - table size in bytes
 * rel_entry_sz - size of one entry; 0 is rejected (avoids divide-by-zero)
 * NOTE: expands to `return -1` from the ENCLOSING function on failure. */
#define DO_REL(base_addr, rel_offset, sym_offset, total_sz, rel_entry_sz) \
do { \
    if (rel_offset) \
    { \
        size_t n; \
        if (rel_entry_sz == 0) \
            return -1; \
        n = total_sz/rel_entry_sz; \
        if (do_relocs((ElfW(Addr))base_addr, rel_offset, sym_offset, n)) \
            return -1; \
    } \
} while (0)
/* By default, every symbol is bound as a global symbol by the link editor.
 * A call to a global symbol goes through its .plt entry, which malfunctions
 * if it is reached before relocation has been performed.
 * Declaring relocate_enclave as .hidden makes it a local symbol.
 * Since this function is called before relocation is done, it must be a
 * local symbol so that the call is direct and PC-relative, e.g.
 * "fce3: e8 98 12 00 00 call 10f80 <relocate_enclave>"
 * 0x9812=0x10f80-0xfce8
 */
  209. __attribute__ ((visibility ("hidden")))
  210. int relocate_enclave(void* enclave_base)
  211. {
  212. ElfW(Half) phnum = 0;
  213. ElfW(Ehdr) *ehdr = (ElfW(Ehdr)*)enclave_base;
  214. ElfW(Phdr) *phdr = get_phdr(ehdr);
  215. if (phdr == NULL)
  216. return -1; /* Invalid image. */
  217. for (; phnum < ehdr->e_phnum; phnum++, phdr++)
  218. {
  219. /* Search for dynamic segment */
  220. if (phdr->p_type == PT_DYNAMIC)
  221. {
  222. size_t count;
  223. size_t n_dyn = phdr->p_filesz/sizeof(ElfW(Dyn));
  224. ElfW(Dyn) *dyn = GET_PTR(ElfW(Dyn), ehdr, phdr->p_paddr);
  225. ElfW(Addr) sym_offset = 0;
  226. ElfW(Addr) rel_offset = 0;
  227. ElfW(Addr) plt_offset = 0;
  228. size_t rel_total_sz = 0;
  229. size_t rel_entry_sz = 0;
  230. size_t plt_total_sz = 0;
  231. for (count = 0; count < n_dyn; count++, dyn++)
  232. {
  233. if (dyn->d_tag == DT_NULL) /* End */
  234. break;
  235. switch (dyn->d_tag)
  236. {
  237. case DT_SYMTAB: /* symbol table */
  238. sym_offset = dyn->d_un.d_ptr;
  239. break;
  240. case RTS_DT_REL:/* Rel (x86) or Rela (x64) relocs */
  241. rel_offset = dyn->d_un.d_ptr;
  242. break;
  243. case RTS_DT_RELSZ:
  244. rel_total_sz = dyn->d_un.d_val;
  245. break;
  246. case RTS_DT_RELENT:
  247. rel_entry_sz = dyn->d_un.d_val;
  248. break;
  249. case DT_JMPREL: /* PLT relocs */
  250. plt_offset = dyn->d_un.d_ptr;
  251. break;
  252. case DT_PLTRELSZ:
  253. plt_total_sz = dyn->d_un.d_val;
  254. break;
  255. }
  256. }
  257. DO_REL(enclave_base, rel_offset, sym_offset, rel_total_sz, rel_entry_sz);
  258. DO_REL(enclave_base, plt_offset, sym_offset, plt_total_sz, rel_entry_sz);
  259. }
  260. }
  261. return 0;
  262. }
  263. int elf_tls_info(const void* enclave_base,
  264. uintptr_t *tls_addr, size_t *tdata_size)
  265. {
  266. ElfW(Half) phnum = 0;
  267. const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr)*)enclave_base;
  268. ElfW(Phdr) *phdr = get_phdr(ehdr);
  269. if (!tls_addr || !tdata_size)
  270. return -1;
  271. if (phdr == NULL)
  272. return -1; /* Invalid image. */
  273. /* Search for TLS segment */
  274. *tls_addr = 0;
  275. *tdata_size = 0;
  276. for (; phnum < ehdr->e_phnum; phnum++, phdr++)
  277. {
  278. if (phdr->p_type == PT_TLS)
  279. {
  280. /* tls_addr here is got from the program header, the address
  281. * need to be added by the enclave base.
  282. */
  283. *tls_addr = (size_t)enclave_base + phdr->p_vaddr;
  284. *tdata_size = phdr->p_filesz;
  285. break;
  286. }
  287. }
  288. return 0;
  289. }
  290. static int elf_tls_aligned_virtual_size(const void *enclave_base,
  291. size_t *aligned_virtual_size)
  292. {
  293. ElfW(Half) phnum = 0;
  294. const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr)*)enclave_base;
  295. ElfW(Phdr) *phdr = get_phdr(ehdr);
  296. size_t virtual_size =0, align = 0;
  297. if (phdr == NULL)
  298. return -1;
  299. if (!aligned_virtual_size)
  300. return -1;
  301. *aligned_virtual_size = 0;
  302. for (; phnum < ehdr->e_phnum; phnum++, phdr++)
  303. {
  304. if (phdr->p_type == PT_TLS)
  305. {
  306. virtual_size = phdr->p_memsz;
  307. align = phdr->p_align;
  308. /* p_align == 0 or p_align == 1 means no alignment is required */
  309. if (align == 0 || align == 1)
  310. *aligned_virtual_size = virtual_size;
  311. else
  312. *aligned_virtual_size = (virtual_size + align - 1) & (~(align - 1));
  313. break;
  314. }
  315. }
  316. return 0;
  317. }
  318. int elf_get_init_array(const void* enclave_base,
  319. uintptr_t *init_array_addr, size_t *init_array_size)
  320. {
  321. ElfW(Half) phnum = 0;
  322. const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr)*)enclave_base;
  323. ElfW(Phdr) *phdr = get_phdr(ehdr);
  324. if (!init_array_addr || !init_array_size)
  325. return -1;
  326. if (phdr == NULL)
  327. return -1; /* Invalid image. */
  328. *init_array_addr = 0;
  329. *init_array_size = 0;
  330. /* Search for Dynamic segment */
  331. for (; phnum < ehdr->e_phnum; phnum++, phdr++)
  332. {
  333. if (phdr->p_type == PT_DYNAMIC)
  334. {
  335. size_t count;
  336. size_t n_dyn = phdr->p_filesz/sizeof(ElfW(Dyn));
  337. ElfW(Dyn) *dyn = GET_PTR(ElfW(Dyn), ehdr, phdr->p_paddr);
  338. for (count = 0; count < n_dyn; count++, dyn++)
  339. {
  340. switch (dyn->d_tag)
  341. {
  342. case DT_INIT_ARRAY:
  343. *init_array_addr = dyn->d_un.d_ptr;
  344. break;
  345. case DT_INIT_ARRAYSZ:
  346. *init_array_size = dyn->d_un.d_val;
  347. break;
  348. }
  349. }
  350. }
  351. }
  352. return 0;
  353. }
  354. int elf_get_uninit_array(const void* enclave_base,
  355. uintptr_t *uninit_array_addr, size_t *uninit_array_size)
  356. {
  357. ElfW(Half) phnum = 0;
  358. const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr)*)enclave_base;
  359. ElfW(Phdr) *phdr = get_phdr(ehdr);
  360. if (!uninit_array_addr || !uninit_array_size)
  361. return -1;
  362. if (phdr == NULL)
  363. return -1; /* Invalid image. */
  364. *uninit_array_addr = 0;
  365. *uninit_array_size = 0;
  366. /* Search for Dynamic segment */
  367. for (; phnum < ehdr->e_phnum; phnum++, phdr++)
  368. {
  369. if (phdr->p_type == PT_DYNAMIC)
  370. {
  371. size_t count;
  372. size_t n_dyn = phdr->p_filesz/sizeof(ElfW(Dyn));
  373. ElfW(Dyn) *dyn = GET_PTR(ElfW(Dyn), ehdr, phdr->p_paddr);
  374. for (count = 0; count < n_dyn; count++, dyn++)
  375. {
  376. switch (dyn->d_tag)
  377. {
  378. case DT_FINI_ARRAY:
  379. *uninit_array_addr = dyn->d_un.d_ptr;
  380. break;
  381. case DT_FINI_ARRAYSZ:
  382. *uninit_array_size = dyn->d_un.d_val;
  383. break;
  384. }
  385. }
  386. }
  387. }
  388. return 0;
  389. }
  390. static int has_text_relo(const ElfW(Ehdr) *ehdr, const ElfW(Phdr) *phdr, ElfW(Half) phnum)
  391. {
  392. ElfW(Half) phi = 0;
  393. int text_relo = 0;
  394. for (; phi < phnum; phi++, phdr++)
  395. {
  396. if (phdr->p_type == PT_DYNAMIC)
  397. {
  398. size_t count;
  399. size_t n_dyn = phdr->p_filesz/sizeof(ElfW(Dyn));
  400. ElfW(Dyn) *dyn = GET_PTR(ElfW(Dyn), ehdr, phdr->p_paddr);
  401. for (count = 0; count < n_dyn; count++, dyn++)
  402. {
  403. if (dyn->d_tag == DT_NULL)
  404. break;
  405. if (dyn->d_tag == DT_TEXTREL)
  406. {
  407. text_relo = 1;
  408. break;
  409. }
  410. }
  411. break;
  412. }
  413. }
  414. return text_relo;
  415. }
/* Restore the intended page permissions after relocation:
 * - When the image needs text relocations (DT_TEXTREL), non-writable
 *   PT_LOAD segments were presumably mapped writable so relocs could be
 *   applied; re-protect each one to its R/X flags from the header.
 *   (TODO confirm the loader-side writable mapping against the loader.)
 * - Make every PT_GNU_RELRO region read-only.
 * Returns SGX_SUCCESS, SGX_ERROR_UNEXPECTED for an invalid image, or the
 * failing trts_mprotect() status. */
sgx_status_t change_protection(void *enclave_base)
{
    ElfW(Half) phnum = 0;
    const ElfW(Ehdr) *ehdr = (const ElfW(Ehdr)*)enclave_base;
    const ElfW(Phdr) *phdr = get_phdr(ehdr);
    uint64_t perms;
    sgx_status_t status = SGX_ERROR_UNEXPECTED;

    if (phdr == NULL)
        return status;

    int text_relocation = has_text_relo(ehdr, phdr, ehdr->e_phnum);

    for (; phnum < ehdr->e_phnum; phnum++, phdr++)
    {
        if (text_relocation && (phdr->p_type == PT_LOAD) && ((phdr->p_flags & PF_W) == 0))
        {
            perms = 0;
            /* Round the segment's span outward to whole pages. */
            size_t start = (size_t)enclave_base + (phdr->p_vaddr & (size_t)(~(SE_PAGE_SIZE-1)));
            size_t end = (size_t)enclave_base + ((phdr->p_vaddr + phdr->p_memsz + SE_PAGE_SIZE - 1) & (size_t)(~(SE_PAGE_SIZE-1)));
            /* Rebuild the permission set from the header's flags. */
            if (phdr->p_flags & PF_R)
                perms |= SI_FLAG_R;
            if (phdr->p_flags & PF_X)
                perms |= SI_FLAG_X;
            if((status = trts_mprotect(start, end - start, perms)) != SGX_SUCCESS)
                return status;
        }
        if (phdr->p_type == PT_GNU_RELRO)
        {
            size_t start = (size_t)enclave_base + (phdr->p_vaddr & (size_t)(~(SE_PAGE_SIZE-1)));
            size_t end = (size_t)enclave_base + ((phdr->p_vaddr + phdr->p_memsz + SE_PAGE_SIZE - 1) & (size_t)(~(SE_PAGE_SIZE-1)));
            /* Skip empty regions; otherwise lock the range read-only. */
            if ((start != end) &&
                (status = trts_mprotect(start, end - start, SI_FLAG_R)) != SGX_SUCCESS)
                return status;
        }
    }
    return SGX_SUCCESS;
}
  451. /* vim: set ts=4 sw=4 et cin: */