/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * db_rtld.c
 *
 * This file contains utilities to load ELF binaries into memory
 * and link them against each other.
 * The source code in this file is imported and modified from the GNU C
 * Library.
 */

#include "pal_defs.h"
#include "pal.h"
#include "pal_internal.h"
#include "pal_debug.h"
#include "pal_error.h"
#include "pal_rtld.h"
#include "api.h"

#include <sysdeps/generic/ldsodefs.h>
#include <elf/elf.h>
#include <bits/dlfcn.h>

struct link_map * loaded_maps = NULL;
struct link_map * exec_map = NULL;

struct link_map * lookup_symbol (const char *undef_name, ElfW(Sym) **ref);

#ifdef assert
/* This function can be used as a breakpoint to debug assertions */
void __attribute_noinline __assert (void)
{
    BREAK();
}
#endif

/* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code. */
static struct link_map * resolve_map (const char **strtab, ElfW(Sym) ** ref)
{
    if (ELFW(ST_BIND) ((*ref)->st_info) != STB_LOCAL) {
        struct link_map * l = lookup_symbol((*strtab) + (*ref)->st_name, ref);
        if (l) {
            *strtab = (const void *) D_PTR (l->l_info[DT_STRTAB]);
            return l;
        }
    }
    return 0;
}

extern ElfW(Addr) resolve_rtld (const char * sym_name);

#define RESOLVE_RTLD(sym_name)   resolve_rtld(sym_name)
#define RESOLVE_MAP(strtab, ref) resolve_map(strtab, ref)
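
/* The headers below (imported from glibc) expect RESOLVE_RTLD and
   RESOLVE_MAP to be defined before inclusion: ELF_DYNAMIC_RELOCATE expands
   to relocation code that calls them back to resolve each symbol. */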
#include "dynamic_link.h"
#include "dl-machine-x86_64.h"

/* Allocate a `struct link_map' for a new object being loaded,
   and enter it into the _dl_loaded list. */
struct link_map *
new_elf_object (const char * realname, enum object_type type)
{
    struct link_map *new;

    new = (struct link_map *) malloc(sizeof (struct link_map));
    if (new == NULL)
        return NULL;

    /* We apparently expect this to be zeroed. */
    memset(new, 0, sizeof(struct link_map));
    new->l_name = realname ?
                  remalloc(realname, strlen(realname) + 1) :
                  NULL;
    new->l_type = type;

    return new;
}

/* Cache the location of MAP's hash table. */
void setup_elf_hash (struct link_map *map)
{
    Elf_Symndx * hash;
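
    /* Prefer DT_GNU_HASH when present.  Its section layout is:
         [ nbuckets ] [ symbias ] [ bitmask_nwords ] [ shift ]
         [ bitmask words ] [ buckets ] [ hash chains ]
       i.e. a Bloom filter followed by the bucket and chain arrays. */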
    if (__builtin_expect (map->l_info[DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM
                                      + DT_THISPROCNUM + DT_VERSIONTAGNUM
                                      + DT_EXTRANUM + DT_VALNUM] != NULL, 1)) {
        Elf32_Word *hash32
            = (void *) D_PTR (map->l_info[DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM
                                          + DT_THISPROCNUM + DT_VERSIONTAGNUM
                                          + DT_EXTRANUM + DT_VALNUM]);

        map->l_nbuckets = *hash32++;
        Elf32_Word symbias = *hash32++;
        Elf32_Word bitmask_nwords = *hash32++;
        /* Must be a power of two. */
        assert ((bitmask_nwords & (bitmask_nwords - 1)) == 0);
        map->l_gnu_bitmask_idxbits = bitmask_nwords - 1;
        map->l_gnu_shift = *hash32++;

        map->l_gnu_bitmask = (ElfW(Addr) *) hash32;
        hash32 += __ELF_NATIVE_CLASS / 32 * bitmask_nwords;

        map->l_gnu_buckets = hash32;
        hash32 += map->l_nbuckets;
        map->l_gnu_chain_zero = hash32 - symbias;
        return;
    }

    if (!map->l_info[DT_HASH])
        return;

    hash = (void *) D_PTR (map->l_info[DT_HASH]);

    /* Structure of DT_HASH:
       The bucket array forms the hash table itself.  The entries in the
       chain array parallel the symbol table.

       [ nbucket           ]
       [ nchain            ]
       [ bucket[0]         ]
       [ ...               ]
       [ bucket[nbucket-1] ]
       [ chain[0]          ]
       [ ...               ]
       [ chain[nchain-1]   ] */

    map->l_nbuckets = *hash++;
    hash++;
    map->l_buckets = hash;
    hash += map->l_nbuckets;
    map->l_chain = hash;
}

/* Map in the shared object NAME, actually located in REALNAME, and already
   opened on FD */
struct link_map *
map_elf_object_by_handle (PAL_HANDLE handle, enum object_type type,
                          void * fbp, int fbp_len,
                          bool do_copy_dyn)
{
    struct link_map * l = new_elf_object(_DkStreamRealpath(handle), type);
    const char * errstring = NULL;
    int errval = 0;
    int ret;

    if (handle == NULL) {
        errstring = "cannot stat shared object";
        errval = PAL_ERROR_INVAL;
call_lose:
        printf("%s (%s)\n", errstring, PAL_STRERROR(errval));
        return NULL;
    }

    /* This is the ELF header.  We read it in `open_verify'. */
    const ElfW(Ehdr) * header = (void *) fbp;

    /* Extract the remaining details we need from the ELF header
       and then read in the program header table. */
    int e_type = header->e_type;
    l->l_entry = header->e_entry;
    l->l_phnum = header->e_phnum;

    int maplength = header->e_phnum * sizeof (ElfW(Phdr));
    ElfW(Phdr) * phdr;

    if (header->e_phoff + maplength <= (int) fbp_len) {
        phdr = (void *) ((char *) fbp + header->e_phoff);
    } else {
        phdr = (ElfW(Phdr) *) malloc (maplength);

        if ((ret = _DkStreamRead(handle, header->e_phoff, maplength, phdr,
                                 NULL, 0)) < 0) {
            errstring = "cannot read file data";
            errval = ret;
            goto call_lose;
        }
    }

    /* Presumed absent PT_GNU_STACK. */
    //uint_fast16_t stack_flags = PF_R|PF_W|PF_X;

    /* Scan the program header table, collecting its load commands. */
    struct loadcmd {
        ElfW(Addr) mapstart, mapend, dataend, allocend;
        unsigned int mapoff;
        int prot;
    } * loadcmds, *c;

    loadcmds = __alloca(sizeof(struct loadcmd) * l->l_phnum);
    int nloadcmds = 0;
    bool has_holes = false;

    /* The struct is initialized to zero so this is not necessary:
       l->l_ld = 0;
       l->l_phdr = 0;
       l->l_addr = 0; */

    const ElfW(Phdr) * ph;
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
        switch (ph->p_type)
        {
            /* These entries tell us where to find things once the file's
               segments are mapped in.  We record the addresses it says
               verbatim, and later correct for the run-time load address. */
            case PT_DYNAMIC:
                l->l_ld = (void *) ph->p_vaddr;
                l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
                break;

            case PT_PHDR:
                l->l_phdr = (void *) ph->p_vaddr;
                break;

            case PT_LOAD:
                /* A load command tells us to map in part of the file.
                   We record the load commands and process them all later. */
                if (__builtin_expect (!ALLOC_ALIGNED(ph->p_align), 0)) {
                    errstring = "ELF load command alignment not aligned";
                    errval = PAL_ERROR_NOMEM;
                    goto call_lose;
                }

                if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
                                       & (ph->p_align - 1)) != 0, 0)) {
                    errstring =
                        "ELF load command address/offset not properly aligned";
                    errval = PAL_ERROR_NOMEM;
                    goto call_lose;
                }

                c = &loadcmds[nloadcmds++];
                c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
                c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
                c->dataend = ph->p_vaddr + ph->p_filesz;
                c->allocend = ph->p_vaddr + ph->p_memsz;
                c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);

                /* Determine whether there is a gap between the last segment
                   and this one. */
                if (nloadcmds > 1 && c[-1].mapend != c->mapstart)
                    has_holes = true;

                /* Optimize a common case. */
                c->prot = 0;
                if (ph->p_flags & PF_R)
                    c->prot |= PAL_PROT_READ;
                if (ph->p_flags & PF_W)
                    c->prot |= PAL_PROT_WRITE;
                if (ph->p_flags & PF_X)
                    c->prot |= PAL_PROT_EXEC;
                break;

            case PT_TLS:
                if (ph->p_memsz == 0)
                    /* Nothing to do for an empty segment. */
                    break;

            case PT_GNU_STACK:
                //stack_flags = ph->p_flags;
                break;

            case PT_GNU_RELRO:
                l->l_relro_addr = ph->p_vaddr;
                l->l_relro_size = ph->p_memsz;
                break;
        }

    if (__builtin_expect (nloadcmds == 0, 0)) {
        /* This only happens for a bogus object that will be caught with
           another error below.  But we don't want to go through the
           calculations below using NLOADCMDS - 1. */
        errstring = "object file has no loadable segments";
        goto call_lose;
    }

    /* Now process the load commands and map segments into memory. */
    c = loadcmds;

    /* Length of the sections to be loaded. */
    maplength = loadcmds[nloadcmds - 1].allocend - c->mapstart;

#define APPEND_WRITECOPY(prot) ((prot)|PAL_PROT_WRITECOPY)

    if (__builtin_expect (e_type, ET_DYN) == ET_DYN) {
        /* This is a position-independent shared object.  We can let the
           kernel map it anywhere it likes, but we must have space for all
           the segments in their specified positions relative to the first.
           So we map the first segment without MAP_FIXED, but with its
           extent increased to cover all the segments.  Then we remove
           access from the excess portion, and there is known sufficient
           space there to remap from the later segments.

           As a refinement, sometimes we have an address that we would
           prefer to map such objects at; but this is only a preference,
           the OS can do whatever it likes. */
        void * mapaddr = NULL;

        /* Remember which part of the address space this object uses. */
        errval = _DkStreamMap(handle, (void **) &mapaddr,
                              APPEND_WRITECOPY(c->prot), c->mapoff,
                              maplength);

        if (__builtin_expect (errval < 0, 0)) {
            errval = -errval;
map_error:
            errstring = "failed to map segment from shared object";
            goto call_lose;
        }

        l->l_map_start = (ElfW(Addr)) mapaddr;
        l->l_map_end = (ElfW(Addr)) mapaddr + maplength;
        l->l_addr = l->l_map_start - c->mapstart;

        if (has_holes)
            /* Change protection on the excess portion to disallow all access;
               the portions we do not remap later will be inaccessible as if
               unallocated.  Then jump into the normal segment-mapping loop to
               handle the portion of the segment past the end of the file
               mapping. */
            _DkVirtualMemoryProtect((void *) (l->l_addr + c->mapend),
                                    loadcmds[nloadcmds - 1].mapstart - c->mapend,
                                    PAL_PROT_NONE);

        goto postmap;
    }

    /* Remember which part of the address space this object uses. */
    l->l_map_start = c->mapstart + l->l_addr;
    l->l_map_end = l->l_map_start + maplength;

    while (c < &loadcmds[nloadcmds]) {
        if (c->mapend > c->mapstart) {
            /* Map the segment contents from the file. */
            void * mapaddr = (void *) (l->l_addr + c->mapstart);
            int rv;
            if ((rv = _DkStreamMap(handle, &mapaddr, APPEND_WRITECOPY(c->prot),
                                   c->mapoff, c->mapend - c->mapstart)) < 0) {
                goto map_error;
            }
        }

postmap:
        if (l->l_phdr == 0
            && (ElfW(Off)) c->mapoff <= header->e_phoff
            && ((int) (c->mapend - c->mapstart + c->mapoff)
                >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
            /* Found the program header in this segment. */
            l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);

        if (c->allocend > c->dataend) {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file. */
            ElfW(Addr) zero, zeroend, zerosec;

            zero = l->l_addr + c->dataend;
            zeroend = ALLOC_ALIGNUP(l->l_addr + c->allocend);
            zerosec = ALLOC_ALIGNUP(zero);

            if (zeroend < zerosec)
                /* All the extra data is in the last section of the segment.
                   We can just zero it. */
                zerosec = zeroend;

            if (zerosec > zero) {
                /* Zero the final part of the last section of the segment. */
                if (__builtin_expect ((c->prot & PAL_PROT_WRITE) == 0, 0))
                {
                    /* Dag nab it. */
                    if (_DkVirtualMemoryProtect((void *) ALLOC_ALIGNDOWN(zero),
                                                pal_state.alloc_align,
                                                c->prot | PAL_PROT_WRITE) < 0) {
                        errstring = "cannot change memory protections";
                        goto call_lose;
                    }
                }

                memset ((void *) zero, '\0', zerosec - zero);

                if (__builtin_expect ((c->prot & PAL_PROT_WRITE) == 0, 0))
                    _DkVirtualMemoryProtect((void *) ALLOC_ALIGNDOWN(zero),
                                            pal_state.alloc_align, c->prot);
            }

            if (zeroend > zerosec) {
                /* Map the remaining zero pages in from the zero fill FD. */
                void * mapat = (void *) zerosec;
                errval = _DkVirtualMemoryAlloc(&mapat, zeroend - zerosec,
                                               0, c->prot);
                if (__builtin_expect (errval < 0, 0)) {
                    errstring = "cannot map zero-fill allocation";
                    goto call_lose;
                }
            }
        }

        ++c;
    }

    if (l->l_ld == 0) {
        if (__builtin_expect (e_type == ET_DYN, 0)) {
            errstring = "object file has no dynamic section";
            goto call_lose;
        }
    } else {
        l->l_real_ld = l->l_ld =
            (ElfW(Dyn) *) ((ElfW(Addr)) l->l_ld + l->l_addr);
        if (do_copy_dyn)
            l->l_ld = remalloc(l->l_ld, sizeof(ElfW(Dyn)) * l->l_ldnum);
    }

    elf_get_dynamic_info(l->l_ld, l->l_info, l->l_addr);

    if (l->l_phdr == NULL) {
        /* The program header is not contained in any of the segments.
           We have to allocate memory ourselves and copy it over from our
           temporary place. */
        ElfW(Phdr) * newp = (ElfW(Phdr) *) malloc (header->e_phnum
                                                   * sizeof (ElfW(Phdr)));
        if (!newp) {
            errstring = "cannot allocate memory for program header";
            goto call_lose;
        }

        l->l_phdr = memcpy(newp, phdr,
                           header->e_phnum * sizeof (ElfW(Phdr)));
    } else {
        /* Adjust the PT_PHDR value by the runtime load address. */
        l->l_phdr = (ElfW(Phdr) *) ((ElfW(Addr)) l->l_phdr + l->l_addr);
    }

    l->l_entry += l->l_addr;

    /* Set up the symbol hash table. */
    setup_elf_hash (l);

    return l;
}

int check_elf_object (PAL_HANDLE handle)
{
#define ELF_MAGIC_SIZE EI_CLASS /* 4 bytes: just the "\177ELF" magic */
    unsigned char buffer[ELF_MAGIC_SIZE];

    int len = _DkStreamRead(handle, 0, ELF_MAGIC_SIZE, buffer, NULL, 0);

    if (__builtin_expect (len < 0, 0))
        return -len;

    if (__builtin_expect (len < ELF_MAGIC_SIZE, 0))
        return -PAL_ERROR_INVAL;

    ElfW(Ehdr) * ehdr = (ElfW(Ehdr) *) buffer;

    static const unsigned char expected[EI_CLASS] =
    {
        [EI_MAG0] = ELFMAG0,
        [EI_MAG1] = ELFMAG1,
        [EI_MAG2] = ELFMAG2,
        [EI_MAG3] = ELFMAG3,
    };

    /* See whether the ELF header is what we expect. */
    if (__builtin_expect(memcmp(ehdr->e_ident, expected, ELF_MAGIC_SIZE) !=
                         0, 0))
        return -PAL_ERROR_INVAL;

    return 0;
}

void free_elf_object (struct link_map * map)
{
    _DkVirtualMemoryFree((void *) map->l_map_start,
                         map->l_map_end - map->l_map_start);

    if (map->l_prev)
        map->l_prev->l_next = map->l_next;
    if (map->l_next)
        map->l_next->l_prev = map->l_prev;

#ifdef DEBUG
    _DkDebugDelMap(map);
#endif

    if (loaded_maps == map)
        loaded_maps = map->l_next;

    free(map);
}

/* Map in the shared object file loaded from URI. */
int load_elf_object (const char * uri, enum object_type type)
{
    PAL_HANDLE handle;
    /* First we open the file by URI, as a regular file handle */
    int ret = _DkStreamOpen(&handle, uri, PAL_ACCESS_RDONLY,
                            0, 0, 0);
    if (ret < 0)
        return ret;

    ret = load_elf_object_by_handle(handle, type);

    _DkObjectClose(handle);
    return ret;
}
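
/* A minimal usage sketch (the URI below is hypothetical; OBJECT_PRELOAD is
   one of the enum object_type values used elsewhere in this file):

       int ret = load_elf_object("file:libsysdb.so", OBJECT_PRELOAD);
       if (ret < 0)
           printf("cannot load the library\n");
 */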

int add_elf_object(void * addr, PAL_HANDLE handle, int type)
{
    struct link_map * map = new_elf_object(_DkStreamRealpath(handle), type);
    const ElfW(Ehdr) * header = (void *) addr;
    const ElfW(Phdr) * ph, * phdr =
        (ElfW(Phdr) *) ((char *) addr + header->e_phoff);

    map->l_phdr = (void *) header->e_phoff;
    map->l_phnum = header->e_phnum;

    ElfW(Addr) mapstart = 0, mapend = 0;

    for (ph = phdr; ph < &phdr[map->l_phnum]; ++ph)
        switch (ph->p_type) {
            case PT_DYNAMIC:
                map->l_ld = (void *) ph->p_vaddr;
                map->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
                break;

            case PT_LOAD: {
                ElfW(Addr) start = (ElfW(Addr))
                    ALLOC_ALIGNDOWN(map->l_addr + ph->p_vaddr);
                ElfW(Addr) end = (ElfW(Addr))
                    ALLOC_ALIGNUP(map->l_addr + ph->p_vaddr + ph->p_memsz);
                if (!mapstart || start < mapstart)
                    mapstart = start;
                if (!mapend || end > mapend)
                    mapend = end;
            }
        }

    map->l_addr = (ElfW(Addr)) addr - mapstart;
    map->l_entry = header->e_entry;
    map->l_map_start = (ElfW(Addr)) addr;
    map->l_map_end = (ElfW(Addr)) addr + (mapend - mapstart);

    map->l_real_ld = (ElfW(Dyn) *)
        ((char *) map->l_addr + (unsigned long) map->l_ld);
    map->l_ld = remalloc(map->l_real_ld, sizeof(ElfW(Dyn)) * map->l_ldnum);

    elf_get_dynamic_info(map->l_ld, map->l_info, map->l_addr);
    setup_elf_hash(map);

    ELF_DYNAMIC_RELOCATE(map);

    struct link_map * prev = loaded_maps;
    while (prev->l_next)
        prev = prev->l_next;
    map->l_prev = prev;
    map->l_next = NULL;
    prev->l_next = map;

    if (type == OBJECT_EXEC)
        exec_map = map;

#ifdef DEBUG
    _DkDebugAddMap(map);
#endif
    return 0;
}

static int relocate_elf_object (struct link_map *l);

#if CACHE_LOADED_BINARIES == 1

#define MAX_CACHED_LOADCMDS 32
struct cached_elf_object {
    PAL_NUM             instance_id;
    void *              loader_addr;    /* get the address of DkStreamOpen */
    struct link_map     map;
    char                map_name[80];

    struct cached_loadcmd {
        void * mapstart;
        unsigned long mapsize;
        unsigned long mapoff;
        int mapprot;
    } loadcmds[MAX_CACHED_LOADCMDS];
    int nloadcmds;

    ElfW(Dyn)           dyn[];
};
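
/* On-disk layout of a ".cached" file: the cached_elf_object header above,
   rounded up to the allocation alignment, followed by a raw copy of every
   PT_LOAD segment of the object; each segment is recorded in loadcmds[]
   with its original address, size, protection, and its offset in the
   cache file. */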

void cache_elf_object (PAL_HANDLE handle, struct link_map * map)
{
    char uri[URI_MAX];
    unsigned long cached_size = 0;

    int ret = _DkStreamGetName(handle, uri, URI_MAX);
    if (ret < 0)
        return;

    strcpy_static(uri + ret, ".cached", URI_MAX - ret);
    PAL_HANDLE cached_file;

    while (1) {
        ret = _DkStreamOpen(&cached_file, uri,
                            PAL_ACCESS_RDWR,
                            PAL_SHARE_OWNER_W|PAL_SHARE_OWNER_R,
                            PAL_CREAT_TRY|PAL_CREAT_ALWAYS, 0);

        if (ret != -PAL_ERROR_STREAMEXIST)
            break;

        if (_DkStreamOpen(&cached_file, uri, PAL_ACCESS_RDWR, 0, 0, 0) < 0)
            return;

        ret = _DkStreamDelete(cached_file, 0);
        _DkObjectClose(cached_file);
        if (ret < 0)
            return;
    }

    if (ret < 0)
        return;

    struct cached_elf_object * obj = NULL;
    unsigned long obj_size = sizeof(struct cached_elf_object);
    if (map->l_ld != map->l_real_ld)
        obj_size += sizeof(ElfW(Dyn)) * map->l_ldnum;
    obj_size = ALLOC_ALIGNUP(obj_size);
    cached_size = obj_size;

    ret = _DkStreamSetLength(cached_file, obj_size);
    if (ret < 0)
        goto out;

    ret = _DkStreamMap(cached_file, (void **) &obj,
                       PAL_PROT_READ|PAL_PROT_WRITE, 0, obj_size);
    if (ret < 0)
        goto out;

    obj->instance_id = pal_state.instance_id;
    obj->loader_addr = (void *) DkStreamOpen;
    memcpy(&obj->map, map, sizeof(struct link_map));
    if (map->l_ld != map->l_real_ld) {
        obj->map.l_ld = NULL;
        memcpy(obj->dyn, map->l_ld, sizeof(ElfW(Dyn)) * map->l_ldnum);
        for (int i = 0 ;
             i < sizeof(obj->map.l_info) / sizeof(obj->map.l_info[0]) ;
             i++)
            if (obj->map.l_info[i])
                obj->map.l_info[i] =
                    (void *) obj->map.l_info[i] - (unsigned long) map->l_ld;
    }
    obj->map.l_name = NULL;
    memcpy(obj->map_name, map->l_name, sizeof(obj->map_name));
    obj->nloadcmds = 0;

    const ElfW(Ehdr) * header = (void *) map->l_map_start;
    const ElfW(Phdr) * phdr = (void *) header + header->e_phoff, * ph;

    for (ph = phdr ; ph < &phdr[header->e_phnum] ; ph++)
        if (ph->p_type == PT_LOAD) {
            assert(obj->nloadcmds < MAX_CACHED_LOADCMDS);
            void * mapstart = (void *)
                ALLOC_ALIGNDOWN(map->l_addr + ph->p_vaddr);
            void * mapend = (void *)
                ALLOC_ALIGNUP(map->l_addr + ph->p_vaddr + ph->p_memsz);
            unsigned long mapsize = mapend - mapstart;
            int mapprot = 0;
            void * cache_addr = NULL;

            if (ph->p_flags & PF_R)
                mapprot |= PAL_PROT_READ;
            if (ph->p_flags & PF_W)
                mapprot |= PAL_PROT_WRITE;
            if (ph->p_flags & PF_X)
                mapprot |= PAL_PROT_EXEC;

            ret = _DkStreamSetLength(cached_file, cached_size + mapsize);
            if (ret < 0)
                goto out_mapped;

            ret = _DkStreamMap(cached_file, &cache_addr,
                               PAL_PROT_READ|PAL_PROT_WRITE, cached_size,
                               mapsize);
            if (ret < 0)
                goto out_mapped;

            obj->loadcmds[obj->nloadcmds].mapstart = mapstart;
            obj->loadcmds[obj->nloadcmds].mapsize = mapsize;
            obj->loadcmds[obj->nloadcmds].mapprot = mapprot;
            obj->loadcmds[obj->nloadcmds].mapoff = cached_size;
            obj->nloadcmds++;
            cached_size += mapsize;

            memcpy(cache_addr, mapstart, mapsize);

            ret = _DkStreamUnmap(cache_addr, mapsize);
            if (ret < 0)
                goto out_mapped;
        }

    ret = _DkStreamUnmap(obj, obj_size);
    if (ret < 0)
        return;

    _DkObjectClose(cached_file);
    return;

out_mapped:
    _DkStreamUnmap(obj, obj_size);
out:
    _DkStreamDelete(cached_file, 0);
    _DkObjectClose(cached_file);
}

struct link_map * check_cached_elf_object (PAL_HANDLE handle)
{
    char uri[URI_MAX];
    int ret = _DkStreamGetName(handle, uri, URI_MAX);
    if (ret < 0)
        return NULL;

    strcpy_static(uri + ret, ".cached", URI_MAX - ret);
    PAL_HANDLE cached_file;
    ret = _DkStreamOpen(&cached_file, uri, PAL_ACCESS_RDWR, 0, 0, 0);
    if (ret < 0)
        return NULL;

    struct cached_elf_object * obj = NULL;
    unsigned long obj_size = ALLOC_ALIGNUP(sizeof(struct cached_elf_object));

    ret = _DkStreamMap(cached_file, (void **) &obj,
                       PAL_PROT_READ|PAL_PROT_WRITE|PAL_PROT_WRITECOPY,
                       0, obj_size);
    if (ret < 0)
        goto out;

    /* We want to check if there is a previously cached one, but if this
     * process is the first one, force updating the cached binary. */
    if (pal_state.instance_id != obj->instance_id)
        goto out_mapped;

    if (!obj->map.l_ld) {
        obj->map.l_ld = obj->dyn;
        for (int i = 0 ;
             i < sizeof(obj->map.l_info) / sizeof(obj->map.l_info[0]) ;
             i++)
            if (obj->map.l_info[i])
                obj->map.l_info[i] =
                    (void *) obj->map.l_info[i] + (unsigned long) obj->map.l_ld;
    }

    struct cached_loadcmd * l = obj->loadcmds;
    struct cached_loadcmd * last = &obj->loadcmds[obj->nloadcmds - 1];

    if (l < last) {
        ret = _DkStreamMap(cached_file, &l->mapstart,
                           l->mapprot|PAL_PROT_WRITECOPY,
                           l->mapoff,
                           last->mapstart + last->mapsize - l->mapstart);
        if (ret < 0)
            goto out_more_mapped;

        ret = _DkVirtualMemoryProtect(l->mapstart + l->mapsize,
                                      last->mapstart - (l->mapstart + l->mapsize),
                                      PAL_PROT_NONE);
        if (ret < 0)
            goto out_more_mapped;

        l++;
        goto map_next;
    }

    for ( ; l <= last ; l++) {
map_next:
        ret = _DkStreamMap(cached_file, &l->mapstart,
                           l->mapprot|PAL_PROT_WRITECOPY,
                           l->mapoff,
                           l->mapsize);
        if (ret < 0)
            goto out_more_mapped;
    }

    if ((void *) DkStreamOpen != obj->loader_addr) {
        for (int i = 0 ; i < obj->map.nrelocs ; i++)
            *obj->map.relocs[i] += (void *) DkStreamOpen - obj->loader_addr;
    }

    obj->map.l_name = obj->map_name;
    return &obj->map;

out_more_mapped:
    _DkStreamUnmap(obj->loadcmds[0].mapstart,
                   last->mapstart + last->mapsize - obj->loadcmds[0].mapstart);
out_mapped:
    _DkStreamUnmap(obj, obj_size);
out:
    _DkObjectClose(cached_file);
    return NULL;
}

#endif /* CACHE_LOADED_BINARIES == 1 */

int load_elf_object_by_handle (PAL_HANDLE handle, enum object_type type)
{
    struct link_map * map = NULL;
    char fb[FILEBUF_SIZE];
    const char * errstring;
    int ret = 0;

#if CACHE_LOADED_BINARIES == 1
    map = check_cached_elf_object(handle);
    if (map)
        goto done;
#endif

    /* Now we will start verifying the file as an ELF binary.  This part of
       the code is borrowed from open_verify() */
    ElfW(Ehdr) * ehdr = (ElfW(Ehdr) *) &fb;
    ElfW(Phdr) * phdr = NULL;
    int phdr_malloced = 0;

    int len = _DkStreamRead(handle, 0, FILEBUF_SIZE, &fb, NULL, 0);

    /* Cast so that a negative read result is not silently converted to a
       large unsigned value and missed by the check. */
    if (__builtin_expect (len < (int) sizeof(ElfW(Ehdr)), 0)) {
        errstring = "ELF file with a strange size";
        ret = -PAL_ERROR_INVAL;
        goto verify_failed;
    }

#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64

    static const unsigned char expected[EI_NIDENT] =
    {
        [EI_MAG0] = ELFMAG0,
        [EI_MAG1] = ELFMAG1,
        [EI_MAG2] = ELFMAG2,
        [EI_MAG3] = ELFMAG3,
        [EI_CLASS] = ELFW(CLASS),
        [EI_DATA] = byteorder,
        [EI_VERSION] = EV_CURRENT,
        [EI_OSABI] = 0,
    };

#define ELFOSABI_LINUX 3 /* Linux. */

    /* See whether the ELF header is what we expect. */
    if (__builtin_expect(
            memcmp(ehdr->e_ident, expected, EI_OSABI) != 0 || (
                ehdr->e_ident[EI_OSABI] != ELFOSABI_SYSV &&
                ehdr->e_ident[EI_OSABI] != ELFOSABI_LINUX), 0)) {
        errstring = "ELF file with invalid header";
        ret = -PAL_ERROR_INVAL;
        goto verify_failed;
    }

    /* Chia-Che 11/23/13: Removing other checks; comparing the header
       should be enough */

    int maplength = ehdr->e_phnum * sizeof (ElfW(Phdr));

    /* if e_phoff + maplength fits within the data already read... */
    if (ehdr->e_phoff + maplength <= (int) len) {
        phdr = (void *) (fb + ehdr->e_phoff);
    } else {
        /* ...otherwise, we have to read again */
        phdr = malloc (maplength);
        phdr_malloced = 1;

        ret = _DkStreamRead(handle, ehdr->e_phoff, maplength, phdr, NULL, 0);

        if (ret < 0 || ret != maplength) {
            errstring = "cannot read file data";
            if (ret >= 0)
                ret = -PAL_ERROR_INVAL;
            goto verify_failed;
        }
    }

    if (!(map = map_elf_object_by_handle(handle, type, &fb, len, true))) {
        errstring = "unexpected failure";
        ret = -PAL_ERROR_INVAL;
        goto verify_failed;
    }

    relocate_elf_object(map);

#if CACHE_LOADED_BINARIES == 1
    cache_elf_object(handle, map);
done:
#endif

    if (loaded_maps)
        loaded_maps->l_prev = map;
    map->l_next = loaded_maps;
    map->l_prev = NULL;
    loaded_maps = map;

    if (map->l_type == OBJECT_EXEC)
        exec_map = map;

#ifdef DEBUG
    _DkDebugAddMap(map);
#endif

    return 0;

verify_failed:
    if (phdr && phdr_malloced)
        free(phdr);

    printf("%s\n", errstring);
    return ret;
}

struct sym_val {
    ElfW(Sym) *s;
    struct link_map *m;
};

/* This is the hashing function specified by the ELF ABI.  In the
   first five operations no overflow is possible so we optimized it a
   bit. */
unsigned long int elf_hash (const char *name_arg)
{
    const unsigned char *name = (const unsigned char *) name_arg;
    unsigned long int hash = 0;

    if (*name == '\0')
        return hash;

    hash = *name++;
    if (*name == '\0')
        return hash;

    hash = (hash << 4) + *name++;
    if (*name == '\0')
        return hash;

    hash = (hash << 4) + *name++;
    if (*name == '\0')
        return hash;

    hash = (hash << 4) + *name++;
    if (*name == '\0')
        return hash;

    hash = (hash << 4) + *name++;
    while (*name != '\0') {
        unsigned long int hi;
        hash = (hash << 4) + *name++;
        hi = hash & 0xf0000000;

        /*
         * The algorithm specified in the ELF ABI is as follows:
         *
         * if (hi != 0)
         *     hash ^= hi >> 24;
         *
         * hash &= ~hi;
         *
         * But the following is equivalent and a lot faster, especially on
         * modern processors.
         */

        hash ^= hi;
        hash ^= hi >> 24;
    }

    return hash;
}

/* Nested routine to check whether the symbol matches. */
static inline __attribute_always_inline
ElfW(Sym) * check_match(ElfW(Sym) * sym, ElfW(Sym) * ref, const char * undef_name,
                        const char * strtab)
{
    unsigned int stt = ELFW(ST_TYPE) (sym->st_info);

    assert(ELF_RTYPE_CLASS_PLT == 1);

    if (__builtin_expect((sym->st_value == 0 /* No value. */
                          && stt != STT_TLS)
                         || sym->st_shndx == SHN_UNDEF, 0))
        return NULL;

    /* Ignore all but STT_NOTYPE, STT_OBJECT, STT_FUNC,
       STT_COMMON, STT_TLS, and STT_GNU_IFUNC since the others are
       no code/data definitions. */
#define ALLOWED_STT \
    ((1 << STT_NOTYPE) | (1 << STT_OBJECT) | (1 << STT_FUNC) \
     | (1 << STT_COMMON) | (1 << STT_TLS) | (1 << STT_GNU_IFUNC))

    if (__builtin_expect(((1 << stt) & ALLOWED_STT) == 0, 0))
        return NULL;

    /* Compare including the terminating NUL so that "foo" does not
       spuriously match "foobar". */
    if (sym != ref && memcmp(strtab + sym->st_name, undef_name,
                             strlen(undef_name) + 1))
        /* Not the symbol we are looking for. */
        return NULL;

    /* There cannot be another entry for this symbol so stop here. */
    return sym;
}

ElfW(Sym) *
do_lookup_map (ElfW(Sym) * ref, const char * undef_name,
               const uint_fast32_t hash, unsigned long int elf_hash,
               const struct link_map * map)
{
    /* These variables are used in the nested function. */
    Elf_Symndx symidx;
    ElfW(Sym) * sym;

    /* The tables for this map. */
    ElfW(Sym) * symtab = (void *) D_PTR (map->l_info[DT_SYMTAB]);
    const char * strtab = (const void *) D_PTR (map->l_info[DT_STRTAB]);

    const ElfW(Addr) * bitmask = map->l_gnu_bitmask;
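
    /* The GNU hash table starts with a Bloom filter: two bits derived from
       the hash are tested in one bitmask word, and only if both are set can
       the symbol possibly be defined in this object, so most failed lookups
       never touch the buckets at all. */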
    if (__builtin_expect (bitmask != NULL, 1)) {
        ElfW(Addr) bitmask_word = bitmask[(hash / __ELF_NATIVE_CLASS)
                                          & map->l_gnu_bitmask_idxbits];

        unsigned int hashbit1 = hash & (__ELF_NATIVE_CLASS - 1);
        unsigned int hashbit2 = (hash >> map->l_gnu_shift)
                                & (__ELF_NATIVE_CLASS - 1);

        if (__builtin_expect ((bitmask_word >> hashbit1)
                              & (bitmask_word >> hashbit2) & 1, 0)) {
            Elf32_Word bucket = map->l_gnu_buckets
                                    [hash % map->l_nbuckets];

            if (bucket != 0) {
                const Elf32_Word *hasharr = &map->l_gnu_chain_zero[bucket];

                do
                    if (((*hasharr ^ hash) >> 1) == 0) {
                        symidx = hasharr - map->l_gnu_chain_zero;
                        sym = check_match (&symtab[symidx], ref, undef_name, strtab);
                        if (sym != NULL)
                            return sym;
                    }
                while ((*hasharr++ & 1u) == 0);
            }
        }

        /* No symbol found. */
        symidx = SHN_UNDEF;
    } else {
        /* Use the old SysV-style hash table.  Search the appropriate
           hash bucket in this object's symbol table for a definition
           for the same symbol name. */
        for (symidx = map->l_buckets[elf_hash % map->l_nbuckets];
             symidx != STN_UNDEF;
             symidx = map->l_chain[symidx]) {
            sym = check_match (&symtab[symidx], ref, undef_name, strtab);
            if (sym != NULL)
                return sym;
        }
    }

    return NULL;
}

/* Inner part of the lookup functions.  We return a value > 0 if we
   found the symbol, the value 0 if nothing is found and < 0 if
   something bad happened. */
static int do_lookup (const char * undef_name, ElfW(Sym) * ref,
                      struct sym_val * result)
{
    const uint_fast32_t fast_hash = elf_fast_hash(undef_name);
    const long int hash = elf_hash(undef_name);
    ElfW(Sym) * sym;
    struct link_map * map = loaded_maps;
    struct sym_val weak_result = { .s = NULL, .m = NULL };

    for (; map ; map = map->l_next) {
        sym = do_lookup_map(ref, undef_name, fast_hash, hash, map);
        if (!sym)
            continue;

        switch (__builtin_expect (ELFW(ST_BIND) (sym->st_info), STB_GLOBAL)) {
            case STB_WEAK:
                /* Weak definition.  Use this value if we don't find another. */
                if (!weak_result.s) {
                    weak_result.s = sym;
                    weak_result.m = (struct link_map *) map;
                }
                break;

            case STB_GLOBAL:
            case STB_GNU_UNIQUE:
                /* Global definition.  Just what we need. */
                result->s = sym;
                result->m = (struct link_map *) map;
                return 1;

            default:
                /* Local symbols are ignored. */
                break;
        }
    }

    if (weak_result.s) {
        *result = weak_result;
        return 1;
    }

    /* We have not found anything until now. */
    return 0;
}

/* Search loaded objects' symbol tables for a definition of the symbol
   UNDEF_NAME, perhaps with a requested version for the symbol.

   We must never have calls to the audit functions inside this function
   or in any function which gets called.  If this would happen the audit
   code might create a thread which can throw off all the scope locking. */
struct link_map * lookup_symbol (const char * undef_name, ElfW(Sym) ** ref)
{
    struct sym_val current_value = { NULL, NULL };

    do_lookup(undef_name, *ref, &current_value);

    if (__builtin_expect (current_value.s == NULL, 0)) {
        *ref = NULL;
        return NULL;
    }

    *ref = current_value.s;
    return current_value.m;
}

static int protect_relro (struct link_map * l)
{
    ElfW(Addr) start = ALLOC_ALIGNDOWN(l->l_addr + l->l_relro_addr);
    ElfW(Addr) end = ALLOC_ALIGNUP(l->l_addr + l->l_relro_addr +
                                   l->l_relro_size);

    if (start != end)
        _DkVirtualMemoryProtect((void *) start, end - start, PAL_PROT_READ);
    return 0;
}

static int relocate_elf_object (struct link_map * l)
{
    struct textrels {
        ElfW(Addr) start;
        ElfW(Addr) len;
        int prot;
        struct textrels * next;
    } * textrels = NULL;
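
    /* Text relocations: segments mapped without PF_W must be made writable
       while ELF_DYNAMIC_RELOCATE patches them, so each such segment is
       queued on this list and its original protection restored afterward. */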
    int ret;
    const ElfW(Phdr) * ph;

    for (ph = l->l_phdr ; ph < &l->l_phdr[l->l_phnum] ; ph++)
        if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0) {
            struct textrels * r = malloc(sizeof(struct textrels));
            r->start = ALLOC_ALIGNDOWN(ph->p_vaddr) + l->l_addr;
            r->len = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz)
                     - ALLOC_ALIGNDOWN(ph->p_vaddr);

            ret = _DkVirtualMemoryProtect((void *) r->start, r->len,
                                          PAL_PROT_READ|PAL_PROT_WRITE);
            if (ret < 0)
                return ret;

            r->prot = 0;
            if (ph->p_flags & PF_R)
                r->prot |= PAL_PROT_READ;
            if (ph->p_flags & PF_W)
                r->prot |= PAL_PROT_WRITE;
            if (ph->p_flags & PF_X)
                r->prot |= PAL_PROT_EXEC;

            r->next = textrels;
            textrels = r;
        }

#if PROFILING == 1
    unsigned long before_relocate = _DkSystemTimeQuery();
#endif

    /* Do the actual relocation of the object's GOT and other data. */
    ELF_DYNAMIC_RELOCATE(l);

#if PROFILING == 1
    pal_state.relocation_time += _DkSystemTimeQuery() - before_relocate;
#endif

    while (textrels) {
        ret = _DkVirtualMemoryProtect((void *) textrels->start, textrels->len,
                                      textrels->prot);
        if (ret < 0)
            return ret;

        struct textrels * next = textrels->next;
        free(textrels);
        textrels = next;
    }

    /* In case we can protect the data now that the relocations are
       done, do it. */
    if (l->l_type != OBJECT_EXEC && l->l_relro_size != 0)
        if ((ret = protect_relro(l)) < 0)
            return ret;

    return 0;
}

void DkDebugAttachBinary (PAL_STR uri, PAL_PTR start_addr)
{
#ifdef DEBUG
    if (!strpartcmp_static(uri, "file:"))
        return;

    const char * realname = uri + static_strlen("file:");
    struct link_map * l = new_elf_object(realname, OBJECT_EXTERNAL);

    /* This is the ELF header.  We read it in `open_verify'. */
    const ElfW(Ehdr) * header = (ElfW(Ehdr) *) start_addr;

    l->l_entry = header->e_entry;
    l->l_phnum = header->e_phnum;
    l->l_map_start = (ElfW(Addr)) start_addr;

    ElfW(Phdr) * phdr = (void *) ((char *) start_addr + header->e_phoff);
    const ElfW(Phdr) * ph;
    ElfW(Addr) map_start = 0, map_end = 0;

    /* Compute the extent of the mapping from the loadable segments. */
    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
        if (ph->p_type == PT_LOAD) {
            if (!map_start || ph->p_vaddr < map_start)
                map_start = ph->p_vaddr;
            if (!map_end || ph->p_vaddr + ph->p_memsz > map_end)
                map_end = ph->p_vaddr + ph->p_memsz;
        }

    l->l_addr = l->l_map_start - map_start;
    l->l_map_end = l->l_addr + map_end;

    for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
        switch (ph->p_type) {
            /* These entries tell us where to find things once the file's
               segments are mapped in.  We record the addresses it says
               verbatim, and later correct for the run-time load address. */
            case PT_DYNAMIC:
                l->l_ld = l->l_real_ld = (ElfW(Dyn) *)
                    ((char *) l->l_addr + ph->p_vaddr);
                l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
                break;

            case PT_PHDR:
                l->l_phdr = (ElfW(Phdr) *) ((char *) l->l_addr + ph->p_vaddr);
                break;

            case PT_GNU_RELRO:
                l->l_relro_addr = l->l_addr + ph->p_vaddr;
                l->l_relro_size = ph->p_memsz;
                break;
        }

    _DkDebugAddMap(l);
#endif
}

void DkDebugDetachBinary (PAL_PTR start_addr)
{
#ifdef DEBUG
    for (struct link_map * l = loaded_maps; l; l = l->l_next)
        if (l->l_map_start == (ElfW(Addr)) start_addr) {
            _DkDebugDelMap(l);
            if (l->l_type == OBJECT_EXTERNAL)
                free_elf_object(l);
            break;
        }
#endif
}

#ifndef CALL_ENTRY
#ifdef __x86_64__
void * stack_before_call __attribute_unused = NULL;
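
/* Call the ELF entry point of L on a fresh stack of COOKIES: the current
   stack pointer is stashed in stack_before_call, a return address pointing
   at the local label 1: is left in %rdx, %rsp is switched to the cookie
   array, and control jumps to l_entry; if the entry point returns there,
   the original stack is restored. */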
#define CALL_ENTRY(l, cookies)                                          \
    ({  long ret;                                                       \
        asm volatile("movq %%rsp, stack_before_call(%%rip)\r\n"         \
                     "leaq 1f(%%rip), %%rdx\r\n"                        \
                     "movq %2, %%rsp\r\n"                               \
                     "jmp *%1\r\n"                                      \
                     "1: movq stack_before_call(%%rip), %%rsp\r\n"      \
                                                                        \
                     : "=a"(ret) : "a"(l->l_entry), "b"(cookies)        \
                     : "rcx", "rdx", "rdi", "rsi", "r8", "r9",          \
                       "r10", "r11", "memory");                         \
        ret; })
#else
# error "unsupported architecture"
#endif
#endif /* !CALL_ENTRY */

void start_execution (const char * first_argument, const char ** arguments,
                      const char ** environs)
{
    /* First we will try to run all the preloaded libraries that come with
       entry points */
    if (exec_map) {
        __pal_control.executable_range.start = (PAL_PTR) exec_map->l_map_start;
        __pal_control.executable_range.end = (PAL_PTR) exec_map->l_map_end;
    }

#if PROFILING == 1
    unsigned long before_tail = _DkSystemTimeQuery();
#endif

    int narguments = 0;
    if (first_argument)
        narguments++;
    for (const char ** a = arguments; *a ; a++, narguments++);

    /* Let's count the number of cookies; first we will have argc & argv */
    int ncookies = narguments + 3; /* 1 for argc, narguments + 1 for argv
                                      and its NULL terminator, 1 spare */

    /* Then we count envp */
    for (const char ** e = environs; *e; e++)
        ncookies++;
    ncookies++; /* for the NULL terminator */

    int cookiesz = sizeof(unsigned long int) * ncookies
                   + sizeof(ElfW(auxv_t)) * 6
                   + sizeof(void *) * 4 + 16;
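
    /* The cookies form the initial stack expected by a System V ELF entry
       point:

           argc
           argv[0] .. argv[argc-1]
           NULL
           envp[0] ..
           NULL
           auxv[]  (AT_PHDR, AT_PHNUM, AT_PAGESZ, AT_ENTRY, AT_BASE, AT_NULL)
     */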
    unsigned long int * cookies = __alloca(cookiesz);
    int cnt = 0;

    /* Copy in the cookies */
    cookies[cnt++] = (unsigned long int) narguments;

    if (first_argument)
        cookies[cnt++] = (unsigned long int) first_argument;

    for (int i = 0 ; arguments[i] ; i++)
        cookies[cnt++] = (unsigned long int) arguments[i];
    cookies[cnt++] = 0;

    for (int i = 0 ; environs[i]; i++)
        cookies[cnt++] = (unsigned long int) environs[i];
    cookies[cnt++] = 0;

    ElfW(auxv_t) * auxv = (ElfW(auxv_t) *) &cookies[cnt];
    auxv[0].a_type = AT_PHDR;
    auxv[0].a_un.a_val = exec_map ? (unsigned long) exec_map->l_phdr : 0;
    auxv[1].a_type = AT_PHNUM;
    auxv[1].a_un.a_val = exec_map ? exec_map->l_phnum : 0;
    auxv[2].a_type = AT_PAGESZ;
    auxv[2].a_un.a_val = __pal_control.pagesize;
    auxv[3].a_type = AT_ENTRY;
    auxv[3].a_un.a_val = exec_map ? exec_map->l_entry : 0;
    auxv[4].a_type = AT_BASE;
    auxv[4].a_un.a_val = exec_map ? exec_map->l_addr : 0;
    auxv[5].a_type = AT_NULL;

    *(void **) &auxv[6] = NULL;

#if PROFILING == 1
    __pal_control.startup_time = _DkSystemTimeQuery() - pal_state.start_time;
    __pal_control.tail_startup_time =
        pal_state.tail_startup_time += _DkSystemTimeQuery() - before_tail;
#endif

    /* Run the entry points of the preloaded libraries in reverse order,
       then the executable itself. */
    struct link_map * l = loaded_maps;
    for (; l->l_next ; l = l->l_next);

    for (; l ; l = l->l_prev)
        if (l->l_type == OBJECT_PRELOAD && l->l_entry)
            CALL_ENTRY(l, cookies);

    if (exec_map)
        CALL_ENTRY(exec_map, cookies);

    _DkThreadExit();
}