shim_rtld.c 52 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 OSCAR lab, Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_rtld.c
  17. *
  18. * This file contains codes for dynamic loading of ELF binaries in library OS.
  19. * It's especially used for loading interpreter (ld.so, in general) and
  20. * optimization of execve.
  21. * Most of the source codes are imported from GNU C library.
  22. */
  23. #include <shim_internal.h>
  24. #include <shim_table.h>
  25. #include <shim_utils.h>
  26. #include <shim_handle.h>
  27. #include <shim_thread.h>
  28. #include <shim_fs.h>
  29. #include <shim_vma.h>
  30. #include <shim_checkpoint.h>
  31. #include <shim_profile.h>
  32. #include <errno.h>
  33. #include <fcntl.h>
  34. #include <asm/prctl.h>
  35. #include <asm/mman.h>
  36. #include "ldsodefs.h"
  37. #include "elf.h"
  38. #ifndef DT_THISPROCNUM
  39. # define DT_THISPROCNUM 0
  40. #endif
  41. typedef ElfW(Word) Elf_Symndx;
  42. #define BOOKKEEP_INTERNAL_OBJ 0
/* How an object is (or was) brought into the address space.  The type is
   consulted by __map_elf_object to decide whether segments are actually
   mapped from the file and whether they are recorded in the VMA
   bookkeeping. */
enum object_type {
    OBJECT_INTERNAL = 0,  /* part of the library OS itself; bookkeeping is
                             skipped unless BOOKKEEP_INTERNAL_OBJ is set */
    OBJECT_LOAD     = 1,  /* map segments in from the backing file */
    OBJECT_MAPPED   = 2,  /* segments already mapped; only do bookkeeping */
    OBJECT_REMAP    = 3,  /* re-map the segments of an existing link_map
                             (see __map_elf_object's `remap' parameter) */
    OBJECT_USER     = 4,  /* user-controlled mapping; skip bookkeeping */
};
/* Structure describing a loaded shared object.  The `l_next' and `l_prev'
   members form a chain of all the shared objects loaded at startup.
   These data structures exist in space used by the run-time dynamic linker;
   modifying them may have disastrous results.
   This data structure might change in future, if necessary.  User-level
   programs must avoid defining objects of this type. */
/* This is a simplified link_map structure */
struct link_map {
    /* These first few members are part of the protocol with the debugger.
       This is the same format used in SVR4. */
    ElfW(Addr) l_addr;      /* Base address shared object is loaded at. */
    const char * l_name;    /* Absolute file name object was found in. */
    ElfW(Dyn) * l_real_ld;  /* Dynamic section of the shared object. */
    struct link_map * l_next, * l_prev;  /* Chain of loaded objects. */
    /* All following members are internal to the dynamic linker.
       They may change without notice. */
    ElfW(Dyn) * l_ld;       /* Private copy of the dynamic section
                               (remalloc'ed in __map_elf_object). */
    char * l_soname;        /* DT_SONAME string, if the object has one. */
    /* Pointers into the dynamic section, indexed by tag; filled in by
       elf_get_dynamic_info(). */
    ElfW(Dyn) *l_info[DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM
                      + DT_EXTRANUM + DT_VALNUM + DT_ADDRNUM];
    const ElfW(Phdr) *l_phdr;  /* Pointer to program header table in core. */
    ElfW(Addr) l_entry;     /* Entry point location. */
    ElfW(Half) l_phnum;     /* Number of program header entries. */
    ElfW(Half) l_ldnum;     /* Number of dynamic segment entries. */
    /* Start and finish of memory map for this object.  l_map_start
       need not be the same as l_addr. */
    ElfW(Addr) l_map_start, l_map_end;
    /* NOTE(review): l_resolved/l_resolved_map are not touched in this part
       of the file; presumably set once relocation is done — confirm against
       the relocation code. */
    bool l_resolved;
    ElfW(Addr) l_resolved_map;
    const char * l_interp_libname;  /* PT_INTERP string (requested ld.so). */
    ElfW(Addr) l_main_entry;
    /* Information used to change permission after the relocations are
       done. */
    ElfW(Addr) l_relro_addr;
    size_t l_relro_size;
    /* For DT_HASH */
    Elf_Symndx l_nbuckets;
    const Elf_Symndx * l_buckets;
    const Elf_Symndx * l_chain;
    /* For DT_GNU_HASH */
    Elf32_Word l_gnu_bitmask_idxbits;
    Elf32_Word l_gnu_shift;
    const ElfW(Addr) * l_gnu_bitmask;
    const Elf32_Word * l_gnu_buckets;
    const Elf32_Word * l_gnu_chain_zero;
    /* pointer to related file */
    struct shim_handle * l_file;
    enum object_type l_type;
#define MAX_LOADCMDS 4
    /* PT_LOAD commands collected from the program headers; addresses are
       object-relative (add l_addr to get the runtime address). */
    struct loadcmd {
        ElfW(Addr) mapstart, mapend, dataend, allocend;
        off_t mapoff;
        int prot, flags;
        struct shim_vma * vma;
    } loadcmds[MAX_LOADCMDS];
    int nloadcmds;
    /* Regions temporarily made writable for text relocations; recorded by
       protect_page() and restored/freed by reprotect_map(). */
    struct textrel {
        ElfW(Addr) start, end;
        int prot;
        struct textrel * next;
    } * textrels;
};
  112. struct link_map * lookup_symbol (const char * undef_name, ElfW(Sym) ** ref);
  113. static struct link_map * loaded_libraries = NULL;
  114. static struct link_map * internal_map = NULL, * interp_map = NULL;
  115. /* This macro is used as a callback from the ELF_DYNAMIC_RELOCATE code. */
  116. static ElfW(Addr) resolve_map (const char ** strtab, ElfW(Sym) ** ref)
  117. {
  118. if (ELFW(ST_BIND) ((*ref)->st_info) != STB_LOCAL) {
  119. struct link_map *l = lookup_symbol ((*strtab) + (*ref)->st_name, ref);
  120. if (l) {
  121. *strtab = (const void *) D_PTR (l->l_info[DT_STRTAB]);
  122. return l->l_addr;
  123. }
  124. }
  125. return 0;
  126. }
/* Temporarily make the page(s) covering [addr, addr+size) readable and
   writable so a text relocation can be applied, remembering the region and
   its original protection in l->textrels so reprotect_map() can restore it
   later.  Returns 0 on success (or when nothing needs doing), a negative
   error code otherwise. */
static int protect_page (struct link_map * l, void * addr, size_t size)
{
    /* Find the load command whose mapped range contains the region, to
       learn the protection the segment was originally mapped with. */
    struct loadcmd * c = l->loadcmds;
    int prot = 0;

    for ( ; c < &l->loadcmds[l->nloadcmds] ; c++)
        if ((void *) l->l_addr + c->mapstart <= addr &&
            addr + size <= (void *) l->l_addr + c->mapend)
            break;

    if (c < &l->loadcmds[l->nloadcmds])
        prot = c->prot;

    /* If the region is already covered by a recorded textrel, it is
       already writable; nothing to do.  Otherwise LOC ends up pointing at
       the list tail so a new record can be appended below. */
    struct textrel * t = l->textrels, ** loc = &l->textrels;

    for ( ; t ; t = t->next) {
        if ((void *) t->start <= addr && addr + size <= (void *) t->end)
            return 0;
        loc = &t->next;
    }

    if ((prot & (PROT_READ|PROT_WRITE)) == (PROT_READ|PROT_WRITE)) {
        struct shim_vma * vma = NULL;

        /* the actual protection of the vma might be changed */
        if (lookup_supervma(addr, size, &vma) < 0)
            return 0;

        prot = vma->prot;
        put_vma(vma);

        if ((prot & (PROT_READ|PROT_WRITE)) == (PROT_READ|PROT_WRITE))
            return 0;
    }

    /* Widen to page granularity before changing protection. */
    void * start = ALIGN_DOWN(addr);
    void * end = ALIGN_UP(addr + size);

    if (!DkVirtualMemoryProtect(start, end - start,
                                PAL_PROT_READ|PAL_PROT_WRITE))
        return -PAL_ERRNO;

    /* NOTE(review): C can never be NULL here — after the search above it is
       either a valid entry or &l->loadcmds[l->nloadcmds].  This was probably
       meant to test `c == &l->loadcmds[l->nloadcmds]' (no matching segment);
       as written a textrel is always recorded.  Confirm intent. */
    if (!c)
        return 0;

    t = malloc(sizeof(struct textrel));
    if (!t)
        return -ENOMEM;

    /* Remember the widened region and its pre-relaxation protection. */
    t->start = (ElfW(Addr)) start;
    t->end = (ElfW(Addr)) end;
    t->prot = prot;
    t->next = NULL;
    *loc = t;
    return 0;
}
/* Undo every protection change recorded by protect_page(): restore each
   region's original protection and free the textrel records.  Returns 0 on
   success, or -PAL_ERRNO after the first failed protection change (any
   remaining records are left on the list). */
static int reprotect_map (struct link_map * l)
{
    struct textrel * t = l->textrels, * next;
    int ret = 0;

    while (t) {
        /* Look up the load command covering this region, used only to gate
           the protection change below. */
        struct loadcmd * c = l->loadcmds;

        for ( ; c < &l->loadcmds[l->nloadcmds] ; c++)
            if (l->l_addr + c->mapstart <= t->start &&
                t->end <= l->l_addr + c->mapend)
                break;

        /* Copy out what we need, then unlink and free the record before
           touching memory protections, keeping the list consistent. */
        ElfW(Addr) start = t->start, end = t->end;
        int prot = t->prot;

        next = t->next;
        free(t);
        t = next;
        l->textrels = t;

        /* NOTE(review): C can never be NULL — it is either a valid entry or
           &l->loadcmds[l->nloadcmds]; the check was likely meant to be
           `c != &l->loadcmds[l->nloadcmds]'.  As written the protection is
           always restored. */
        if (c && !DkVirtualMemoryProtect((void *) start, end - start, prot)) {
            ret = -PAL_ERRNO;
            break;
        }
    }

    return ret;
}
  193. #define RESOLVE_MAP(strtab, ref) resolve_map(strtab, ref)
  194. #define PROTECT_PAGE(map, addr, size) protect_page(map, addr, size)
  195. #define USE__THREAD 0 /* disable TLS support */
  196. #include "rel.h"
  197. struct link_map * new_elf_object (const char * realname, int type)
  198. {
  199. struct link_map *new;
  200. new = (struct link_map *) malloc (sizeof (struct link_map));
  201. if (new == NULL)
  202. return NULL;
  203. /* We apparently expect this to be zeroed. */
  204. memset(new, 0, sizeof(struct link_map));
  205. new->l_name = realname;
  206. new->l_type = type;
  207. return new;
  208. }
  209. #include <endian.h>
  210. #if BYTE_ORDER == BIG_ENDIAN
  211. # define byteorder ELFDATA2MSB
  212. #elif BYTE_ORDER == LITTLE_ENDIAN
  213. # define byteorder ELFDATA2LSB
  214. #else
  215. # error "Unknown BYTE_ORDER " BYTE_ORDER
  216. # define byteorder ELFDATANONE
  217. #endif
  218. #if __WORDSIZE == 32
  219. # define FILEBUF_SIZE 512
  220. #else
  221. # define FILEBUF_SIZE 832
  222. #endif
/* Cache the location of MAP's hash table.  Prefers the GNU-style hash
   section (DT_GNU_HASH) when present, falling back to the classic SysV
   DT_HASH table; if neither exists the map is left without hash data. */
void setup_elf_hash (struct link_map * map)
{
    Elf_Symndx * hash;

    if (__builtin_expect (map->l_info[DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM
                                      + DT_THISPROCNUM + DT_VERSIONTAGNUM
                                      + DT_EXTRANUM + DT_VALNUM] != NULL, 1)) {
        /* GNU hash layout: nbuckets, symbias, bitmask word count, shift,
           then the bloom-filter bitmask words, the bucket array, and the
           chain array (biased by symbias). */
        Elf32_Word * hash32
            = (void *) D_PTR (map->l_info[DT_ADDRTAGIDX (DT_GNU_HASH) + DT_NUM
                                          + DT_THISPROCNUM + DT_VERSIONTAGNUM
                                          + DT_EXTRANUM + DT_VALNUM]);

        map->l_nbuckets = *hash32++;
        Elf32_Word symbias = *hash32++;
        Elf32_Word bitmask_nwords = *hash32++;
        /* Must be a power of two.  */
        assert ((bitmask_nwords & (bitmask_nwords - 1)) == 0);
        map->l_gnu_bitmask_idxbits = bitmask_nwords - 1;
        map->l_gnu_shift = *hash32++;

        map->l_gnu_bitmask = (ElfW(Addr) *) hash32;
        /* The bitmask is stored as native words, so the 32-bit word count
           is scaled by the native word size. */
        hash32 += __ELF_NATIVE_CLASS / 32 * bitmask_nwords;

        map->l_gnu_buckets = hash32;
        hash32 += map->l_nbuckets;
        /* Bias the chain pointer so it can be indexed by symbol index
           directly. */
        map->l_gnu_chain_zero = hash32 - symbias;
        return;
    }

    if (!map->l_info[DT_HASH])
        return;

    hash = (void *) D_PTR (map->l_info[DT_HASH]);

    /* Structure of DT_HASH:
       The bucket array forms the hash table itself.  The entries in the
       chain array parallel the symbol table.
       [ nbucket           ]
       [ nchain            ]
       [ bucket[0]         ]
       [ ...               ]
       [ bucket[nbucket-1] ]
       [ chain[0]          ]
       [ ...               ]
       [ chain[nchain-1]   ] */
    map->l_nbuckets = *hash++;
    hash++;   /* skip nchain */
    map->l_buckets = hash;
    hash += map->l_nbuckets;
    map->l_chain = hash;
}
  268. /* Map in the shared object NAME, actually located in REALNAME, and already
  269. opened on FD */
  270. static struct link_map *
  271. __map_elf_object (struct shim_handle * file,
  272. const void * fbp, size_t fbp_len, void * addr, int type,
  273. struct link_map * remap)
  274. {
  275. if (file && (!file->fs || !file->fs->fs_ops))
  276. return NULL;
  277. int (*read) (struct shim_handle *, void *, size_t) =
  278. file ? file->fs->fs_ops->read : NULL;
  279. int (*mmap) (struct shim_handle *, void **, size_t, int, int, off_t) =
  280. file ? file->fs->fs_ops->mmap : NULL;
  281. int (*seek) (struct shim_handle *, off_t, int) =
  282. file ? file->fs->fs_ops->seek : NULL;
  283. if (file && (!read || !mmap || !seek))
  284. return NULL;
  285. struct link_map * l = remap ? :
  286. new_elf_object(file ? (!qstrempty(&file->path) ?
  287. qstrgetstr(&file->path) :
  288. qstrgetstr(&file->uri)) : "", type);
  289. const char * errstring __attribute__((unused)) = NULL;
  290. int errval = 0;
  291. int ret;
  292. if (type != OBJECT_INTERNAL && !file) {
  293. errstring = "shared object has to be backed by file";
  294. errval = -EINVAL;
  295. call_lose:
  296. debug("loading %s: %s (%e)\n", l->l_name, errstring, errval);
  297. return NULL;
  298. }
  299. /* Scan the program header table, collecting its load commands. */
  300. struct loadcmd * c = l->loadcmds;
  301. /* This is the ELF header. We read it in `open_verify'. */
  302. const ElfW(Ehdr) * header = fbp;
  303. if (type == OBJECT_REMAP)
  304. goto do_remap;
  305. /* Extract the remaining details we need from the ELF header
  306. and then read in the program header table. */
  307. l->l_addr = (ElfW(Addr)) addr;
  308. l->l_entry = header->e_entry;
  309. int e_type = header->e_type;
  310. l->l_phnum = header->e_phnum;
  311. size_t maplength = header->e_phnum * sizeof (ElfW(Phdr));
  312. const ElfW(Phdr) * phdr = (fbp + header->e_phoff);
  313. if (type == OBJECT_LOAD &&
  314. header->e_phoff + maplength <= (size_t) fbp_len) {
  315. ElfW(Phdr) * new_phdr = (ElfW(Phdr) *) malloc (maplength);
  316. if ((ret = (*seek) (file, header->e_phoff, SEEK_SET)) < 0 ||
  317. (ret = (*read) (file, new_phdr, maplength)) < 0) {
  318. errstring = "cannot read file data";
  319. errval = ret;
  320. goto call_lose;
  321. }
  322. phdr = new_phdr;
  323. }
  324. l->nloadcmds = 0;
  325. bool has_holes = false;
  326. const ElfW(Phdr) * ph;
  327. for (ph = phdr; ph < &phdr[l->l_phnum]; ++ph)
  328. switch (ph->p_type) {
  329. /* These entries tell us where to find things once the file's
  330. segments are mapped in. We record the addresses it says
  331. verbatim, and later correct for the run-time load address. */
  332. case PT_DYNAMIC:
  333. l->l_ld = (void *) ph->p_vaddr;
  334. l->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
  335. break;
  336. case PT_INTERP:
  337. l->l_interp_libname = (const char *) ph->p_vaddr;
  338. break;
  339. case PT_PHDR:
  340. l->l_phdr = (void *) ph->p_vaddr;
  341. break;
  342. case PT_LOAD:
  343. /* A load command tells us to map in part of the file.
  344. We record the load commands and process them all later. */
  345. if (__builtin_expect (!ALIGNED(ph->p_align), 0)) {
  346. errstring = "ELF load command alignment not page-aligned";
  347. errval = ENOMEM;
  348. goto call_lose;
  349. }
  350. if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
  351. & (ph->p_align - 1)) != 0, 0)) {
  352. errstring = "\
  353. ELF load command address/offset not properly aligned";
  354. errval = ENOMEM;
  355. goto call_lose;
  356. }
  357. if (l->nloadcmds >= MAX_LOADCMDS) {
  358. errstring = "too many load commamds";
  359. errval = -EINVAL;
  360. goto call_lose;
  361. }
  362. c = &l->loadcmds[l->nloadcmds++];
  363. c->mapstart = ALIGN_DOWN(ph->p_vaddr);
  364. c->mapend = ALIGN_UP(ph->p_vaddr + ph->p_filesz);
  365. c->dataend = ph->p_vaddr + ph->p_filesz;
  366. c->allocend = ph->p_vaddr + ph->p_memsz;
  367. c->mapoff = ALIGN_DOWN(ph->p_offset);
  368. /* Determine whether there is a gap between the last segment
  369. and this one. */
  370. if (l->nloadcmds > 1 && c[-1].mapend != c->mapstart)
  371. has_holes = true;
  372. /* Optimize a common case. */
  373. #if (PF_R | PF_W | PF_X) == 7 && (PROT_READ | PROT_WRITE | PROT_EXEC) == 7
  374. c->prot = (PF_TO_PROT
  375. >> ((ph->p_flags & (PF_R | PF_W | PF_X)) * 4)) & 0xf;
  376. #else
  377. c->prot = 0;
  378. if (ph->p_flags & PF_R)
  379. c->prot |= PROT_READ;
  380. if (ph->p_flags & PF_W)
  381. c->prot |= PROT_WRITE;
  382. if (ph->p_flags & PF_X)
  383. c->prot |= PROT_EXEC;
  384. #endif
  385. c->flags = MAP_PRIVATE|MAP_FILE;
  386. break;
  387. case PT_GNU_RELRO:
  388. l->l_relro_addr = ph->p_vaddr;
  389. l->l_relro_size = ph->p_memsz;
  390. break;
  391. }
  392. if (__builtin_expect (l->nloadcmds == 0, 0)) {
  393. /* This only happens for a bogus object that will be caught with
  394. another error below. But we don't want to go through the
  395. calculations below using NLOADCMDS - 1. */
  396. errstring = "object file has no loadable segments";
  397. goto call_lose;
  398. }
  399. c = &l->loadcmds[0];
  400. /* Length of the sections to be loaded. */
  401. maplength = l->loadcmds[l->nloadcmds - 1].allocend - c->mapstart;
  402. if (__builtin_expect (e_type, ET_DYN) == ET_DYN) {
  403. /* This is a position-independent shared object. We can let the
  404. kernel map it anywhere it likes, but we must have space for all
  405. the segments in their specified positions relative to the first.
  406. So we map the first segment without MAP_FIXED, but with its
  407. extent increased to cover all the segments. Then we remove
  408. access from excess portion, and there is known sufficient space
  409. there to remap from the later segments.
  410. As a refinement, sometimes we have an address that we would
  411. prefer to map such objects at; but this is only a preference,
  412. the OS can do whatever it likes. */
  413. ElfW(Addr) mappref = 0;
  414. if (type == OBJECT_LOAD) {
  415. if (addr)
  416. mappref = (ElfW(Addr)) c->mapstart + (ElfW(Addr)) addr;
  417. else
  418. mappref = (ElfW(Addr)) get_unmapped_vma(ALIGN_UP(maplength),
  419. MAP_PRIVATE|MAP_ANONYMOUS);
  420. /* Remember which part of the address space this object uses. */
  421. errval = (*mmap) (file, (void **) &mappref, ALIGN_UP(maplength),
  422. c->prot, c->flags|MAP_PRIVATE, c->mapoff);
  423. if (__builtin_expect (errval < 0, 0)) {
  424. map_error:
  425. errstring = "failed to map segment from shared object";
  426. goto call_lose;
  427. }
  428. } else {
  429. mappref = (ElfW(Addr)) addr;
  430. }
  431. l->l_map_start = mappref;
  432. l->l_map_end = l->l_map_start + maplength;
  433. #if BOOKKEEP_INTERNAL_OBJ == 0
  434. if (type != OBJECT_INTERNAL && type != OBJECT_USER)
  435. #else
  436. if (type != OBJECT_USER)
  437. #endif
  438. bkeep_mmap((void *) mappref, ALIGN_UP(maplength), c->prot,
  439. c->flags|MAP_PRIVATE|
  440. (type == OBJECT_INTERNAL ? VMA_INTERNAL : 0),
  441. file, c->mapoff, NULL);
  442. l->l_addr = l->l_map_start - c->mapstart;
  443. if (has_holes) {
  444. /* Change protection on the excess portion to disallow all access;
  445. the portions we do not remap later will be inaccessible as if
  446. unallocated. Then jump into the normal segment-mapping loop to
  447. handle the portion of the segment past the end of the file
  448. mapping. */
  449. if (type == OBJECT_LOAD)
  450. DkVirtualMemoryProtect((void *) RELOCATE(l, c->mapend),
  451. l->loadcmds[l->nloadcmds - 1].mapstart -
  452. c->mapend, PAL_PROT_NONE);
  453. if (type == OBJECT_MAPPED ||
  454. #if BOOKKEEP_INTERNAL_OBJ == 1
  455. type == OBJECT_INTERNAL ||
  456. #endif
  457. type == OBJECT_LOAD) {
  458. #if BOOKKEEP_INTERNAL_OBJ == 1
  459. int flags = (type == OBJECT_INTERNVAL) ? VMA_INTERVAL : 0;
  460. #else
  461. int flags = 0;
  462. #endif
  463. bkeep_mprotect((void *) RELOCATE(l, c->mapend),
  464. l->loadcmds[l->nloadcmds - 1].mapstart -
  465. c->mapend, PROT_NONE, &flags);
  466. }
  467. }
  468. goto postmap;
  469. }
  470. /* Remember which part of the address space this object uses. */
  471. l->l_addr = 0;
  472. l->l_map_start = c->mapstart;
  473. l->l_map_end = l->l_map_start + maplength;
  474. do_remap:
  475. while (c < &l->loadcmds[l->nloadcmds]) {
  476. if (c->mapend > c->mapstart) {
  477. /* Map the segment contents from the file. */
  478. void * mapaddr = (void *) RELOCATE(l, c->mapstart);
  479. if (type == OBJECT_LOAD || type == OBJECT_REMAP) {
  480. if ((*mmap) (file, &mapaddr, c->mapend - c->mapstart, c->prot,
  481. c->flags|MAP_FIXED|MAP_PRIVATE, c->mapoff) < 0)
  482. goto map_error;
  483. }
  484. #if BOOKKEEP_INTERNAL_OBJ == 0
  485. if (type != OBJECT_INTERNAL && type != OBJECT_USER)
  486. #else
  487. if (type != OBJECT_USER)
  488. #endif
  489. bkeep_mmap(mapaddr, c->mapend - c->mapstart, c->prot,
  490. c->flags|MAP_FIXED|MAP_PRIVATE|
  491. (type == OBJECT_INTERNAL ? VMA_INTERNAL : 0),
  492. file, c->mapoff, NULL);
  493. }
  494. postmap:
  495. if (l->l_phdr == 0
  496. && (ElfW(Off)) c->mapoff <= header->e_phoff
  497. && ((size_t) (c->mapend - c->mapstart + c->mapoff)
  498. >= header->e_phoff + header->e_phnum * sizeof (ElfW(Phdr))))
  499. /* Found the program header in this segment. */
  500. l->l_phdr = (void *) (c->mapstart + header->e_phoff - c->mapoff);
  501. if (c->allocend > c->dataend) {
  502. /* Extra zero pages should appear at the end of this segment,
  503. after the data mapped from the file. */
  504. ElfW(Addr) zero, zeroend, zeropage;
  505. zero = (ElfW(Addr)) RELOCATE(l, c->dataend);
  506. zeroend = ALIGN_UP((ElfW(Addr)) RELOCATE(l, c->allocend));
  507. zeropage = ALIGN_UP(zero);
  508. if (zeroend < zeropage)
  509. /* All the extra data is in the last page of the segment.
  510. We can just zero it. */
  511. zeropage = zeroend;
  512. if (type != OBJECT_MAPPED &&
  513. type != OBJECT_INTERNAL &&
  514. type != OBJECT_USER && zeropage > zero) {
  515. /* Zero the final part of the last page of the segment. */
  516. if (__builtin_expect ((c->prot & PROT_WRITE) == 0, 0)) {
  517. /* Dag nab it. */
  518. if (!DkVirtualMemoryProtect((caddr_t) ALIGN_DOWN(zero),
  519. allocsize,
  520. c->prot|PAL_PROT_WRITE)) {
  521. errstring = "cannot change memory protections";
  522. goto call_lose;
  523. }
  524. memset ((void *) zero, '\0', zeropage - zero);
  525. if (!DkVirtualMemoryProtect((caddr_t) ALIGN_DOWN(zero),
  526. allocsize, c->prot)) {
  527. errstring = "cannot change memory protections";
  528. goto call_lose;
  529. }
  530. } else {
  531. memset ((void *) zero, '\0', zeropage - zero);
  532. }
  533. }
  534. if (zeroend > zeropage) {
  535. if (type != OBJECT_MAPPED &&
  536. type != OBJECT_INTERNAL &&
  537. type != OBJECT_USER) {
  538. caddr_t mapat = (caddr_t)
  539. DkVirtualMemoryAlloc((caddr_t) zeropage,
  540. zeroend - zeropage,
  541. 0, c->prot);
  542. if (__builtin_expect (mapat < 0, 0)) {
  543. errstring = "cannot map zero-fill pages";
  544. goto call_lose;
  545. }
  546. }
  547. #if BOOKKEEP_INTERNAL_OBJ == 0
  548. if (type != OBJECT_INTERNAL && type != OBJECT_USER)
  549. #else
  550. if (type != OBJECT_USER)
  551. #endif
  552. bkeep_mmap((void *) zeropage, zeroend - zeropage, c->prot,
  553. MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED|
  554. (type == OBJECT_INTERNAL ? VMA_INTERNAL : 0),
  555. NULL, 0, 0);
  556. }
  557. }
  558. ++c;
  559. }
  560. if (type == OBJECT_REMAP)
  561. return l;
  562. if (l->l_ld == 0) {
  563. if (__builtin_expect(e_type == ET_DYN, 0)) {
  564. errstring = "object file has no dynamic section";
  565. goto call_lose;
  566. }
  567. } else {
  568. l->l_ld = (ElfW(Dyn) *) RELOCATE(l, l->l_ld);
  569. }
  570. l->l_real_ld = l->l_ld;
  571. l->l_ld = remalloc(l->l_ld, sizeof(ElfW(Dyn)) * l->l_ldnum);
  572. elf_get_dynamic_info(l);
  573. /* When we profile the SONAME might be needed for something else but
  574. loading. Add it right away. */
  575. if (l->l_info[DT_STRTAB] && l->l_info[DT_SONAME])
  576. l->l_soname = (char *) (D_PTR (l->l_info[DT_STRTAB])
  577. + D_PTR (l->l_info[DT_SONAME]));
  578. if (l->l_phdr == NULL) {
  579. /* The program header is not contained in any of the segments.
  580. We have to allocate memory ourself and copy it over from out
  581. temporary place. */
  582. ElfW(Phdr) * newp = (ElfW(Phdr) *) malloc(header->e_phnum
  583. * sizeof (ElfW(Phdr)));
  584. if (newp == NULL) {
  585. errstring = "cannot allocate memory for program header";
  586. goto call_lose;
  587. }
  588. l->l_phdr = memcpy (newp, phdr,
  589. (header->e_phnum * sizeof (ElfW(Phdr))));
  590. } else {
  591. /* Adjust the PT_PHDR value by the runtime load address. */
  592. l->l_phdr = (ElfW(Phdr) *) RELOCATE(l, l->l_phdr);
  593. }
  594. l->l_entry = RELOCATE(l, l->l_entry);
  595. /* Set up the symbol hash table. */
  596. setup_elf_hash(l);
  597. return l;
  598. }
  599. static inline
  600. struct link_map * __search_map_by_name (const char * name)
  601. {
  602. struct link_map * l = loaded_libraries;
  603. int len = strlen(name);
  604. while (l) {
  605. if (l->l_name && !memcmp(l->l_name, name, len + 1))
  606. break;
  607. l = l->l_next;
  608. }
  609. return l;
  610. }
  611. static inline
  612. struct link_map * __search_map_by_handle (struct shim_handle * file)
  613. {
  614. struct link_map * l = loaded_libraries;
  615. while (l) {
  616. if (l->l_file == file)
  617. break;
  618. l = l->l_next;
  619. }
  620. return l;
  621. }
  622. static inline
  623. struct link_map * __search_map_by_addr (void * addr)
  624. {
  625. struct link_map * l = loaded_libraries;
  626. while (l) {
  627. if ((void *) l->l_map_start == addr)
  628. break;
  629. l = l->l_next;
  630. }
  631. return l;
  632. }
  633. static int __remove_elf_object (struct link_map * l)
  634. {
  635. if (l->l_prev)
  636. l->l_prev->l_next = l->l_next;
  637. if (l->l_next)
  638. l->l_next->l_prev = l->l_prev;
  639. remove_r_debug((void *) l->l_addr);
  640. if (loaded_libraries == l)
  641. loaded_libraries = l->l_next;
  642. if (interp_map == l)
  643. interp_map = NULL;
  644. free(l);
  645. return 0;
  646. }
/* Unmap every segment of L — both the file-backed portions and the
   anonymous zero-fill pages past them — then unlink and free the link_map
   via __remove_elf_object().  Always returns 0. */
static int __free_elf_object (struct link_map * l)
{
    debug("removing %s as runtime object at %p\n", l->l_name, l->l_map_start);

    struct loadcmd *c = l->loadcmds;

    while (c < &l->loadcmds[l->nloadcmds]) {
        if (c->mapend > c->mapstart)
            /* Unmap the segment contents from the file. */
            shim_do_munmap ((void *) l->l_addr + c->mapstart,
                            c->mapend - c->mapstart);

        if (c->allocend > c->dataend) {
            /* Extra zero pages should appear at the end of this segment,
               after the data mapped from the file. */
            ElfW(Addr) zero, zeroend, zeropage;

            zero = l->l_addr + c->dataend;
            /* NOTE(review): __map_elf_object rounds ZEROEND up to a page
               boundary before allocating the zero-fill pages; the missing
               ALIGN_UP here may leave the final partial page mapped —
               confirm against the mapping path. */
            zeroend = l->l_addr + c->allocend;
            zeropage = ALIGN_UP(zero);

            if (zeroend < zeropage)
                /* All the extra data is in the last page of the segment.
                   We can just zero it. */
                zeropage = zeroend;

            /* Unmap the anonymously allocated pages beyond the end of the
               file mapping. */
            if (zeroend > zeropage)
                shim_do_munmap((void *) zeropage, zeroend - zeropage);
        }

        ++c;
    }

    __remove_elf_object(l);

    return 0;
}
  675. int free_elf_object (struct shim_handle * file)
  676. {
  677. struct link_map * l = __search_map_by_handle(file);
  678. if (!l)
  679. return -ENOENT;
  680. __free_elf_object(l);
  681. put_handle(file);
  682. return 0;
  683. }
/* Validate the first LEN bytes at FBP as an ELF header acceptable to this
 * loader: correct magic/class/endianness/version/OSABI, matching machine,
 * type ET_DYN or ET_EXEC, and the expected phentsize.
 * Returns 0 if acceptable, -EINVAL otherwise. */
static int __check_elf_header (void * fbp, int len)
{
    /* Only read by debug(); unused when debugging is compiled out. */
    const char * errstring __attribute__((unused));

    /* Now we will start to verify the file as an ELF header.  This part
       of the code is borrowed from open_verify(). */
    ElfW(Ehdr) * ehdr = (ElfW(Ehdr) *) fbp;

    if (__builtin_expect (len < sizeof(ElfW(Ehdr)), 0)) {
        errstring = "ELF file with a strange size";
        goto verify_failed;
    }

#define ELF32_CLASS ELFCLASS32
#define ELF64_CLASS ELFCLASS64

    /* The e_ident bytes every acceptable object must carry. */
    static const unsigned char expected[EI_NIDENT] = {
        [EI_MAG0] = ELFMAG0,
        [EI_MAG1] = ELFMAG1,
        [EI_MAG2] = ELFMAG2,
        [EI_MAG3] = ELFMAG3,
        [EI_CLASS] = ELFW(CLASS),
        [EI_DATA] = byteorder,
        [EI_VERSION] = EV_CURRENT,
        [EI_OSABI] = ELFOSABI_SYSV,
        [EI_ABIVERSION] = 0
    };

    /* See whether the ELF header is what we expect. */
    if (__builtin_expect (memcmp (ehdr->e_ident, expected, EI_ABIVERSION) !=
                          0, 0)) {
        errstring = "ELF file with invalid header";
        goto verify_failed;
    }

    /* Check whether the ELF header uses the right endianness.  (Redundant
       with the memcmp above, which already covers EI_DATA, but produces a
       more precise error message.) */
    if (ehdr->e_ident[EI_DATA] != byteorder) {
        if (BYTE_ORDER == BIG_ENDIAN) {
            errstring = "ELF file data encoding not big-endian";
            goto verify_failed;
        } else {
            errstring = "ELF file data encoding not little-endian";
            goto verify_failed;
        }
    }

    /* Check that the header is of the right version. */
    if (ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
        errstring = "ELF file version ident does not match current one";
        goto verify_failed;
    }

    if (memcmp(&ehdr->e_ident[EI_PAD], &expected[EI_PAD],
               EI_NIDENT - EI_PAD) != 0) {
        errstring = "nonzero padding in e_ident";
        goto verify_failed;
    }

    if (__builtin_expect (ehdr->e_version, EV_CURRENT) != EV_CURRENT) {
        errstring = "ELF file version does not match current one";
        goto verify_failed;
    }

    /* Now we check if the host matches the ELF machine profile. */
    if (! __builtin_expect (elf_machine_matches_host (ehdr), 1)) {
        errstring = "ELF file does not match with the host";
        goto verify_failed;
    }

    /* Check that the type of the ELF header is either DYN or EXEC. */
    if (__builtin_expect (ehdr->e_type, ET_DYN) != ET_DYN
        && __builtin_expect (ehdr->e_type, ET_EXEC) != ET_EXEC) {
        errstring = "only ET_DYN and ET_EXEC can be loaded\n";
        goto verify_failed;
    }

    /* Check that phentsize matches the size of ElfW(Phdr). */
    if (__builtin_expect (ehdr->e_phentsize, sizeof (ElfW(Phdr)))
        != sizeof (ElfW(Phdr))) {
        errstring = "ELF file's phentsize not the expected size";
        goto verify_failed;
    }

    return 0;

verify_failed:
    debug("load runtime object: %s\n", errstring);
    return -EINVAL;
}
  759. static int __read_elf_header (struct shim_handle * file, void * fbp)
  760. {
  761. if (!file)
  762. return -EINVAL;
  763. if (!file->fs || !file->fs->fs_ops)
  764. return -EACCES;
  765. int (*read) (struct shim_handle *, void *, size_t) =
  766. file->fs->fs_ops->read;
  767. int (*seek) (struct shim_handle *, off_t, int) =
  768. file->fs->fs_ops->seek;
  769. if (!read || !seek)
  770. return -EACCES;
  771. (*seek) (file, 0, SEEK_SET);
  772. return (*read) (file, fbp, FILEBUF_SIZE);
  773. }
  774. static int __load_elf_header (struct shim_handle * file, void * fbp,
  775. int * plen)
  776. {
  777. int len = __read_elf_header(file, fbp);
  778. if (len < 0)
  779. return len;
  780. int ret = __check_elf_header(fbp, len);
  781. if (ret < 0)
  782. return ret;
  783. if (plen)
  784. *plen = len;
  785. return 0;
  786. }
/* Check whether *FILE is a loadable ELF object; return 0 if it is.
 * If the file instead starts with "#!", parse the shebang line, open the
 * interpreter it names, and replace *FILE with a handle to that
 * interpreter (flushing the old handle).  Other contents produce a
 * negative error code. */
int check_elf_object (struct shim_handle ** file)
{
    struct shim_handle * exec = *file;
    char fb[FILEBUF_SIZE];
    int ret;

    if (!exec)
        return -EINVAL;

    int len = __read_elf_header(exec, &fb);
    if (len < 0)
        return len;

    /* A valid ELF header (ret == 0) or any error other than -EINVAL is
       final; only -EINVAL ("not an ELF file") falls through to the
       script check below. */
    if (!(ret = __check_elf_header(&fb, len)) || ret != -EINVAL)
        return ret;

    if (memcmp(fb, "#!", 2))
        return -EACCES;

    /* Split the first line after "#!" into at most 16 space-separated
       words, terminating each in place. */
    const char * shargs[16];
    int shnargs = 0;
    char * p = fb + 2, * e = fb + len;

    while (p < e) {
        assert(shnargs < 16);
        char * np = p;
        while (np < e && *np != ' ' && *np != '\n')
            np++;
        /* NOTE(review): when np == e this reads (and below writes) one
           byte at fb[len]; safe only while len < FILEBUF_SIZE — confirm. */
        if (*np == '\n')
            e = np;
        *np = '\0';
        shargs[shnargs++] = p;
        p = np + 1;
    }

    if (!shnargs)
        return -EINVAL;

    debug("detected as script: run by %s\n", shargs[0]);

    /* Look up and open the interpreter named by the shebang. */
    struct shim_dentry * dent = NULL;

    if ((ret = path_lookupat(NULL, shargs[0], LOOKUP_OPEN, &dent)) < 0)
        return ret;

    if (!dent->fs || !dent->fs->d_ops ||
        !dent->fs->d_ops->open) {
        ret = -EACCES;
err:
        put_dentry(dent);
        return ret;
    }

    struct shim_handle * new_exe = get_new_handle();
    if (!new_exe) {
        ret = -ENOMEM;
        goto err;
    }

    set_handle_fs(new_exe, dent->fs);
    new_exe->flags = O_RDONLY;
    new_exe->acc_mode = MAY_READ;

    if ((ret = dent->fs->d_ops->open(new_exe, dent, O_RDONLY)) < 0)
        goto err;

    /* Hand the caller the interpreter handle in place of the script. */
    flush_handle(*file);
    *file = new_exe;
    return 0;
}
  842. static int __load_elf_object (struct shim_handle * file, void * addr,
  843. int type, struct link_map * remap);
  844. int load_elf_object (struct shim_handle * file, void * addr, size_t mapped)
  845. {
  846. if (!file)
  847. return -EINVAL;
  848. if (mapped)
  849. debug("adding %s as runtime object at %p-%p\n",
  850. file ? qstrgetstr(&file->uri) : "(unknown)", addr, addr + mapped);
  851. else
  852. debug("loading %s as runtime object at %p\n",
  853. file ? qstrgetstr(&file->uri) : "(unknown)", addr);
  854. return __load_elf_object(file, addr,
  855. mapped ? OBJECT_MAPPED : OBJECT_LOAD,
  856. NULL);
  857. }
  858. static void add_link_map (struct link_map * map)
  859. {
  860. struct link_map *prev = NULL;
  861. struct link_map **pprev = &loaded_libraries;
  862. struct link_map *next = loaded_libraries;
  863. while (next) {
  864. prev = next;
  865. pprev = &next->l_next;
  866. next = next->l_next;
  867. }
  868. *pprev = map;
  869. map->l_prev = prev;
  870. map->l_next = NULL;
  871. }
  872. static void replace_link_map (struct link_map * new, struct link_map * old)
  873. {
  874. new->l_next = old->l_next;
  875. new->l_prev = old->l_prev;
  876. if (old->l_next)
  877. old->l_next->l_prev = new;
  878. if (old->l_prev)
  879. old->l_prev->l_next = new;
  880. if (loaded_libraries == old)
  881. loaded_libraries = new;
  882. }
  883. static int do_relocate_object (struct link_map * l);
  884. static int __load_elf_object (struct shim_handle * file, void * addr,
  885. int type, struct link_map * remap)
  886. {
  887. char * hdr = addr;
  888. int len, ret = 0;
  889. if (type == OBJECT_LOAD || type == OBJECT_REMAP) {
  890. hdr = __alloca(FILEBUF_SIZE);
  891. if ((ret = __load_elf_header(file, hdr, &len)) < 0)
  892. goto out;
  893. }
  894. struct link_map * map = __map_elf_object(file, hdr, len, addr, type, remap);
  895. if (!map) {
  896. ret = -EINVAL;
  897. goto out;
  898. }
  899. if (type != OBJECT_INTERNAL)
  900. do_relocate_object(map);
  901. if (internal_map) {
  902. map->l_resolved = true;
  903. map->l_resolved_map = internal_map->l_addr;
  904. }
  905. if (type == OBJECT_INTERNAL)
  906. internal_map = map;
  907. if (type != OBJECT_REMAP) {
  908. if (file) {
  909. get_handle(file);
  910. map->l_file = file;
  911. }
  912. add_link_map(map);
  913. }
  914. if ((type == OBJECT_LOAD || type == OBJECT_REMAP || type == OBJECT_USER) &&
  915. map->l_file && !qstrempty(&map->l_file->uri)) {
  916. if (type == OBJECT_REMAP)
  917. remove_r_debug((void *) map->l_addr);
  918. append_r_debug(qstrgetstr(&map->l_file->uri), (void *) map->l_map_start,
  919. (void *) map->l_real_ld);
  920. }
  921. out:
  922. return ret;
  923. }
  924. int reload_elf_object (struct shim_handle * file)
  925. {
  926. struct link_map * map = loaded_libraries;
  927. while (map) {
  928. if (map->l_file == file)
  929. break;
  930. map = map->l_next;
  931. }
  932. if (!map)
  933. return -ENOENT;
  934. debug("reloading %s as runtime object at %p-%p\n",
  935. qstrgetstr(&file->uri), map->l_map_start, map->l_map_end);
  936. return __load_elf_object(file, NULL, OBJECT_REMAP, map);
  937. }
/* Result of a symbol lookup: the matching symbol-table entry and the
   object that defines it. */
struct sym_val {
    ElfW(Sym) * s;        /* matching symbol entry, or NULL */
    struct link_map * m;  /* map of the defining object */
};
  942. static uint_fast32_t elf_fast_hash (const char *s)
  943. {
  944. uint_fast32_t h = 5381;
  945. for (unsigned char c = *s; c != '\0'; c = *++s)
  946. h = h * 33 + c;
  947. return h & 0xffffffff;
  948. }
  949. /* This is the hashing function specified by the ELF ABI. In the
  950. first five operations no overflow is possible so we optimized it a
  951. bit. */
  952. static unsigned long int
  953. elf_hash (const char * name_arg)
  954. {
  955. const unsigned char * name = (const unsigned char *) name_arg;
  956. unsigned long int hash = 0;
  957. if (*name == '\0')
  958. return hash;
  959. hash = *name++;
  960. if (*name == '\0')
  961. return hash;
  962. hash = (hash << 4) + *name++;
  963. if (*name == '\0')
  964. return hash;
  965. hash = (hash << 4) + *name++;
  966. if (*name == '\0')
  967. return hash;
  968. hash = (hash << 4) + *name++;
  969. if (*name == '\0')
  970. return hash;
  971. hash = (hash << 4) + *name++;
  972. while (*name != '\0') {
  973. unsigned long int hi;
  974. hash = (hash << 4) + *name++;
  975. hi = hash & 0xf0000000;
  976. /* The algorithm specified in the ELF ABI is as follows:
  977. if (hi != 0)
  978. hash ^= hi >> 24;
  979. hash &= ~hi;
  980. But the following is equivalent and a lot faster, especially on
  981. modern processors. */
  982. hash ^= hi;
  983. hash ^= hi >> 24;
  984. }
  985. return hash;
  986. }
/* Look up UNDEF_NAME in MAP's symbol table, preferring the GNU hash table
 * (l_gnu_bitmask non-NULL) and falling back to the SysV one.  HASH is the
 * GNU fast hash of the name, ELF_HASH the SysV hash.  Returns the
 * defining symbol entry or NULL.  Uses a GCC nested function. */
static ElfW(Sym) *
do_lookup_map (ElfW(Sym) * ref, const char * undef_name,
               const uint_fast32_t hash, unsigned long int elf_hash,
               const struct link_map * map)
{
    /* These variables are used in the nested function. */
    Elf_Symndx symidx;
    ElfW(Sym) * sym;
    /* The tables for this map. */
    ElfW(Sym) * symtab = (void *) D_PTR (map->l_info[DT_SYMTAB]);
    const char * strtab = (const void *) D_PTR (map->l_info[DT_STRTAB]);

    int len = strlen(undef_name);

    /* Nested routine to check whether the symbol matches. */
    ElfW(Sym) * check_match (ElfW(Sym) * sym) {
        unsigned int stt = ELFW(ST_TYPE) (sym->st_info);

        /* Skip undefined entries and valueless non-TLS entries. */
        if (__builtin_expect ((sym->st_value == 0 /* No value. */
                               && stt != STT_TLS)
                              || sym->st_shndx == SHN_UNDEF, 0))
            return NULL;

/* Ignore all but STT_NOTYPE, STT_OBJECT, STT_FUNC,
   STT_COMMON, STT_TLS, and STT_GNU_IFUNC since these are no
   code/data definitions. */
#define ALLOWED_STT \
    ((1 << STT_NOTYPE) | (1 << STT_OBJECT) | (1 << STT_FUNC) \
     | (1 << STT_COMMON) | (1 << STT_TLS) | (1 << STT_GNU_IFUNC))

        if (__builtin_expect (((1 << stt) & ALLOWED_STT) == 0, 0))
            return NULL;

        if (sym != ref && memcmp(strtab + sym->st_name, undef_name, len + 1))
            /* Not the symbol we are looking for. */
            return NULL;

        /* There cannot be another entry for this symbol so stop here. */
        return sym;
    }

    const ElfW(Addr) * bitmask = map->l_gnu_bitmask;

    if (__builtin_expect (bitmask != NULL, 1)) {
        /* Bloom-filter check before walking the hash chain. */
        ElfW(Addr) bitmask_word = bitmask[(hash / __ELF_NATIVE_CLASS)
                                          & map->l_gnu_bitmask_idxbits];

        unsigned int hashbit1 = hash & (__ELF_NATIVE_CLASS - 1);
        unsigned int hashbit2 = (hash >> map->l_gnu_shift)
                                & (__ELF_NATIVE_CLASS - 1);

        if (__builtin_expect ((bitmask_word >> hashbit1)
                              & (bitmask_word >> hashbit2) & 1, 0)) {
            Elf32_Word bucket = map->l_gnu_buckets
                                [hash % map->l_nbuckets];

            if (bucket != 0) {
                const Elf32_Word *hasharr = &map->l_gnu_chain_zero[bucket];

                do {
                    /* Chain entries match on all bits except the low
                       "end of chain" flag bit. */
                    if (((*hasharr ^ hash) >> 1) == 0) {
                        symidx = hasharr - map->l_gnu_chain_zero;
                        sym = check_match (&symtab[symidx]);
                        if (sym != NULL)
                            return sym;
                    }
                } while ((*hasharr++ & 1u) == 0);
            }
        }

        /* No symbol found. */
        symidx = SHN_UNDEF;
    } else {
        /* Use the old SysV-style hash table.  Search the appropriate
           hash bucket in this object's symbol table for a definition
           for the same symbol name. */
        for (symidx = map->l_buckets[elf_hash % map->l_nbuckets];
             symidx != STN_UNDEF;
             symidx = map->l_chain[symidx]) {
            sym = check_match (&symtab[symidx]);
            if (sym != NULL)
                return sym;
        }
    }

    return NULL;
}
  1059. /* Inner part of the lookup functions. We return a value > 0 if we
  1060. found the symbol, the value 0 if nothing is found and < 0 if
  1061. something bad happened. */
  1062. static int do_lookup (const char * undef_name, ElfW(Sym) * ref,
  1063. struct sym_val * result)
  1064. {
  1065. const uint_fast32_t fast_hash = elf_fast_hash(undef_name);
  1066. const long int hash = elf_hash(undef_name);
  1067. ElfW(Sym) *sym = NULL;
  1068. sym = do_lookup_map(ref, undef_name, fast_hash, hash, internal_map);
  1069. if (!sym)
  1070. return 0;;
  1071. switch (__builtin_expect (ELFW(ST_BIND) (sym->st_info), STB_GLOBAL)) {
  1072. case STB_WEAK:
  1073. /* Weak definition. Use this value if we don't find another. */
  1074. if (! result->s) {
  1075. result->s = sym;
  1076. result->m = (struct link_map *) internal_map;
  1077. }
  1078. break;
  1079. /* FALLTHROUGH */
  1080. case STB_GLOBAL:
  1081. case STB_GNU_UNIQUE:
  1082. /* success: */
  1083. /* Global definition. Just what we need. */
  1084. result->s = sym;
  1085. result->m = (struct link_map *) internal_map;
  1086. return 1;
  1087. default:
  1088. /* Local symbols are ignored. */
  1089. break;
  1090. }
  1091. /* We have not found anything until now. */
  1092. return 0;
  1093. }
  1094. /* Search loaded objects' symbol tables for a definition of the symbol
  1095. UNDEF_NAME, perhaps with a requested version for the symbol.
  1096. We must never have calls to the audit functions inside this function
  1097. or in any function which gets called. If this would happen the audit
  1098. code might create a thread which can throw off all the scope locking. */
  1099. struct link_map *
  1100. lookup_symbol (const char * undef_name, ElfW(Sym) ** ref)
  1101. {
  1102. struct sym_val current_value = { NULL, NULL };
  1103. do_lookup(undef_name, *ref, &current_value);
  1104. if (__builtin_expect (current_value.s == NULL, 0)) {
  1105. *ref = NULL;
  1106. return NULL;
  1107. }
  1108. *ref = current_value.s;
  1109. return current_value.m;
  1110. }
  1111. static int do_relocate_object (struct link_map * l)
  1112. {
  1113. int ret = 0;
  1114. ELF_DYNAMIC_RELOCATE(l);
  1115. ret = reprotect_map(l);
  1116. if (ret < 0)
  1117. return ret;
  1118. return 0;
  1119. }
  1120. static bool __need_interp (struct link_map * exec_map)
  1121. {
  1122. if (!exec_map->l_interp_libname)
  1123. return false;
  1124. const char * strtab = (const void *) D_PTR (exec_map->l_info[DT_STRTAB]);
  1125. const ElfW(Dyn) * d;
  1126. for (d = exec_map->l_ld ; d->d_tag != DT_NULL ; d++)
  1127. if (__builtin_expect (d->d_tag, DT_NEEDED) == DT_NEEDED) {
  1128. const char * name = strtab + d->d_un.d_val;
  1129. int len = strlen(name);
  1130. const char * filename = name + len - 1;
  1131. while (filename > name && *filename != '/')
  1132. filename--;
  1133. if (*filename == '/')
  1134. filename++;
  1135. /* if we find a dependency besides libsysdb.so, the
  1136. interpreter is necessary */
  1137. if (memcmp(filename, "libsysdb", 8))
  1138. return true;
  1139. }
  1140. return false;
  1141. }
  1142. extern const char ** library_paths;
  1143. int free_elf_interp (void)
  1144. {
  1145. if (interp_map)
  1146. __free_elf_object(interp_map);
  1147. return 0;
  1148. }
/* Locate the interpreter requested by EXEC_MAP (basename of PT_INTERP)
 * in library_paths (or /lib:/lib64 by default), open it and load it;
 * on success the global interp_map is set.
 * Returns 0 on success, -ENOENT if not found, or another negative error. */
static int __load_interp_object (struct link_map * exec_map)
{
    const char * interp_name = (const char *) exec_map->l_interp_libname +
                               (long) exec_map->l_addr;

    /* Reduce the interpreter path to its basename. */
    int len = strlen(interp_name);
    const char * filename = interp_name + len - 1;
    while (filename > interp_name && *filename != '/')
        filename--;
    if (*filename == '/')
        filename++;
    len -= filename - interp_name;

    const char * default_paths[] = { "/lib", "/lib64", NULL };
    const char ** paths = library_paths ? : default_paths;
    /* NOTE(review): the memcpy calls below do not bound-check against
       STR_SIZE — assumes every search path plus basename fits; confirm. */
    char interp_path[STR_SIZE];

    for (const char ** p = paths ; *p ; p++) {
        /* Build "<path>/<basename>" (including the trailing NUL). */
        int plen = strlen(*p);
        memcpy(interp_path, *p, plen);
        interp_path[plen] = '/';
        memcpy(interp_path + plen + 1, filename, len + 1);

        debug("search interpreter: %s\n", interp_path);

        struct shim_dentry * dent = NULL;
        int ret = 0;

        /* Try the next search path if this one has no such file. */
        if ((ret = path_lookupat(NULL, interp_path, LOOKUP_OPEN, &dent)) < 0 ||
            dent->state & DENTRY_NEGATIVE)
            continue;

        struct shim_mount * fs = dent->fs;
        get_dentry(dent);

        if (!fs->d_ops->open) {
            ret = -EACCES;
err:
            put_dentry(dent);
            return ret;
        }

        /* Probe the file mode if the filesystem supports it. */
        if (fs->d_ops->mode) {
            mode_t mode;
            if ((ret = fs->d_ops->mode(dent, &mode, 1)) < 0)
                goto err;
        }

        struct shim_handle * interp = NULL;

        if (!(interp = get_new_handle())) {
            ret = -ENOMEM;
            goto err;
        }

        set_handle_fs(interp, fs);
        interp->flags = O_RDONLY;
        interp->acc_mode = MAY_READ;

        if ((ret = fs->d_ops->open(interp, dent, O_RDONLY)) < 0) {
            put_handle(interp);
            goto err;
        }

        /* Load it; on success remember it as the interpreter map. */
        if (!(ret = __load_elf_object(interp, NULL, OBJECT_LOAD, NULL)))
            interp_map = __search_map_by_handle(interp);

        put_handle(interp);
        return ret;
    }

    return -ENOENT;
}
  1206. int load_elf_interp (struct shim_handle * exec)
  1207. {
  1208. struct link_map * exec_map = __search_map_by_handle(exec);
  1209. if (exec_map && !interp_map &&
  1210. __need_interp(exec_map))
  1211. __load_interp_object(exec_map);
  1212. return 0;
  1213. }
  1214. int remove_loaded_libraries (void)
  1215. {
  1216. struct link_map * map = loaded_libraries, * next_map = map->l_next;
  1217. while (map) {
  1218. if (map->l_type != OBJECT_INTERNAL)
  1219. __remove_elf_object(map);
  1220. map = next_map;
  1221. next_map = map ? map->l_next : NULL;
  1222. }
  1223. return 0;
  1224. }
/* Load address of the shim itself; migrated_shim_addr is carried across
   checkpoint/restore (__attribute_migratable) so a resumed process can
   find where the parent's shim was loaded. */
void * __load_address;
void * migrated_shim_addr __attribute_migratable = &__load_address;
/* Register the shim itself (libsysdb.so) as the internal runtime object.
 * __load_elf_object() with OBJECT_INTERNAL sets the global internal_map,
 * which is then given its canonical name.  Always returns 0. */
int init_internal_map (void)
{
    __load_elf_object(NULL, &__load_address, OBJECT_INTERNAL, NULL);
    internal_map->l_name = "libsysdb.so";
    return 0;
}
  1233. int init_loader (void)
  1234. {
  1235. struct shim_thread * cur_thread = get_cur_thread();
  1236. int ret = 0;
  1237. lock(cur_thread->lock);
  1238. struct shim_handle * exec = cur_thread->exec;
  1239. if (exec)
  1240. get_handle(exec);
  1241. unlock(cur_thread->lock);
  1242. if (!exec)
  1243. return 0;
  1244. struct link_map * exec_map = __search_map_by_handle(exec);
  1245. if (!exec_map) {
  1246. void * addr = (void *) PAL_CB(executable_begin);
  1247. void * addr_end = (void *) PAL_CB(executable_end);
  1248. if (!addr || !addr_end) {
  1249. ret = -EACCES;
  1250. goto out;
  1251. }
  1252. if ((ret = load_elf_object(exec, addr, addr_end - addr)) < 0)
  1253. goto out;
  1254. exec_map = __search_map_by_handle(exec);
  1255. }
  1256. if (!interp_map && __need_interp(exec_map))
  1257. ret = __load_interp_object(exec_map);
  1258. out:
  1259. put_handle(exec);
  1260. return 0;
  1261. }
  1262. int register_library (const char * name, unsigned long load_address)
  1263. {
  1264. debug("glibc register library %s loaded at %p\n",
  1265. name, load_address);
  1266. struct shim_handle * hdl = get_new_handle();
  1267. if (!hdl)
  1268. return -ENOMEM;
  1269. int err = open_namei(hdl, NULL, name, O_RDONLY, 0, NULL);
  1270. if (err < 0) {
  1271. put_handle(hdl);
  1272. return err;
  1273. }
  1274. __load_elf_object(hdl, (void *) load_address, OBJECT_USER, NULL);
  1275. put_handle(hdl);
  1276. return 0;
  1277. }
/* Transfer control to the loaded executable (or its interpreter, if one
 * is loaded).  Fills in the first six auxv entries, switches the stack to
 * the argument block, and jumps to the entry point.  Does not normally
 * return; shim_do_exit() is called only if control ever comes back. */
int execute_elf_object (struct shim_handle * exec, int argc, const char ** argp,
                        int nauxv, ElfW(auxv_t) * auxp)
{
    struct link_map * exec_map = __search_map_by_handle(exec);
    assert(exec_map);

    /* Fill the auxiliary vector in place.  NOTE(review): nauxv is not
       checked here — caller must supply at least 6 entries; confirm. */
    auxp[0].a_type = AT_PHDR;
    auxp[0].a_un.a_val = (__typeof(auxp[0].a_un.a_val)) exec_map->l_phdr;
    auxp[1].a_type = AT_PHNUM;
    auxp[1].a_un.a_val = exec_map->l_phnum;
    auxp[2].a_type = AT_PAGESZ;
    auxp[2].a_un.a_val = allocsize;
    auxp[3].a_type = AT_ENTRY;
    auxp[3].a_un.a_val = exec_map->l_entry;
    auxp[4].a_type = AT_BASE;
    auxp[4].a_un.a_val = interp_map ? interp_map->l_addr : 0;
    auxp[5].a_type = AT_NULL;

    int ret = 0;

    /* If an interpreter (ld.so) is present, start there instead of at
       the executable's own entry point. */
    ElfW(Addr) entry = interp_map ? interp_map->l_entry : exec_map->l_entry;

#if defined(__x86_64__)
    /* Point the stack at the argument block, push argc on top, and jump.
       NOTE(review): "\r\n" as the instruction separator is unusual but
       accepted by the GNU assembler. */
    asm volatile (
                  "movq %%rbx, %%rsp\r\n"
                  "pushq %%rdi\r\n"
                  "jmp *%%rax\r\n"

                  :
                  : "a"(entry),
                    "b"(argp),
                    "D"(argc)

                  : "memory");
#else
# error "architecture not supported"
#endif

    /* Not reached in practice: the asm above does not return. */
    ret = 0;

#if 0
    int (*main_entry) (int, const char **, const char **, ElfW(auxv_t) *) =
        (void *) exec_map->l_entry;

    ret = main_entry(argc, argp, argp + argc + 1, auxp);
#endif

    shim_do_exit(ret);
    return ret;
}
  1318. DEFINE_MIGRATE_FUNC(library)
  1319. MIGRATE_FUNC_BODY(library)
  1320. {
  1321. assert(size == sizeof(struct link_map));
  1322. struct link_map * map = (struct link_map *) obj;
  1323. struct link_map * new_map;
  1324. struct shim_handle * file = NULL;
  1325. if (map->l_file)
  1326. __DO_MIGRATE(handle, map->l_file, &file, 1);
  1327. unsigned long off = ADD_TO_MIGRATE_MAP(obj, *offset, size);
  1328. int namelen = map->l_name ? strlen(map->l_name) : 0;
  1329. int sonamelen = map->l_soname ? strlen(map->l_soname) : 0;
  1330. if (ENTRY_JUST_CREATED(off)) {
  1331. ADD_OFFSET(sizeof(struct link_map));
  1332. ADD_FUNC_ENTRY(*offset);
  1333. ADD_ENTRY(SIZE, sizeof(struct link_map));
  1334. if (!dry) {
  1335. new_map = (struct link_map *) (base + *offset);
  1336. memcpy(new_map, map, sizeof(struct link_map));
  1337. get_handle(file);
  1338. new_map->l_file = file;
  1339. new_map->l_prev = NULL;
  1340. new_map->l_next = NULL;
  1341. }
  1342. if (map->l_ld) {
  1343. int size = sizeof(ElfW(Dyn)) * map->l_ldnum;
  1344. ADD_OFFSET(size);
  1345. if (!dry) {
  1346. ElfW(Dyn) * ld = (void *) (base + *offset);
  1347. memcpy(ld, map->l_ld, size);
  1348. new_map->l_ld = ld;
  1349. for (ElfW(Dyn) ** dyn = new_map->l_info ;
  1350. (void *) dyn < ((void *) new_map->l_info +
  1351. sizeof(new_map->l_info)) ; dyn++)
  1352. if (*dyn)
  1353. *dyn = ((void *) *dyn + ((void *) ld -
  1354. (void *) map->l_ld));
  1355. }
  1356. }
  1357. if (map->l_name) {
  1358. ADD_OFFSET(namelen + 1);
  1359. if (!dry && map->l_name) {
  1360. char * name = (char *) (base + *offset);
  1361. memcpy(name, map->l_name, namelen + 1);
  1362. new_map->l_name = name;
  1363. }
  1364. }
  1365. if (map->l_soname) {
  1366. ADD_OFFSET(sonamelen + 1);
  1367. if (!dry && map->l_soname) {
  1368. char * soname = (char *) (base + *offset);
  1369. memcpy(soname, map->l_soname, sonamelen + 1);
  1370. new_map->l_soname = soname;
  1371. }
  1372. }
  1373. } else if (!dry) {
  1374. new_map = (struct link_map *) (base + off);
  1375. }
  1376. if (new_map && objp)
  1377. *objp = (void *) new_map;
  1378. }
  1379. END_MIGRATE_FUNC
/* Restore one migrated link_map: rebase its pointers into this process,
 * verify the library image actually arrived, re-relocate if the shim
 * moved, and splice the map into the loaded-object list. */
RESUME_FUNC_BODY(library)
{
    unsigned long off = GET_FUNC_ENTRY();
    assert((size_t) GET_ENTRY(SIZE) == sizeof(struct link_map));
    struct link_map * map = (struct link_map *) (base + off);

    /* Adjust checkpoint-relative pointers to this address space. */
    RESUME_REBASE(map->l_name);
    RESUME_REBASE(map->l_soname);
    RESUME_REBASE(map->l_file);

    if (map->l_ld && map->l_ld != map->l_real_ld) {
        RESUME_REBASE(map->l_ld);
        RESUME_REBASE(map->l_info);
    }

    /* A same-named library already present will be replaced below;
       retire its debugger entry now. */
    struct link_map * old_map = __search_map_by_name(map->l_name);

    if (old_map)
        remove_r_debug((void *) old_map->l_addr);

    /* The library image itself must have arrived as a received VMA
       starting exactly at l_map_start. */
    struct shim_vma * vma = NULL;

    if (lookup_supervma((void *) map->l_map_start, allocsize, &vma) < 0 ||
        vma->addr != (void *) map->l_map_start ||
        !vma->received) {
        sys_printf(vma ? "library %s (%p - %p) not received\n" :
                   "library %s (%p - %p) not mapped\n",
                   map->l_name, map->l_map_start, map->l_map_end);
        if (vma)
            put_vma(vma);
        return -ENOMEM;
    }

    put_vma(vma);

    /* Redo relocation if the shim image moved since checkpointing. */
    if (internal_map && (!map->l_resolved ||
                         map->l_resolved_map != internal_map->l_addr))
        do_relocate_object(map);

    if (old_map)
        replace_link_map(map, old_map);
    else
        add_link_map(map);

#ifdef DEBUG_RESUME
    debug("library: loaded at %p,name=%s\n", map->l_addr, map->l_name);
#endif
}
END_RESUME_FUNC
DEFINE_MIGRATE_FUNC(loaded_libraries)

/* Checkpoint every loaded library except the shim itself, and record
 * which checkpointed map is the interpreter. */
MIGRATE_FUNC_BODY(loaded_libraries)
{
    struct link_map * map = loaded_libraries, * new_interp_map = NULL;

    while (map) {
        struct link_map * new_map = NULL, ** map_obj = &new_map;

        /* The internal map (the shim) is recreated in the child rather
           than migrated. */
        if (map != internal_map)
            DO_MIGRATE(library, map, map_obj, recursive);

        if (map == interp_map)
            new_interp_map = new_map;

        map = map->l_next;
    }

    /* Stash the (checkpoint-relative) interpreter pointer for resume. */
    ADD_FUNC_ENTRY(new_interp_map);
}
END_MIGRATE_FUNC
/* Restore the interp_map global from the pointer recorded at checkpoint
 * time, rebasing it into this address space. */
RESUME_FUNC_BODY(loaded_libraries)
{
    interp_map = (struct link_map *) GET_FUNC_ENTRY();

    if (interp_map) {
        RESUME_REBASE(interp_map);
#ifdef DEBUG_RESUME
        debug("library: interpreter is %s\n", interp_map->l_name);
#endif
    }
}
END_RESUME_FUNC