/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_init.c
 *
 * This file contains entry and exit functions of library OS.
 */

#include <shim_internal.h>
#include <shim_table.h>
#include <shim_tls.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>
#include <shim_ipc.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_debug.h>
#include <pal_error.h>
#include <sys/mman.h>
#include <asm/unistd.h>
#include <asm/fcntl.h>

unsigned long allocsize;
unsigned long allocshift;
unsigned long allocmask;
/* The following constants help match the glibc version with compatible SHIM
   libraries */
#include "glibc-version.h"

const unsigned int glibc_version = GLIBC_VERSION;
static void handle_failure (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    shim_get_tls()->pal_errno = (arg <= PAL_ERROR_BOUND) ? arg : 0;
}

void __abort(void) {
    pause();
    shim_terminate();
}

void warn (const char *format, ...)
{
    va_list args;
    va_start (args, format);
    __sys_vprintf(format, &args);
    va_end (args);
}

void __stack_chk_fail (void)
{
}
static int pal_errno_to_unix_errno [PAL_ERROR_BOUND + 1] = {
    /* reserved                 */  0,
    /* PAL_ERROR_NOTIMPLEMENTED */  ENOSYS,
    /* PAL_ERROR_NOTDEFINED     */  ENOSYS,
    /* PAL_ERROR_NOTSUPPORT     */  EACCES,
    /* PAL_ERROR_INVAL          */  EINVAL,
    /* PAL_ERROR_TOOLONG        */  ENAMETOOLONG,
    /* PAL_ERROR_DENIED         */  EACCES,
    /* PAL_ERROR_BADHANDLE      */  EFAULT,
    /* PAL_ERROR_STREAMEXIST    */  EEXIST,
    /* PAL_ERROR_STREAMNOTEXIST */  ENOENT,
    /* PAL_ERROR_STREAMISFILE   */  ENOTDIR,
    /* PAL_ERROR_STREAMISDIR    */  EISDIR,
    /* PAL_ERROR_STREAMISDEVICE */  ESPIPE,
    /* PAL_ERROR_INTERRUPTED    */  EINTR,
    /* PAL_ERROR_OVERFLOW       */  EFAULT,
    /* PAL_ERROR_BADADDR        */  EFAULT,
    /* PAL_ERROR_NOMEM          */  ENOMEM,
    /* PAL_ERROR_NOTKILLABLE    */  EACCES,
    /* PAL_ERROR_INCONSIST      */  EFAULT,
    /* PAL_ERROR_TRYAGAIN       */  EAGAIN,
    /* PAL_ERROR_ENDOFSTREAM    */  0,
    /* PAL_ERROR_NOTSERVER      */  EINVAL,
    /* PAL_ERROR_NOTCONNECTION  */  ENOTCONN,
    /* PAL_ERROR_ZEROSIZE       */  0,
    /* PAL_ERROR_CONNFAILED     */  ECONNRESET,
    /* PAL_ERROR_ADDRNOTEXIST   */  EADDRNOTAVAIL,
};
long convert_pal_errno (long err)
{
    return (err >= 0 && err <= PAL_ERROR_BOUND) ?
           pal_errno_to_unix_errno[err] : 0;
}
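
/* Parse an unsigned integer from a manifest/config string.  A leading "0"
 * selects octal and a leading "0x" selects hexadecimal; a trailing
 * 'K'/'M'/'G' (upper- or lowercase) multiplies the value by 2^10/2^20/2^30,
 * e.g. "0x10" parses to 16 and "4M" parses to 4194304. */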
unsigned long parse_int (const char * str)
{
    unsigned long num = 0;
    int radix = 10;
    char c;

    if (str[0] == '0') {
        str++;
        radix = 8;
        if (str[0] == 'x') {
            str++;
            radix = 16;
        }
    }

    while ((c = *(str++))) {
        int val;
        if (c >= 'A' && c <= 'F')
            val = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            val = c - 'a' + 10;
        else if (c >= '0' && c <= '9')
            val = c - '0';
        else
            break;
        if (val >= radix)
            break;
        num = num * radix + val;
    }

    if (c == 'G' || c == 'g')
        num *= 1024 * 1024 * 1024;
    else if (c == 'M' || c == 'm')
        num *= 1024 * 1024;
    else if (c == 'K' || c == 'k')
        num *= 1024;

    return num;
}
long int glibc_option (const char * opt)
{
    char cfg[CONFIG_MAX];

    if (strcmp_static(opt, "heap_size")) {
        ssize_t ret = get_config(root_config, "glibc.heap_size", cfg, CONFIG_MAX);
        if (ret <= 0) {
            debug("no glibc option: %s (err=%ld)\n", opt, ret);
            return -ENOENT;
        }

        long int heap_size = parse_int(cfg);
        debug("glibc option: heap_size = %ld\n", heap_size);
        return (long int) heap_size;
    }

    return -EINVAL;
}
void * migrated_memory_start;
void * migrated_memory_end;
void * migrated_shim_addr;

const char ** initial_envp __attribute_migratable;

char ** library_paths;

LOCKTYPE __master_lock;
bool lock_enabled;

void init_tcb (shim_tcb_t * tcb)
{
    tcb->canary = SHIM_TLS_CANARY;
    tcb->self = tcb;
}
void copy_tcb (shim_tcb_t * new_tcb, const shim_tcb_t * old_tcb)
{
    memset(new_tcb, 0, sizeof(shim_tcb_t));
    new_tcb->canary = SHIM_TLS_CANARY;
    new_tcb->self = new_tcb;
    new_tcb->tp   = old_tcb->tp;
    memcpy(&new_tcb->context, &old_tcb->context, sizeof(struct shim_context));
    new_tcb->tid  = old_tcb->tid;
    new_tcb->debug_buf = old_tcb->debug_buf;
}
/* This function is used to allocate the TLS before the interpreter starts
   running */
void allocate_tls (void * tcb_location, bool user, struct shim_thread * thread)
{
    __libc_tcb_t * tcb = tcb_location;
    assert(tcb);
    tcb->tcb = tcb;
    init_tcb(&tcb->shim_tcb);

    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
        tcb->shim_tcb.tp = thread;
        tcb->shim_tcb.tid = thread->tid;
    } else {
        tcb->shim_tcb.tp = NULL;
        tcb->shim_tcb.tid = 0;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(shim_tls_check_canary());
}
void populate_tls (void * tcb_location, bool user)
{
    __libc_tcb_t * tcb = (__libc_tcb_t *) tcb_location;
    assert(tcb);
    tcb->tcb = tcb;
    copy_tcb(&tcb->shim_tcb, shim_get_tls());

    struct shim_thread * thread = (struct shim_thread *) tcb->shim_tcb.tp;
    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(shim_tls_check_canary());
}
DEFINE_PROFILE_OCCURENCE(alloc_stack, memory);
DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);

#define STACK_FLAGS     (MAP_PRIVATE|MAP_ANONYMOUS)
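
/* Allocate a stack of the given size, plus a leading guard region of
 * protect_size bytes that stays PROT_NONE.  For user == true the stack is
 * booked in the user heap range; otherwise it comes from internal shim
 * memory.  Returns a 16-byte-aligned pointer just above the guard region. */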
void * allocate_stack (size_t size, size_t protect_size, bool user)
{
    size = ALIGN_UP(size);
    protect_size = ALIGN_UP(protect_size);

    /* preserve a non-readable, non-writable page below the user stack to
       stop the user program from clobbering other vmas */
    void * stack = NULL;
    int flags = STACK_FLAGS|(user ? 0 : VMA_INTERNAL);

    if (user) {
        stack = bkeep_unmapped_heap(size + protect_size, PROT_NONE,
                                    flags, NULL, 0, "stack");
        if (!stack)
            return NULL;

        stack = (void *)
            DkVirtualMemoryAlloc(stack, size + protect_size,
                                 0, PAL_PROT_NONE);
    } else {
        stack = system_malloc(size + protect_size);
    }

    if (!stack)
        return NULL;

    ADD_PROFILE_OCCURENCE(alloc_stack, size + protect_size);
    INC_PROFILE_OCCURENCE(alloc_stack_count);

    stack += protect_size;
    // Ensure proper alignment for process' initial stack pointer value.
    stack += (16 - (uintptr_t) stack % 16) % 16;
    DkVirtualMemoryProtect(stack, size, PAL_PROT_READ|PAL_PROT_WRITE);

    if (bkeep_mprotect(stack, size, PROT_READ|PROT_WRITE, flags) < 0)
        return NULL;

    debug("allocated stack at %p (size = %ld)\n", stack, size);
    return stack;
}
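
/* Lay out the initial user stack: argument and environment strings are
 * copied to the top of the stack, while argc (as a long), the argv[]/envp[]
 * pointer arrays and the auxiliary vector grow from the bottom.  The
 * populated bottom block is then moved to the top of the stack, aligned down
 * to 16 bytes, and the caller's argc/argv/envp/auxv pointers are updated to
 * their final locations. */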
static int populate_user_stack (void * stack, size_t stack_size,
                                int nauxv, elf_auxv_t ** auxpp,
                                int ** argcpp,
                                const char *** argvp, const char *** envpp)
{
    const int argc = **argcpp;
    const char ** argv = *argvp, ** envp = *envpp;
    const char ** new_argv = NULL, ** new_envp = NULL;
    elf_auxv_t * new_auxp = NULL;
    void * stack_bottom = stack;
    void * stack_top = stack + stack_size;

#define ALLOCATE_TOP(size)                                              \
    ({ if ((stack_top -= (size)) < stack_bottom) return -ENOMEM;       \
       stack_top; })

#define ALLOCATE_BOTTOM(size)                                           \
    ({ if ((stack_bottom += (size)) > stack_top) return -ENOMEM;        \
       stack_bottom - (size); })

    /* ld.so expects argc as long on stack, not int. */
    long * argcp = ALLOCATE_BOTTOM(sizeof(long));
    *argcp = **argcpp;

    if (!argv) {
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;
        goto copy_envp;
    }

    new_argv = stack_bottom;
    while (argv) {
        for (const char ** a = argv ; *a ; a++) {
            const char ** t = ALLOCATE_BOTTOM(sizeof(const char *));
            int len = strlen(*a) + 1;
            char * abuf = ALLOCATE_TOP(len);
            memcpy(abuf, *a, len);
            *t = abuf;
        }

        *((const char **) ALLOCATE_BOTTOM(sizeof(const char *))) = NULL;

copy_envp:
        if (!envp)
            break;
        new_envp = stack_bottom;
        argv = envp;
        envp = NULL;
    }

    if (!new_envp)
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;

    if (nauxv) {
        new_auxp = ALLOCATE_BOTTOM(sizeof(elf_auxv_t) * nauxv);
        if (*auxpp)
            memcpy(new_auxp, *auxpp, nauxv * sizeof(elf_auxv_t));
    }

    /* The x86_64 ABI requires 16-byte alignment of the stack on every
       function call. */
    size_t move_size = stack_bottom - stack;
    *argcpp = stack_top - move_size;
    *argcpp = ALIGN_DOWN_PTR(*argcpp, 16UL);
    **argcpp = argc;
    size_t shift = (void *) (*argcpp) - stack;

    memmove(*argcpp, stack, move_size);
    *argvp = new_argv ? (void *) new_argv + shift : NULL;
    *envpp = new_envp ? (void *) new_envp + shift : NULL;
    *auxpp = new_auxp ? (void *) new_auxp + shift : NULL;

    /* clear the working area at the bottom */
    memset(stack, 0, shift);
    return 0;
}
unsigned long sys_stack_size = 0;

int init_stack (const char ** argv, const char ** envp,
                int ** argcpp, const char *** argpp,
                int nauxv, elf_auxv_t ** auxpp)
{
    if (!sys_stack_size) {
        sys_stack_size = DEFAULT_SYS_STACK_SIZE;
        if (root_config) {
            char stack_cfg[CONFIG_MAX];
            if (get_config(root_config, "sys.stack.size", stack_cfg,
                           CONFIG_MAX) > 0)
                sys_stack_size = ALIGN_UP(parse_int(stack_cfg));
        }
    }

    struct shim_thread * cur_thread = get_cur_thread();
    if (!cur_thread || cur_thread->stack)
        return 0;

    void * stack = allocate_stack(sys_stack_size, allocsize, true);
    if (!stack)
        return -ENOMEM;

    if (initial_envp)
        envp = initial_envp;

    int ret = populate_user_stack(stack, sys_stack_size,
                                  nauxv, auxpp, argcpp, &argv, &envp);
    if (ret < 0)
        return ret;

    *argpp = argv;
    initial_envp = envp;

    cur_thread->stack_top = stack + sys_stack_size;
    cur_thread->stack     = stack;
    cur_thread->stack_red = stack - allocsize;
    return 0;
}
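
/* Scan the environment passed in by the PAL.  Currently only
 * LD_LIBRARY_PATH is interesting: its value is split on ':' into the
 * NULL-terminated library_paths array used when loading libraries. */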
int read_environs (const char ** envp)
{
    for (const char ** e = envp ; *e ; e++) {
        if (strpartcmp_static(*e, "LD_LIBRARY_PATH=")) {
            const char * s = *e + static_strlen("LD_LIBRARY_PATH=");
            size_t npaths = 2; // One for the first entry, one for the last
                               // NULL.
            for (const char * tmp = s ; *tmp ; tmp++)
                if (*tmp == ':')
                    npaths++;
            char ** paths = malloc(sizeof(const char *) * npaths);
            if (!paths)
                return -ENOMEM;

            size_t cnt = 0;
            while (*s) {
                const char * next;
                for (next = s ; *next && *next != ':' ; next++);
                size_t len = next - s;
                char * str = malloc(len + 1);
                if (!str) {
                    /* free the entries copied so far */
                    for (size_t i = 0; i < cnt; i++)
                        free(paths[i]);
                    free(paths);
                    return -ENOMEM;
                }
                memcpy(str, s, len);
                str[len] = 0;
                paths[cnt++] = str;
                s = *next ? next + 1 : next;
            }

            paths[cnt] = NULL;
            library_paths = paths;
            return 0;
        }
    }

    return 0;
}
struct config_store * root_config = NULL;

static void * __malloc (size_t size)
{
    return malloc(size);
}

static void __free (void * mem)
{
    free(mem);
}
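
/* Map the manifest into memory and parse it into root_config.  If the PAL
 * already preloaded the manifest (manifest_preload), that mapping is reused;
 * otherwise the manifest stream is mapped read-only with DkStreamMap(). */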
int init_manifest (PAL_HANDLE manifest_handle)
{
    int ret = 0;
    void * addr = NULL;
    size_t size = 0, map_size = 0;

#define MAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL)

    if (PAL_CB(manifest_preload.start)) {
        addr = PAL_CB(manifest_preload.start);
        size = PAL_CB(manifest_preload.end) - PAL_CB(manifest_preload.start);
    } else {
        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQuerybyHandle(manifest_handle, &attr))
            return -PAL_ERRNO;

        size = attr.pending_size;
        map_size = ALIGN_UP(size);
        addr = bkeep_unmapped_any(map_size, PROT_READ, MAP_FLAGS,
                                  NULL, 0, "manifest");
        if (!addr)
            return -ENOMEM;

        void * ret_addr = DkStreamMap(manifest_handle, addr,
                                      PAL_PROT_READ, 0,
                                      ALIGN_UP(size));

        if (!ret_addr) {
            bkeep_munmap(addr, map_size, MAP_FLAGS);
            return -ENOMEM;
        } else {
            assert(addr == ret_addr);
        }
    }

    struct config_store * new_root_config = malloc(sizeof(struct config_store));
    if (!new_root_config) {
        ret = -ENOMEM;
        goto fail;
    }

    new_root_config->raw_data = addr;
    new_root_config->raw_size = size;
    new_root_config->malloc = __malloc;
    new_root_config->free = __free;

    const char * errstring = "Unexpected error";

    if ((ret = read_config(new_root_config, NULL, &errstring)) < 0) {
        sys_printf("Unable to read manifest file: %s\n", errstring);
        goto fail;
    }

    root_config = new_root_config;
    return 0;

fail:
    if (map_size) {
        DkStreamUnmap(addr, map_size);
        if (bkeep_munmap(addr, map_size, MAP_FLAGS) < 0)
            bug();
    }
    free(new_root_config);
    return ret;
}
#ifdef PROFILE
struct shim_profile profile_root;
#endif
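
/* The argument cookie handed to shim_init() points at the initial stack
 * layout set up by the PAL: argv[] (argc + 1 entries, including the NULL
 * terminator), followed by the NULL-terminated envp[], followed by the ELF
 * auxiliary vector.  This macro recovers pointers to each component. */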
# define FIND_ARG_COMPONENTS(cookie, argc, argv, envp, auxp)       \
    do {                                                            \
        void *_tmp = (cookie);                                      \
        (argv) = _tmp;                                              \
        _tmp += sizeof(char *) * ((argc) + 1);                      \
        (envp) = _tmp;                                              \
        for ( ; *(char **) _tmp; _tmp += sizeof(char *));           \
        (auxp) = _tmp + sizeof(char *);                             \
    } while (0)

static elf_auxv_t* __process_auxv (elf_auxv_t * auxp)
{
    elf_auxv_t * av;

    for (av = auxp; av->a_type != AT_NULL; av++)
        switch (av->a_type) {
            default: break;
        }

    return av + 1;
}
#ifdef PROFILE
static void set_profile_enabled (const char ** envp)
{
    const char ** p;
    for (p = envp ; (*p) ; p++)
        if (strpartcmp_static(*p, "PROFILE_ENABLED="))
            break;
    if (!(*p))
        return;

    for (int i = 0 ; i < N_PROFILE ; i++)
        PROFILES[i].disabled = true;

    const char * str = (*p) + 16;
    bool enabled = false;
    while (*str) {
        const char * next = str;
        for ( ; (*next) && (*next) != ',' ; next++);
        if (next > str) {
            int len = next - str;
            for (int i = 0 ; i < N_PROFILE ; i++) {
                struct shim_profile * profile = &PROFILES[i];
                if (!memcmp(profile->name, str, len) && !profile->name[len]) {
                    profile->disabled = false;
                    if (profile->type == CATAGORY)
                        enabled = true;
                }
            }
        }
        str = (*next) ? next + 1 : next;
    }

    while (enabled) {
        enabled = false;
        for (int i = 0 ; i < N_PROFILE ; i++) {
            struct shim_profile * profile = &PROFILES[i];
            if (!profile->disabled || profile->root == &profile_)
                continue;
            if (!profile->root->disabled) {
                profile->disabled = false;
                if (profile->type == CATAGORY)
                    enabled = true;
            }
        }
    }

    for (int i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->type == CATAGORY || profile->disabled)
            continue;
        for (profile = profile->root ;
             profile != &profile_ && profile->disabled ;
             profile = profile->root)
            profile->disabled = false;
    }
}
#endif
static int init_newproc (struct newproc_header * hdr)
{
    BEGIN_PROFILE_INTERVAL();

    int bytes = DkStreamRead(PAL_CB(parent_process), 0,
                             sizeof(struct newproc_header), hdr,
                             NULL, 0);
    if (!bytes)
        return -PAL_ERRNO;

    SAVE_PROFILE_INTERVAL(child_wait_header);
    SAVE_PROFILE_INTERVAL_SINCE(child_receive_header, hdr->write_proc_time);
    return hdr->failure;
}
DEFINE_PROFILE_CATAGORY(pal, );
DEFINE_PROFILE_INTERVAL(pal_startup_time,               pal);
DEFINE_PROFILE_INTERVAL(pal_host_specific_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_relocation_time,            pal);
DEFINE_PROFILE_INTERVAL(pal_linking_time,               pal);
DEFINE_PROFILE_INTERVAL(pal_manifest_loading_time,      pal);
DEFINE_PROFILE_INTERVAL(pal_allocation_time,            pal);
DEFINE_PROFILE_INTERVAL(pal_tail_startup_time,          pal);
DEFINE_PROFILE_INTERVAL(pal_child_creation_time,        pal);

DEFINE_PROFILE_CATAGORY(init, );
DEFINE_PROFILE_INTERVAL(init_vma,                   init);
DEFINE_PROFILE_INTERVAL(init_slab,                  init);
DEFINE_PROFILE_INTERVAL(init_str_mgr,               init);
DEFINE_PROFILE_INTERVAL(init_internal_map,          init);
DEFINE_PROFILE_INTERVAL(init_fs,                    init);
DEFINE_PROFILE_INTERVAL(init_dcache,                init);
DEFINE_PROFILE_INTERVAL(init_handle,                init);
DEFINE_PROFILE_INTERVAL(read_from_checkpoint,       init);
DEFINE_PROFILE_INTERVAL(read_from_file,             init);
DEFINE_PROFILE_INTERVAL(init_newproc,               init);
DEFINE_PROFILE_INTERVAL(init_mount_root,            init);
DEFINE_PROFILE_INTERVAL(init_from_checkpoint_file,  init);
DEFINE_PROFILE_INTERVAL(restore_from_file,          init);
DEFINE_PROFILE_INTERVAL(init_manifest,              init);
DEFINE_PROFILE_INTERVAL(init_ipc,                   init);
DEFINE_PROFILE_INTERVAL(init_thread,                init);
DEFINE_PROFILE_INTERVAL(init_important_handles,     init);
DEFINE_PROFILE_INTERVAL(init_mount,                 init);
DEFINE_PROFILE_INTERVAL(init_async,                 init);
DEFINE_PROFILE_INTERVAL(init_stack,                 init);
DEFINE_PROFILE_INTERVAL(read_environs,              init);
DEFINE_PROFILE_INTERVAL(init_loader,                init);
DEFINE_PROFILE_INTERVAL(init_ipc_helper,            init);
DEFINE_PROFILE_INTERVAL(init_signal,                init);
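
/* RUN_INIT() runs one initialization step of shim_init(): a negative return
 * value reports which step failed and terminates the library OS, and the
 * elapsed time is recorded under the step's own profile name. */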
#define CALL_INIT(func, args ...)   func(args)

#define RUN_INIT(func, ...)                                             \
    do {                                                                \
        int _err = CALL_INIT(func, ##__VA_ARGS__);                      \
        if (_err < 0) {                                                 \
            sys_printf("shim_init() in " #func " (%d)\n", _err);        \
            shim_terminate();                                           \
        }                                                               \
        SAVE_PROFILE_INTERVAL(func);                                    \
    } while (0)

extern PAL_HANDLE thread_start_event;
__attribute__((noreturn)) void * shim_init (int argc, void * args)
{
    debug_handle = PAL_CB(debug_stream);
    cur_process.vmid = (IDTYPE) PAL_CB(process_id);

    /* create the initial TCB; the shim cannot run without a TCB */
    __libc_tcb_t tcb;
    memset(&tcb, 0, sizeof(__libc_tcb_t));
    allocate_tls(&tcb, false, NULL);
    __disable_preempt(&tcb.shim_tcb); // Temporarily disable preemption to
                                      // delay any signal that arrives during
                                      // initialization
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
#endif

    debug("host: %s\n", PAL_CB(host_type));

    DkSetExceptionHandler(&handle_failure, PAL_EVENT_FAILURE);

    allocsize  = PAL_CB(alloc_align);
    allocshift = allocsize - 1;
    allocmask  = ~allocshift;

    create_lock(__master_lock);

    int * argcp = &argc;
    const char ** argv, ** envp, ** argp = NULL;
    elf_auxv_t * auxp;

    /* call to figure out where the arguments are */
    FIND_ARG_COMPONENTS(args, argc, argv, envp, auxp);
    int nauxv = __process_auxv(auxp) - auxp;

#ifdef PROFILE
    set_profile_enabled(envp);
#endif

    struct newproc_header hdr;
    void * cpaddr = NULL;
#ifdef PROFILE
    unsigned long begin_create_time = 0;
#endif

    BEGIN_PROFILE_INTERVAL();
    RUN_INIT(init_vma);
    RUN_INIT(init_slab);
    RUN_INIT(read_environs, envp);
    RUN_INIT(init_str_mgr);
    RUN_INIT(init_internal_map);
    RUN_INIT(init_fs);
    RUN_INIT(init_dcache);
    RUN_INIT(init_handle);

    debug("shim loaded at %p, ready to initialize\n", &__load_address);

    if (argc && argv[0][0] == '-') {
        if (strcmp_static(argv[0], "-resume") && argc >= 2) {
            const char * filename = *(argv + 1);
            argc -= 2;
            argv += 2;
            RUN_INIT(init_mount_root);
            RUN_INIT(init_from_checkpoint_file, filename, &hdr.checkpoint,
                     &cpaddr);
            goto restore;
        }
    }

    if (PAL_CB(parent_process)) {
        RUN_INIT(init_newproc, &hdr);
        SAVE_PROFILE_INTERVAL_SET(child_created_in_new_process,
                                  hdr.create_time, begin_time);
#ifdef PROFILE
        begin_create_time = hdr.begin_create_time;
#endif

        if (hdr.checkpoint.hdr.size)
            RUN_INIT(do_migration, &hdr.checkpoint, &cpaddr);
    }

    if (cpaddr) {
restore:
        thread_start_event = DkNotificationEventCreate(PAL_FALSE);
        RUN_INIT(restore_checkpoint,
                 &hdr.checkpoint.hdr, &hdr.checkpoint.mem,
                 (ptr_t) cpaddr, 0);
    }

    if (PAL_CB(manifest_handle))
        RUN_INIT(init_manifest, PAL_CB(manifest_handle));

    RUN_INIT(init_mount_root);
    RUN_INIT(init_ipc);
    RUN_INIT(init_thread);
    RUN_INIT(init_mount);
    RUN_INIT(init_important_handles);
    RUN_INIT(init_async);
    RUN_INIT(init_stack, argv, envp, &argcp, &argp, nauxv, &auxp);
    RUN_INIT(init_loader);
    RUN_INIT(init_ipc_helper);
    RUN_INIT(init_signal);

    if (PAL_CB(parent_process)) {
        /* Notify the parent process */
        struct newproc_response res;
        res.child_vmid = cur_process.vmid;
        res.failure = 0;
        if (!DkStreamWrite(PAL_CB(parent_process), 0,
                           sizeof(struct newproc_response),
                           &res, NULL))
            shim_do_exit(-PAL_ERRNO);
    }

    debug("shim process initialized\n");

#ifdef PROFILE
    if (begin_create_time)
        SAVE_PROFILE_INTERVAL_SINCE(child_total_migration_time,
                                    begin_create_time);
#endif

    SAVE_PROFILE_INTERVAL_SET(pal_startup_time, 0, pal_control.startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_host_specific_startup_time, 0,
                              pal_control.host_specific_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_relocation_time, 0,
                              pal_control.relocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_linking_time, 0, pal_control.linking_time);
    SAVE_PROFILE_INTERVAL_SET(pal_manifest_loading_time, 0,
                              pal_control.manifest_loading_time);
    SAVE_PROFILE_INTERVAL_SET(pal_allocation_time, 0,
                              pal_control.allocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_tail_startup_time, 0,
                              pal_control.tail_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_child_creation_time, 0,
                              pal_control.child_creation_time);

    if (thread_start_event)
        DkEventSet(thread_start_event);

    shim_tcb_t * cur_tcb = shim_get_tls();
    struct shim_thread * cur_thread = (struct shim_thread *) cur_tcb->tp;

    if (cur_tcb->context.sp)
        restore_context(&cur_tcb->context);

    if (cur_thread->exec)
        execute_elf_object(cur_thread->exec,
                           argcp, argp, nauxv, auxp);

    shim_do_exit(0);
}
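
/* Generic helper for creating a uniquely-named object.  mkname() generates a
 * candidate name from a random id, create() attempts to create it (returning
 * a positive value when the name already exists, which triggers a retry),
 * and output() formats the final name into the caller's qstr.  A typical use
 * is create_pipe() below:
 *     create_unique(&name_pipe, &open_pipe, &pipe_addr,
 *                   uri, size, &pipeid, hdl, qstr);
 */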
static int create_unique (int (*mkname) (char *, size_t, void *),
                          int (*create) (const char *, void *),
                          int (*output) (char *, size_t, const void *,
                                         struct shim_qstr *),
                          char * name, size_t size, void * id, void * obj,
                          struct shim_qstr * qstr)
{
    int ret, len;
    while (1) {
        len = mkname(name, size, id);
        if (len < 0)
            return len;
        if ((ret = create(name, obj)) < 0)
            return ret;
        if (ret)
            continue;
        if (output)
            return output(name, size, id, qstr);
        if (qstr)
            qstrsetstr(qstr, name, len);
        return len;
    }
}
static int name_pipe (char * uri, size_t size, void * id)
{
    IDTYPE pipeid;
    int len;
    int ret = DkRandomBitsRead(&pipeid, sizeof(pipeid));
    if (ret < 0)
        return -convert_pal_errno(-ret);
    debug("creating pipe: pipe.srv:%u\n", pipeid);
    if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) == size)
        return -ERANGE;
    *((IDTYPE *) id) = pipeid;
    return len;
}

static int open_pipe (const char * uri, void * obj)
{
    PAL_HANDLE pipe = DkStreamOpen(uri, 0, 0, 0, 0);
    if (!pipe)
        return PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST ? 1 :
               -PAL_ERRNO;
    if (obj)
        *((PAL_HANDLE *) obj) = pipe;
    else
        DkObjectClose(pipe);
    return 0;
}
static int pipe_addr (char * uri, size_t size, const void * id,
                      struct shim_qstr * qstr)
{
    IDTYPE pipeid = *((IDTYPE *) id);
    int len;
    if ((len = snprintf(uri, size, "pipe:%u", pipeid)) == size)
        return -ERANGE;
    if (qstr)
        qstrsetstr(qstr, uri, len);
    return len;
}

int create_pipe (IDTYPE * id, char * uri, size_t size, PAL_HANDLE * hdl,
                 struct shim_qstr * qstr)
{
    IDTYPE pipeid;
    int ret = create_unique(&name_pipe, &open_pipe, &pipe_addr,
                            uri, size, &pipeid, hdl, qstr);
    if (ret > 0 && id)
        *id = pipeid;
    return ret;
}
static int name_path (char * path, size_t size, void * id)
{
    unsigned int suffix;
    int prefix_len = strlen(path);
    int len;
    int ret = DkRandomBitsRead(&suffix, sizeof(suffix));
    if (ret < 0)
        return -convert_pal_errno(-ret);
    len = snprintf(path + prefix_len, size - prefix_len, "%08x", suffix);
    if (len == size)
        return -ERANGE;
    *((unsigned int *) id) = suffix;
    return prefix_len + len;
}
static int open_dir (const char * path, void * obj)
{
    struct shim_handle * dir = NULL;

    if (obj) {
        dir = get_new_handle();
        if (!dir)
            return -ENOMEM;
    }

    int ret = open_namei(dir, NULL, path, O_CREAT|O_EXCL|O_DIRECTORY, 0700,
                         NULL);
    if (ret < 0)
        /* an existing directory means the name is taken; signal a retry */
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = dir;
    return 0;
}

static int open_file (const char * path, void * obj)
{
    struct shim_handle * file = NULL;

    if (obj) {
        file = get_new_handle();
        if (!file)
            return -ENOMEM;
    }

    int ret = open_namei(file, NULL, path, O_CREAT|O_EXCL|O_RDWR, 0600,
                         NULL);
    if (ret < 0)
        /* an existing file means the name is taken; signal a retry */
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = file;
    return 0;
}
static int open_pal_handle (const char * uri, void * obj)
{
    PAL_HANDLE hdl;

    if (strpartcmp_static(uri, "dev:"))
        hdl = DkStreamOpen(uri, 0,
                           PAL_SHARE_OWNER_X|PAL_SHARE_OWNER_W|
                           PAL_SHARE_OWNER_R,
                           PAL_CREATE_TRY|PAL_CREATE_ALWAYS,
                           0);
    else
        hdl = DkStreamOpen(uri, PAL_ACCESS_RDWR,
                           PAL_SHARE_OWNER_W|PAL_SHARE_OWNER_R,
                           PAL_CREATE_TRY|PAL_CREATE_ALWAYS,
                           0);

    if (!hdl) {
        if (PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST)
            return 0;
        else
            return -PAL_ERRNO;
    }

    if (obj) {
        *((PAL_HANDLE *) obj) = hdl;
    } else {
        DkObjectClose(hdl);
    }

    return 0;
}

static int output_path (char * path, size_t size, const void * id,
                        struct shim_qstr * qstr)
{
    int len = strlen(path);
    if (qstr)
        qstrsetstr(qstr, path, len);
    return len;
}
int create_dir (const char * prefix, char * path, size_t size,
                struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        int len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_dir, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_file (const char * prefix, char * path, size_t size,
                 struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        int len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_file, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_handle (const char * prefix, char * uri, size_t size,
                   PAL_HANDLE * hdl, unsigned int * id)
{
    unsigned int suffix;

    if (prefix) {
        int len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(uri, prefix, len + 1);
    }

    return create_unique(&name_path, &open_pal_handle, &output_path, uri, size,
                         id ? : &suffix, hdl, NULL);
}
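
/* Debugging hook: warn when the current thread is about to exhaust its
 * stack, or when RSP does not point into the stack recorded for the current
 * thread at all. */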
void check_stack_hook (void)
{
    struct shim_thread * cur_thread = get_cur_thread();

    void * rsp;
    __asm__ volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack) {
        if (rsp - cur_thread->stack < PAL_CB(pagesize))
            sys_printf("*** stack is almost drained (RSP = %p, stack = %p-%p) ***\n",
                       rsp, cur_thread->stack, cur_thread->stack_top);
    } else {
        sys_printf("*** RSP does not match the thread stack (RSP = %p, stack = %p-%p) ***\n",
                   rsp, cur_thread->stack, cur_thread->stack_top);
    }
}
#ifdef PROFILE
static void print_profile_result (PAL_HANDLE hdl, struct shim_profile * root,
                                  int level)
{
    unsigned long total_interval_time  = 0;
    unsigned long total_interval_count = 0;

    for (int i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->root != root || profile->disabled)
            continue;
        switch (profile->type) {
            case OCCURENCE: {
                unsigned int count =
                    atomic_read(&profile->val.occurence.count);
                if (count) {
                    for (int j = 0 ; j < level ; j++)
                        __sys_fprintf(hdl, " ");
                    __sys_fprintf(hdl, "- %s: %u times\n", profile->name, count);
                }
                break;
            }
            case INTERVAL: {
                unsigned int count =
                    atomic_read(&profile->val.interval.count);
                if (count) {
                    unsigned long time =
                        atomic_read(&profile->val.interval.time);
                    unsigned long ind_time = time / count;
                    total_interval_time  += time;
                    total_interval_count += count;
                    for (int j = 0 ; j < level ; j++)
                        __sys_fprintf(hdl, " ");
                    __sys_fprintf(hdl, "- (%11.11lu) %s: %u times, %lu msec\n",
                                  time, profile->name, count, ind_time);
                }
                break;
            }
            case CATAGORY:
                for (int j = 0 ; j < level ; j++)
                    __sys_fprintf(hdl, " ");
                __sys_fprintf(hdl, "- %s:\n", profile->name);
                print_profile_result(hdl, profile, level + 1);
                break;
        }
    }

    if (total_interval_count) {
        __sys_fprintf(hdl, " - (%11.11lu) total: %lu times, %lu msec\n",
                      total_interval_time, total_interval_count,
                      total_interval_time / total_interval_count);
    }
}
#endif /* PROFILE */
static struct atomic_int in_terminate = { .counter = 0, };

int shim_terminate (void)
{
    debug("terminating the whole process\n");

    /* do the last clean-up of the process */
    shim_clean();
    DkProcessExit(0);
    return 0;
}
int shim_clean (void)
{
    /* Only the first caller performs the cleanup; re-entry is mostly caused
       by an assertion failing inside shim_clean itself. */
    atomic_inc(&in_terminate);
    if (atomic_read(&in_terminate) > 1)
        return 0;

    store_all_msg_persist();

#ifdef PROFILE
    if (ENTER_TIME) {
        switch (shim_get_tls()->context.syscall_nr) {
            case __NR_exit_group:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit_group, ENTER_TIME);
                break;
            case __NR_exit:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit, ENTER_TIME);
                break;
        }
    }

    if (ipc_cld_profile_send()) {
        master_lock();

        PAL_HANDLE hdl = __open_shim_stdio();

        if (hdl) {
            __sys_fprintf(hdl, "******************************\n");
            __sys_fprintf(hdl, "profiling:\n");
            print_profile_result(hdl, &profile_root, 0);
            __sys_fprintf(hdl, "******************************\n");
        }

        master_unlock();
        DkObjectClose(hdl);
    }
#endif

    del_all_ipc_ports(0);

    if (shim_stdio && shim_stdio != (PAL_HANDLE) -1)
        DkObjectClose(shim_stdio);

    shim_stdio = NULL;
    debug("process %u exited with status %d\n", cur_process.vmid & 0xFFFF,
          cur_process.exit_code);
    master_lock();
    DkProcessExit(cur_process.exit_code);
    return 0;
}
int message_confirm (const char * message, const char * options)
{
    char answer;
    int noptions = strlen(options);
    char * option_str = __alloca(noptions * 2 + 3), * str = option_str;
    int ret = 0;

    *(str++) = ' ';
    *(str++) = '[';

    for (int i = 0 ; i < noptions ; i++) {
        *(str++) = options[i];
        *(str++) = '/';
    }

    str--;
    *(str++) = ']';
    *(str++) = ' ';

    master_lock();

    PAL_HANDLE hdl = __open_shim_stdio();
    if (!hdl) {
        master_unlock();
        return -EACCES;
    }

#define WRITE(buf, len)                                                 \
    ({  int _ret = DkStreamWrite(hdl, 0, len, (void *) buf, NULL);      \
        _ret ? : -PAL_ERRNO; })

#define READ(buf, len)                                                  \
    ({  int _ret = DkStreamRead(hdl, 0, len, buf, NULL, 0);             \
        _ret ? : -PAL_ERRNO; })

    if ((ret = WRITE(message, strlen(message))) < 0)
        goto out;

    if ((ret = WRITE(option_str, noptions * 2 + 3)) < 0)
        goto out;

    if ((ret = READ(&answer, 1)) < 0)
        goto out;

out:
    DkObjectClose(hdl);
    master_unlock();
    return (ret < 0) ? ret : answer;
}