shim_init.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU Lesser General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU Lesser General Public License for more details.
  13. You should have received a copy of the GNU Lesser General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_init.c
  17. *
  18. * This file contains entry and exit functions of library OS.
  19. */
  20. #include <shim_internal.h>
  21. #include <shim_tls.h>
  22. #include <shim_thread.h>
  23. #include <shim_handle.h>
  24. #include <shim_vma.h>
  25. #include <shim_checkpoint.h>
  26. #include <shim_fs.h>
  27. #include <shim_ipc.h>
  28. #include <shim_profile.h>
  29. #include <pal.h>
  30. #include <pal_debug.h>
  31. #include <pal_error.h>
  32. #include <sys/mman.h>
  33. #include <asm/unistd.h>
  34. #include <asm/fcntl.h>
/* Page-granularity allocation parameters, initialized in shim_init() from
 * PAL_CB(alloc_align):
 *   allocsize  - allocation alignment in bytes
 *   allocshift - allocsize - 1 (used to round addresses up)
 *   allocmask  - ~allocshift  (used to round addresses down) */
unsigned long allocsize;
unsigned long allocshift;
unsigned long allocmask;

/* The following constants will help matching glibc version with compatible
   SHIM libraries */
#include "glibc-version.h"

const unsigned int glibc_version = GLIBC_VERSION;
  42. static void handle_failure (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
  43. {
  44. SHIM_GET_TLS()->pal_errno = (arg <= PAL_ERROR_BOUND) ? arg : 0;
  45. }
/* Abort the library OS: first pause() (presumably to give a debugger a
 * chance to attach — confirm against the project's pause() semantics),
 * then terminate the shim. */
void __abort(void) {
    pause();
    shim_terminate();
}
/* Print a printf-style warning through the shim's output machinery.
 * NOTE(review): __sys_vprintf is passed a *pointer* to the va_list —
 * presumably a project convention; verify against its prototype. */
void warn (const char *format, ...)
{
    va_list args;
    va_start (args, format);
    __sys_vprintf(format, &args);
    va_end (args);
}
/* Called by compiler-emitted stack-protector code when a stack canary is
 * found corrupted.  Deliberately an empty stub here. */
void __stack_chk_fail (void)
{
}
/* Mapping from PAL error codes (array index) to the closest UNIX errno.
 * Indexed by convert_pal_errno(); entry 0 is reserved (no error). */
static int pal_errno_to_unix_errno [PAL_ERROR_BOUND + 1] = {
    /* reserved */ 0,
    /* PAL_ERROR_NOTIMPLEMENTED */ ENOSYS,
    /* PAL_ERROR_NOTDEFINED */ ENOSYS,
    /* PAL_ERROR_NOTSUPPORT */ EACCES,
    /* PAL_ERROR_INVAL */ EINVAL,
    /* PAL_ERROR_TOOLONG */ ENAMETOOLONG,
    /* PAL_ERROR_DENIED */ EACCES,
    /* PAL_ERROR_BADHANDLE */ EFAULT,
    /* PAL_ERROR_STREAMEXIST */ EEXIST,
    /* PAL_ERROR_STREAMNOTEXIST */ ENOENT,
    /* PAL_ERROR_STREAMISFILE */ ENOTDIR,
    /* PAL_ERROR_STREAMISDIR */ EISDIR,
    /* PAL_ERROR_STREAMISDEVICE */ ESPIPE,
    /* PAL_ERROR_INTERRUPTED */ EINTR,
    /* PAL_ERROR_OVERFLOW */ EFAULT,
    /* PAL_ERROR_BADADDR */ EFAULT,
    /* PAL_ERROR_NOMEM */ ENOMEM,
    /* PAL_ERROR_NOTKILLABLE */ EACCES,
    /* PAL_ERROR_INCONSIST */ EFAULT,
    /* PAL_ERROR_TRYAGAIN */ EAGAIN,
    /* PAL_ERROR_ENDOFSTREAM */ 0,
    /* PAL_ERROR_NOTSERVER */ EINVAL,
    /* PAL_ERROR_NOTCONNECTION */ ENOTCONN,
    /* PAL_ERROR_ZEROSIZE */ 0,
    /* PAL_ERROR_CONNFAILED */ ECONNRESET,
    /* PAL_ERROR_ADDRNOTEXIST */ EADDRNOTAVAIL,
};
  88. long convert_pal_errno (long err)
  89. {
  90. return (err >= 0 && err <= PAL_ERROR_BOUND) ?
  91. pal_errno_to_unix_errno[err] : 0;
  92. }
  93. unsigned long parse_int (const char * str)
  94. {
  95. unsigned long num = 0;
  96. int radix = 10;
  97. char c;
  98. if (str[0] == '0') {
  99. str++;
  100. radix = 8;
  101. if (str[0] == 'x') {
  102. str++;
  103. radix = 16;
  104. }
  105. }
  106. while ((c = *(str++))) {
  107. int val;
  108. if (c >= 'A' && c <= 'F')
  109. val = c - 'A' + 10;
  110. else if (c >= 'a' && c <= 'f')
  111. val = c - 'a' + 10;
  112. else if (c >= '0' && c <= '9')
  113. val = c - '0';
  114. else
  115. break;
  116. if (val >= radix)
  117. break;
  118. num = num * radix + val;
  119. }
  120. if (c == 'G' || c == 'g')
  121. num *= 1024 * 1024 * 1024;
  122. else if (c == 'M' || c == 'm')
  123. num *= 1024 * 1024;
  124. else if (c == 'K' || c == 'k')
  125. num *= 1024;
  126. return num;
  127. }
  128. long int glibc_option (const char * opt)
  129. {
  130. char cfg[CONFIG_MAX];
  131. if (strcmp_static(opt, "heap_size")) {
  132. ssize_t ret = get_config(root_config, "glibc.heap_size", cfg, CONFIG_MAX);
  133. if (ret <= 0) {
  134. debug("no glibc option: %s (err=%d)\n", opt, ret);
  135. return -ENOENT;
  136. }
  137. long int heap_size = parse_int(cfg);
  138. debug("glibc option: heap_size = %ld\n", heap_size);
  139. return (long int) heap_size;
  140. }
  141. return -EINVAL;
  142. }
/* Memory range and shim address migrated from the parent process during
 * checkpoint restore. */
void * migrated_memory_start;
void * migrated_memory_end;
void * migrated_shim_addr;

/* End of the initial stack data handed over by the PAL (set in shim_init). */
void * initial_stack;
/* Initial environment; marked migratable so it survives checkpointing. */
const char ** initial_envp __attribute_migratable;

/* NULL-terminated list parsed from LD_LIBRARY_PATH (see read_environs). */
char ** library_paths;

/* Global lock serializing shim-internal state (created in shim_init). */
LOCKTYPE __master_lock;
bool lock_enabled;
  151. void init_tcb (shim_tcb_t * tcb)
  152. {
  153. tcb->canary = SHIM_TLS_CANARY;
  154. tcb->self = tcb;
  155. }
  156. void copy_tcb (shim_tcb_t * new_tcb, const shim_tcb_t * old_tcb)
  157. {
  158. memset(new_tcb, 0, sizeof(shim_tcb_t));
  159. new_tcb->canary = SHIM_TLS_CANARY;
  160. new_tcb->self = new_tcb;
  161. new_tcb->tp = old_tcb->tp;
  162. memcpy(&new_tcb->context, &old_tcb->context, sizeof(struct shim_context));
  163. new_tcb->tid = old_tcb->tid;
  164. new_tcb->debug_buf = old_tcb->debug_buf;
  165. }
  166. /* This function is used to allocate tls before interpreter start running */
  167. void allocate_tls (void * tcb_location, bool user, struct shim_thread * thread)
  168. {
  169. __libc_tcb_t * tcb = tcb_location;
  170. assert(tcb);
  171. tcb->tcb = tcb;
  172. init_tcb(&tcb->shim_tcb);
  173. if (thread) {
  174. thread->tcb = tcb;
  175. thread->user_tcb = user;
  176. tcb->shim_tcb.tp = thread;
  177. tcb->shim_tcb.tid = thread->tid;
  178. } else {
  179. tcb->shim_tcb.tp = NULL;
  180. tcb->shim_tcb.tid = 0;
  181. }
  182. DkSegmentRegister(PAL_SEGMENT_FS, tcb);
  183. assert(SHIM_TLS_CHECK_CANARY());
  184. }
  185. void populate_tls (void * tcb_location, bool user)
  186. {
  187. __libc_tcb_t * tcb = (__libc_tcb_t *) tcb_location;
  188. assert(tcb);
  189. tcb->tcb = tcb;
  190. copy_tcb(&tcb->shim_tcb, SHIM_GET_TLS());
  191. struct shim_thread * thread = (struct shim_thread *) tcb->shim_tcb.tp;
  192. if (thread) {
  193. thread->tcb = tcb;
  194. thread->user_tcb = user;
  195. }
  196. DkSegmentRegister(PAL_SEGMENT_FS, tcb);
  197. assert(SHIM_TLS_CHECK_CANARY());
  198. }
  199. DEFINE_PROFILE_OCCURENCE(alloc_stack, memory);
  200. DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);
  201. #define STACK_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
  202. void * allocate_stack (size_t size, size_t protect_size, bool user)
  203. {
  204. size = ALIGN_UP(size);
  205. protect_size = ALIGN_UP(protect_size);
  206. /* preserve a non-readable, non-writeable page below the user
  207. stack to stop user program to clobber other vmas */
  208. void * stack = NULL;
  209. int flags = STACK_FLAGS|(user ? 0 : VMA_INTERNAL);
  210. if (user) {
  211. stack = bkeep_unmapped_heap(size + protect_size, PROT_NONE,
  212. flags, NULL, 0, "stack");
  213. if (!stack)
  214. return NULL;
  215. stack = (void *)
  216. DkVirtualMemoryAlloc(stack, size + protect_size,
  217. 0, PAL_PROT_NONE);
  218. } else {
  219. stack = system_malloc(size + protect_size);
  220. }
  221. if (!stack)
  222. return NULL;
  223. ADD_PROFILE_OCCURENCE(alloc_stack, size + protect_size);
  224. INC_PROFILE_OCCURENCE(alloc_stack_count);
  225. stack += protect_size;
  226. DkVirtualMemoryProtect(stack, size, PAL_PROT_READ|PAL_PROT_WRITE);
  227. if (bkeep_mprotect(stack, size, PROT_READ|PROT_WRITE, flags) < 0)
  228. return NULL;
  229. debug("allocated stack at %p (size = %d)\n", stack, size);
  230. return stack;
  231. }
/* Lay out argv, envp and the ELF auxiliary vector on a fresh user stack.
 *
 * Strings are copied downward from the top of the stack while the pointer
 * arrays grow upward from the bottom; at the end the populated bottom
 * region is memmove'd up against the string/auxv area and the out
 * parameters *argvp / *envpp / *auxpp are relocated accordingly.
 *
 * Returns 0 on success, or -ENOMEM if stack_size cannot hold everything. */
int populate_user_stack (void * stack, size_t stack_size,
                         int nauxv, elf_auxv_t ** auxpp,
                         const char *** argvp, const char *** envpp)
{
    const char ** argv = *argvp, ** envp = *envpp;
    const char ** new_argv = NULL, ** new_envp = NULL;
    void * stack_bottom = stack;
    void * stack_top = stack + stack_size;

/* Carve space from the top (string data) or the bottom (pointer arrays);
 * return -ENOMEM from the enclosing function if the two ends would cross. */
#define ALLOCATE_TOP(size) \
    ({ if ((stack_top -= (size)) < stack_bottom) return -ENOMEM; \
       stack_top; })

#define ALLOCATE_BOTTOM(size) \
    ({ if ((stack_bottom += (size)) > stack_top) return -ENOMEM; \
       stack_bottom - (size); })

    /* No argv: emit just its NULL terminator, then handle envp. */
    if (!argv) {
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;
        goto copy_envp;
    }

    new_argv = stack_bottom;
    /* First iteration copies argv; the loop is then re-entered once with
     * argv = envp so the environment is copied the same way. */
    while (argv) {
        for (const char ** a = argv ; *a ; a++) {
            const char ** t = ALLOCATE_BOTTOM(sizeof(const char *));
            int len = strlen(*a) + 1;
            char * abuf = ALLOCATE_TOP(len);
            memcpy(abuf, *a, len);
            *t = abuf;
        }

        *((const char **) ALLOCATE_BOTTOM(sizeof(const char *))) = NULL;

copy_envp:
        if (!envp)
            break;
        new_envp = stack_bottom;
        argv = envp;
        envp = NULL;
    }

    if (!new_envp)
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;

    /* Align the used bottom region to 8 bytes, then write a terminator
     * word at the top. */
    stack_bottom = (void *) ((unsigned long) stack_bottom & ~7UL);
    *((unsigned long *) ALLOCATE_TOP(sizeof(unsigned long))) = 0;

    /* Reserve and copy the auxiliary vector just below the strings. */
    if (nauxv) {
        elf_auxv_t * old_auxp = *auxpp;
        *auxpp = ALLOCATE_TOP(sizeof(elf_auxv_t) * nauxv);
        if (old_auxp)
            memcpy(*auxpp, old_auxp, nauxv * sizeof(elf_auxv_t));
    }

    /* Slide the pointer arrays up against the string/auxv area and fix up
     * the out parameters by the distance moved. */
    memmove(stack_top - (stack_bottom - stack), stack, stack_bottom - stack);

    if (new_argv)
        *argvp = (void *) new_argv + (stack_top - stack_bottom);
    if (new_envp)
        *envpp = (void *) new_envp + (stack_top - stack_bottom);

    return 0;
}
/* User stack size; 0 until first initialized from the manifest key
 * "sys.stack.size" (default DEFAULT_SYS_STACK_SIZE). */
unsigned long sys_stack_size = 0;

/* Set up the user stack for the current thread: allocate it (with a
 * guard region of allocsize bytes below) and populate argv/envp/auxv on
 * it.  No-op if there is no current thread or it already has a stack.
 * On success *argpp points at the relocated argv and initial_envp is
 * updated to the relocated environment. */
int init_stack (const char ** argv, const char ** envp, const char *** argpp,
                int nauxv, elf_auxv_t ** auxpp)
{
    if (!sys_stack_size) {
        sys_stack_size = DEFAULT_SYS_STACK_SIZE;
        if (root_config) {
            char stack_cfg[CONFIG_MAX];
            if (get_config(root_config, "sys.stack.size", stack_cfg,
                           CONFIG_MAX) > 0)
                sys_stack_size = ALIGN_UP(parse_int(stack_cfg));
        }
    }

    struct shim_thread * cur_thread = get_cur_thread();

    if (!cur_thread || cur_thread->stack)
        return 0;

    void * stack = allocate_stack(sys_stack_size, allocsize, true);
    if (!stack)
        return -ENOMEM;

    /* After migration the inherited environment takes precedence over
     * the one passed in by the PAL. */
    if (initial_envp)
        envp = initial_envp;

    int ret = populate_user_stack(stack, sys_stack_size,
                                  nauxv, auxpp, &argv, &envp);
    if (ret < 0)
        return ret;

    *argpp = argv;
    initial_envp = envp;

    cur_thread->stack_top = stack + sys_stack_size;
    cur_thread->stack     = stack;
    /* stack_red marks the guard (red) zone below the stack. */
    cur_thread->stack_red = stack - allocsize;

    return 0;
}
  316. int read_environs (const char ** envp)
  317. {
  318. for (const char ** e = envp ; *e ; e++) {
  319. if (strpartcmp_static(*e, "LD_LIBRARY_PATH=")) {
  320. const char * s = *e + static_strlen("LD_LIBRARY_PATH=");
  321. size_t npaths = 2; // One for the first entry, one for the last
  322. // NULL.
  323. for (const char * tmp = s ; *tmp ; tmp++)
  324. if (*tmp == ':')
  325. npaths++;
  326. char** paths = malloc(sizeof(const char *) *
  327. npaths);
  328. if (!paths)
  329. return -ENOMEM;
  330. size_t cnt = 0;
  331. while (*s) {
  332. const char * next;
  333. for (next = s ; *next && *next != ':' ; next++);
  334. size_t len = next - s;
  335. char * str = malloc(len + 1);
  336. if (!str) {
  337. for (size_t i = 0; i < cnt; i++)
  338. free(paths[cnt]);
  339. return -ENOMEM;
  340. }
  341. memcpy(str, s, len);
  342. str[len] = 0;
  343. paths[cnt++] = str;
  344. s = *next ? next + 1 : next;
  345. }
  346. paths[cnt] = NULL;
  347. library_paths = paths;
  348. return 0;
  349. }
  350. }
  351. return 0;
  352. }
/* Parsed manifest; NULL until init_manifest() succeeds. */
struct config_store * root_config = NULL;

/* Thin adapters so the config store can call the shim allocator through
 * plain function pointers. */
static void * __malloc (size_t size)
{
    return malloc(size);
}

static void __free (void * mem)
{
    free(mem);
}
/* Load and parse the manifest.  If the PAL already preloaded it
 * (manifest_preload), use that mapping directly; otherwise map the
 * manifest stream read-only into a bookkept internal region.  On success
 * the parsed config store is installed as root_config. */
int init_manifest (PAL_HANDLE manifest_handle)
{
    int ret = 0;
    void * addr = NULL;
    size_t size = 0, map_size = 0;

#define MAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL)

    if (PAL_CB(manifest_preload.start)) {
        /* Preloaded manifest: reuse the PAL-provided mapping. */
        addr = PAL_CB(manifest_preload.start);
        size = PAL_CB(manifest_preload.end) - PAL_CB(manifest_preload.start);
    } else {
        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQuerybyHandle(manifest_handle, &attr))
            return -PAL_ERRNO;

        size = attr.pending_size;
        map_size = ALIGN_UP(size);
        addr = bkeep_unmapped_any(map_size, PROT_READ, MAP_FLAGS,
                                  NULL, 0, "manifest");
        if (!addr)
            return -ENOMEM;

        void * ret_addr = DkStreamMap(manifest_handle, addr,
                                      PAL_PROT_READ, 0,
                                      ALIGN_UP(size));

        if (!ret_addr) {
            bkeep_munmap(addr, map_size, MAP_FLAGS);
            return -ENOMEM;
        } else {
            assert(addr == ret_addr);
        }
    }

    struct config_store * new_root_config = malloc(sizeof(struct config_store));
    if (!new_root_config) {
        ret = -ENOMEM;
        goto fail;
    }

    new_root_config->raw_data = addr;
    new_root_config->raw_size = size;
    new_root_config->malloc = __malloc;
    new_root_config->free = __free;

    const char * errstring = "Unexpected error";

    if ((ret = read_config(new_root_config, NULL, &errstring)) < 0) {
        sys_printf("Unable to read manifest file: %s\n", errstring);
        goto fail;
    }

    root_config = new_root_config;
    return 0;

fail:
    /* map_size is nonzero only if we mapped the stream ourselves; a
     * preloaded manifest is left untouched. */
    if (map_size) {
        DkStreamUnmap(addr, map_size);
        if (bkeep_munmap(addr, map_size, MAP_FLAGS) < 0)
            bug();
    }
    /* free(NULL) is harmless when the allocation itself failed. */
    free(new_root_config);
    return ret;
}
#ifdef PROFILE
/* Root node of the profiling tree. */
struct shim_profile profile_root;
#endif
/* Locate argv, envp and the ELF auxiliary vector inside the initial
 * stack blob handed over by the PAL: argv starts at the cookie, envp
 * follows argv's NULL terminator, and auxv follows envp's NULL
 * terminator. */
# define FIND_ARG_COMPONENTS(cookie, argc, argv, envp, auxp)    \
    do {                                                        \
        void *_tmp = (cookie);                                  \
        (argv) = _tmp;                                          \
        _tmp += sizeof(char *) * ((argc) + 1);                  \
        (envp) = _tmp;                                          \
        for ( ; *(char **) _tmp; _tmp += sizeof(char *));       \
        (auxp) = _tmp + sizeof(char *);                         \
    } while (0)
  428. static void * __process_auxv (elf_auxv_t * auxp)
  429. {
  430. elf_auxv_t * av;
  431. for (av = auxp; av->a_type != AT_NULL; av++)
  432. switch (av->a_type) {
  433. default: break;
  434. }
  435. return av + 1;
  436. }
/* The auxiliary vector is followed by a NULL (uint64_t) terminator on
 * the initial stack; verify it and step past it. */
#define FIND_LAST_STACK(stack)                      \
    do {                                            \
        /* check if exist a NULL end */             \
        assert(*(uint64_t *) stack == 0);           \
        stack += sizeof(uint64_t);                  \
    } while (0)
#ifdef PROFILE
/* Enable only the profiles named in the PROFILE_ENABLED environment
 * variable (comma-separated list).  If the variable is absent, all
 * profiles keep their defaults.  Enabling a category propagates to its
 * descendants; an enabled leaf re-enables its ancestor chain. */
static void set_profile_enabled (const char ** envp)
{
    const char ** p;
    for (p = envp ; (*p) ; p++)
        if (strpartcmp_static(*p, "PROFILE_ENABLED="))
            break;
    if (!(*p))
        return;

    /* Start with every profile disabled, then re-enable listed names. */
    for (int i = 0 ; i < N_PROFILE ; i++)
        PROFILES[i].disabled = true;

    /* 16 == strlen("PROFILE_ENABLED="); skip past the '='. */
    const char * str = (*p) + 16;
    bool enabled = false;
    while (*str) {
        const char * next = str;
        for ( ; (*next) && (*next) != ',' ; next++);
        if (next > str) {
            int len = next - str;
            for (int i = 0 ; i < N_PROFILE ; i++) {
                struct shim_profile * profile = &PROFILES[i];
                if (!memcmp(profile->name, str, len) && !profile->name[len]) {
                    profile->disabled = false;
                    if (profile->type == CATAGORY)
                        enabled = true;
                }
            }
        }
        str = (*next) ? next + 1 : next;
    }

    /* Propagate enabled categories to their children until a fixed
     * point is reached. */
    while (enabled) {
        enabled = false;
        for (int i = 0 ; i < N_PROFILE ; i++) {
            struct shim_profile * profile = &PROFILES[i];
            if (!profile->disabled || profile->root == &profile_)
                continue;
            if (!profile->root->disabled) {
                profile->disabled = false;
                if (profile->type == CATAGORY)
                    enabled = true;
            }
        }
    }

    /* Finally make sure every enabled leaf has its whole ancestor chain
     * enabled as well. */
    for (int i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->type == CATAGORY || profile->disabled)
            continue;
        for (profile = profile->root ;
             profile != &profile_ && profile->disabled ;
             profile = profile->root)
            profile->disabled = false;
    }
}
#endif
/* In a child process: read the newproc_header that the parent wrote over
 * the parent-process stream.  Returns the parent-reported failure code
 * (0 on success), or -errno if the read itself failed.
 * NOTE(review): only a zero-byte read is treated as failure — a short
 * read is not detected; confirm DkStreamRead's guarantees here. */
static int init_newproc (struct newproc_header * hdr)
{
    BEGIN_PROFILE_INTERVAL();

    int bytes = DkStreamRead(PAL_CB(parent_process), 0,
                             sizeof(struct newproc_header), hdr,
                             NULL, 0);
    if (!bytes)
        return -PAL_ERRNO;

    SAVE_PROFILE_INTERVAL(child_wait_header);
    SAVE_PROFILE_INTERVAL_SINCE(child_receive_header, hdr->write_proc_time);
    return hdr->failure;
}
/* Profiling buckets: PAL startup phases (reported by pal_control) and the
 * individual shim initialization steps run from shim_init(). */
DEFINE_PROFILE_CATAGORY(pal, );
DEFINE_PROFILE_INTERVAL(pal_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_host_specific_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_relocation_time, pal);
DEFINE_PROFILE_INTERVAL(pal_linking_time, pal);
DEFINE_PROFILE_INTERVAL(pal_manifest_loading_time, pal);
DEFINE_PROFILE_INTERVAL(pal_allocation_time, pal);
DEFINE_PROFILE_INTERVAL(pal_tail_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_child_creation_time, pal);

DEFINE_PROFILE_CATAGORY(init, );
DEFINE_PROFILE_INTERVAL(init_randgen, init);
DEFINE_PROFILE_INTERVAL(init_vma, init);
DEFINE_PROFILE_INTERVAL(init_slab, init);
DEFINE_PROFILE_INTERVAL(init_str_mgr, init);
DEFINE_PROFILE_INTERVAL(init_internal_map, init);
DEFINE_PROFILE_INTERVAL(init_fs, init);
DEFINE_PROFILE_INTERVAL(init_dcache, init);
DEFINE_PROFILE_INTERVAL(init_handle, init);
DEFINE_PROFILE_INTERVAL(read_from_checkpoint, init);
DEFINE_PROFILE_INTERVAL(read_from_file, init);
DEFINE_PROFILE_INTERVAL(init_newproc, init);
DEFINE_PROFILE_INTERVAL(init_mount_root, init);
DEFINE_PROFILE_INTERVAL(init_from_checkpoint_file, init);
DEFINE_PROFILE_INTERVAL(restore_from_file, init);
DEFINE_PROFILE_INTERVAL(init_manifest, init);
DEFINE_PROFILE_INTERVAL(init_ipc, init);
DEFINE_PROFILE_INTERVAL(init_thread, init);
DEFINE_PROFILE_INTERVAL(init_important_handles, init);
DEFINE_PROFILE_INTERVAL(init_mount, init);
DEFINE_PROFILE_INTERVAL(init_async, init);
DEFINE_PROFILE_INTERVAL(init_stack, init);
DEFINE_PROFILE_INTERVAL(read_environs, init);
DEFINE_PROFILE_INTERVAL(init_loader, init);
DEFINE_PROFILE_INTERVAL(init_ipc_helper, init);
DEFINE_PROFILE_INTERVAL(init_signal, init);
/* CALL_INIT simply invokes an init function; RUN_INIT additionally treats
 * a negative return as fatal (report and terminate the shim) and records
 * a profiling interval named after the function. */
#define CALL_INIT(func, args ...)   func(args)

#define RUN_INIT(func, ...)                                             \
    do {                                                                \
        int _err = CALL_INIT(func, ##__VA_ARGS__);                      \
        if (_err < 0) {                                                 \
            sys_printf("shim_init() in " #func " (%d)\n", _err);        \
            shim_terminate();                                           \
        }                                                               \
        SAVE_PROFILE_INTERVAL(func);                                    \
    } while (0)
extern PAL_HANDLE thread_start_event;

/* Main entry point of the library OS, called by the loader after the PAL
 * has set up the process.  Initializes the TLS, all shim subsystems, the
 * user stack, and — when this process was forked or resumed — restores
 * the checkpoint.  On return *return_stack holds the stack pointer the
 * interpreter should start with. */
int shim_init (int argc, void * args, void ** return_stack)
{
    debug_handle = PAL_CB(debug_stream);
    cur_process.vmid = (IDTYPE) PAL_CB(process_id);

    /* create the initial TCB, shim can not be run without a tcb */
    __libc_tcb_t tcb;
    memset(&tcb, 0, sizeof(__libc_tcb_t));
    allocate_tls(&tcb, false, NULL);
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
#endif

    debug("host: %s\n", PAL_CB(host_type));

    DkSetExceptionHandler(&handle_failure, PAL_EVENT_FAILURE, 0);

    /* Derive the allocation alignment helpers from the PAL. */
    allocsize = PAL_CB(alloc_align);
    allocshift = allocsize - 1;
    allocmask = ~allocshift;

    create_lock(__master_lock);

    const char ** argv, ** envp, ** argp = NULL;
    elf_auxv_t * auxp;

    /* call to figure out where the arguments are */
    FIND_ARG_COMPONENTS(args, argc, argv, envp, auxp);
    initial_stack = __process_auxv(auxp);
    int nauxv = (elf_auxv_t *) initial_stack - auxp;
    FIND_LAST_STACK(initial_stack);

#ifdef PROFILE
    set_profile_enabled(envp);
#endif

    struct newproc_header hdr;
    void * cpaddr = NULL;
#ifdef PROFILE
    unsigned long begin_create_time = 0;
#endif

    BEGIN_PROFILE_INTERVAL();
    RUN_INIT(init_randgen);
    RUN_INIT(init_vma);
    RUN_INIT(init_slab);
    RUN_INIT(read_environs, envp);
    RUN_INIT(init_str_mgr);
    RUN_INIT(init_internal_map);
    RUN_INIT(init_fs);
    RUN_INIT(init_dcache);
    RUN_INIT(init_handle);

    debug("shim loaded at %p, ready to initialize\n", &__load_address);

    /* "-resume <file>": restart from a checkpoint file instead of a
     * fresh start; the two consumed arguments are stripped from argv. */
    if (argc && argv[0][0] == '-') {
        if (strcmp_static(argv[0], "-resume") && argc >= 2) {
            const char * filename = *(argv + 1);
            argc -= 2;
            argv += 2;
            RUN_INIT(init_mount_root);
            RUN_INIT(init_from_checkpoint_file, filename, &hdr.checkpoint,
                     &cpaddr);
            goto restore;
        }
    }

    /* Forked child: receive the newproc header from the parent and, if
     * it carries a checkpoint, migrate it into this process. */
    if (PAL_CB(parent_process)) {
        RUN_INIT(init_newproc, &hdr);
        SAVE_PROFILE_INTERVAL_SET(child_created_in_new_process,
                                  hdr.create_time, begin_time);
#ifdef PROFILE
        begin_create_time = hdr.begin_create_time;
#endif

        if (hdr.checkpoint.hdr.size)
            RUN_INIT(do_migration, &hdr.checkpoint, &cpaddr);
    }

    if (cpaddr) {
restore:
        thread_start_event = DkNotificationEventCreate(PAL_FALSE);
        RUN_INIT(restore_checkpoint,
                 &hdr.checkpoint.hdr, &hdr.checkpoint.mem,
                 (ptr_t) cpaddr, 0);
    }

    if (PAL_CB(manifest_handle))
        RUN_INIT(init_manifest, PAL_CB(manifest_handle));

    RUN_INIT(init_mount_root);
    RUN_INIT(init_ipc);
    RUN_INIT(init_thread);
    RUN_INIT(init_mount);
    RUN_INIT(init_important_handles);
    RUN_INIT(init_async);
    RUN_INIT(init_stack, argv, envp, &argp, nauxv, &auxp);
    RUN_INIT(init_loader);
    RUN_INIT(init_ipc_helper);
    RUN_INIT(init_signal);

    if (PAL_CB(parent_process)) {
        /* Notify the parent process */
        struct newproc_response res;
        res.child_vmid = cur_process.vmid;
        res.failure = 0;
        if (!DkStreamWrite(PAL_CB(parent_process), 0,
                           sizeof(struct newproc_response),
                           &res, NULL))
            return -PAL_ERRNO;
    }

    debug("shim process initialized\n");

#ifdef PROFILE
    if (begin_create_time)
        SAVE_PROFILE_INTERVAL_SINCE(child_total_migration_time,
                                    begin_create_time);
#endif

    /* Record the PAL startup phase timings reported by pal_control. */
    SAVE_PROFILE_INTERVAL_SET(pal_startup_time, 0, pal_control.startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_host_specific_startup_time, 0,
                              pal_control.host_specific_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_relocation_time, 0,
                              pal_control.relocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_linking_time, 0, pal_control.linking_time);
    SAVE_PROFILE_INTERVAL_SET(pal_manifest_loading_time, 0,
                              pal_control.manifest_loading_time);
    SAVE_PROFILE_INTERVAL_SET(pal_allocation_time, 0,
                              pal_control.allocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_tail_startup_time, 0,
                              pal_control.tail_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_child_creation_time, 0,
                              pal_control.child_creation_time);

    /* Release any threads waiting for initialization to finish. */
    if (thread_start_event)
        DkEventSet(thread_start_event);

    shim_tcb_t * cur_tcb = SHIM_GET_TLS();
    struct shim_thread * cur_thread = (struct shim_thread *) cur_tcb->tp;

    /* A restored thread resumes its saved context and never returns. */
    if (cur_tcb->context.sp)
        restore_context(&cur_tcb->context);

    if (cur_thread->exec)
        execute_elf_object(cur_thread->exec,
                           argc, argp, nauxv, auxp);

    *return_stack = initial_stack;
    return 0;
}
/* Generic retry loop for creating a uniquely-named object:
 *   mkname - writes a candidate name into name/size, returns its length
 *   create - tries to create the object; 0 on success, >0 if the name is
 *            already taken (retry with a new name), <0 on error
 *   output - optional finalizer; formats the final name and fills qstr
 * On success returns the name length (or output()'s result); negative
 * errors from mkname/create are passed through. */
static int create_unique (int (*mkname) (char *, size_t, void *),
                          int (*create) (const char *, void *),
                          int (*output) (char *, size_t, const void *,
                                         struct shim_qstr *),
                          char * name, size_t size, void * id, void * obj,
                          struct shim_qstr * qstr)
{
    int ret, len;
    while (1) {
        len = mkname(name, size, id);
        if (len < 0)
            return len;

        if ((ret = create(name, obj)) < 0)
            return ret;

        if (ret)
            continue;   /* name collision: try another candidate */

        if (output)
            return output(name, size, id, qstr);

        if (qstr)
            qstrsetstr(qstr, name, len);
        return len;
    }
}
  704. static int name_pipe (char * uri, size_t size, void * id)
  705. {
  706. IDTYPE pipeid;
  707. int len;
  708. getrand(&pipeid, sizeof(pipeid));
  709. debug("creating pipe: pipe.srv:%u\n", pipeid);
  710. if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) == size)
  711. return -ERANGE;
  712. *((IDTYPE *) id) = pipeid;
  713. return len;
  714. }
  715. static int open_pipe (const char * uri, void * obj)
  716. {
  717. PAL_HANDLE pipe = DkStreamOpen(uri, 0, 0, 0, 0);
  718. if (!pipe)
  719. return PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST ? 1 :
  720. -PAL_ERRNO;
  721. if (obj)
  722. *((PAL_HANDLE *) obj) = pipe;
  723. else
  724. DkObjectClose(pipe);
  725. return 0;
  726. }
  727. static int pipe_addr (char * uri, size_t size, const void * id,
  728. struct shim_qstr * qstr)
  729. {
  730. IDTYPE pipeid = *((IDTYPE *) id);
  731. int len;
  732. if ((len = snprintf(uri, size, "pipe:%u", pipeid)) == size)
  733. return -ERANGE;
  734. if (qstr)
  735. qstrsetstr(qstr, uri, len);
  736. return len;
  737. }
  738. int create_pipe (IDTYPE * id, char * uri, size_t size, PAL_HANDLE * hdl,
  739. struct shim_qstr * qstr)
  740. {
  741. IDTYPE pipeid;
  742. int ret = create_unique(&name_pipe, &open_pipe, &pipe_addr,
  743. uri, size, &pipeid, hdl, qstr);
  744. if (ret > 0 && id)
  745. *id = pipeid;
  746. return ret;
  747. }
  748. static int name_path (char * path, size_t size, void * id)
  749. {
  750. unsigned int suffix;
  751. int prefix_len = strlen(path);
  752. int len;
  753. getrand(&suffix, sizeof(suffix));
  754. len = snprintf(path + prefix_len, size - prefix_len, "%08x", suffix);
  755. if (len == size)
  756. return -ERANGE;
  757. *((unsigned int *) id) = suffix;
  758. return prefix_len + len;
  759. }
  760. static int open_dir (const char * path, void * obj)
  761. {
  762. struct shim_handle * dir = NULL;
  763. if (obj) {
  764. dir = get_new_handle();
  765. if (!dir)
  766. return -ENOMEM;
  767. }
  768. int ret = open_namei(dir, NULL, path, O_CREAT|O_EXCL|O_DIRECTORY, 0700,
  769. NULL);
  770. if (ret < 0)
  771. return ret = -EEXIST ? 1 : ret;
  772. if (obj)
  773. *((struct shim_handle **) obj) = dir;
  774. return 0;
  775. }
  776. static int open_file (const char * path, void * obj)
  777. {
  778. struct shim_handle * file = NULL;
  779. if (obj) {
  780. file = get_new_handle();
  781. if (!file)
  782. return -ENOMEM;
  783. }
  784. int ret = open_namei(file, NULL, path, O_CREAT|O_EXCL|O_RDWR, 0600,
  785. NULL);
  786. if (ret < 0)
  787. return ret = -EEXIST ? 1 : ret;
  788. if (obj)
  789. *((struct shim_handle **) obj) = file;
  790. return 0;
  791. }
  792. static int open_pal_handle (const char * uri, void * obj)
  793. {
  794. PAL_HANDLE hdl;
  795. if (strpartcmp_static(uri, "dev:"))
  796. hdl = DkStreamOpen(uri, 0,
  797. PAL_SHARE_OWNER_X|PAL_SHARE_OWNER_W|
  798. PAL_SHARE_OWNER_R,
  799. PAL_CREAT_TRY|PAL_CREAT_ALWAYS,
  800. 0);
  801. else
  802. hdl = DkStreamOpen(uri, PAL_ACCESS_RDWR,
  803. PAL_SHARE_OWNER_W|PAL_SHARE_OWNER_R,
  804. PAL_CREAT_TRY|PAL_CREAT_ALWAYS,
  805. 0);
  806. if (!hdl) {
  807. if (PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST)
  808. return 0;
  809. else
  810. return -PAL_ERRNO;
  811. }
  812. if (obj) {
  813. *((PAL_HANDLE *) obj) = hdl;
  814. } else {
  815. DkObjectClose(hdl);
  816. }
  817. return 0;
  818. }
  819. static int output_path (char * path, size_t size, const void * id,
  820. struct shim_qstr * qstr)
  821. {
  822. int len = strlen(path);
  823. if (qstr)
  824. qstrsetstr(qstr, path, len);
  825. return len;
  826. }
  827. int create_dir (const char * prefix, char * path, size_t size,
  828. struct shim_handle ** hdl)
  829. {
  830. unsigned int suffix;
  831. if (prefix) {
  832. int len = strlen(prefix);
  833. if (len >= size)
  834. return -ERANGE;
  835. memcpy(path, prefix, len + 1);
  836. }
  837. return create_unique(&name_path, &open_dir, &output_path, path, size,
  838. &suffix, hdl, NULL);
  839. }
  840. int create_file (const char * prefix, char * path, size_t size,
  841. struct shim_handle ** hdl)
  842. {
  843. unsigned int suffix;
  844. if (prefix) {
  845. int len = strlen(prefix);
  846. if (len >= size)
  847. return -ERANGE;
  848. memcpy(path, prefix, len + 1);
  849. }
  850. return create_unique(&name_path, &open_file, &output_path, path, size,
  851. &suffix, hdl, NULL);
  852. }
  853. int create_handle (const char * prefix, char * uri, size_t size,
  854. PAL_HANDLE * hdl, unsigned int * id)
  855. {
  856. unsigned int suffix;
  857. if (prefix) {
  858. int len = strlen(prefix);
  859. if (len >= size)
  860. return -ERANGE;
  861. memcpy(uri, prefix, len + 1);
  862. }
  863. return create_unique(&name_path, &open_pal_handle, &output_path, uri, size,
  864. id ? : &suffix, hdl, NULL);
  865. }
/*
 * check_stack_hook - warn when the current thread's stack is nearly full.
 *
 * Reads RSP directly via inline asm and compares it against the
 * (stack, stack_top] range recorded in the current shim thread.  Prints
 * a warning when less than one page of headroom remains, or when RSP is
 * outside the recorded range entirely (i.e. we are running on some
 * other stack).  x86-64 only (uses %rsp).
 */
void check_stack_hook (void)
{
    struct shim_thread * cur_thread = get_cur_thread();
    void * rsp;
    asm volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack) {
        /* fewer than one page left before the stack bottom */
        if (rsp - cur_thread->stack < PAL_CB(pagesize))
            sys_printf("*** stack is almost drained (RSP = %p, stack = %p-%p) ***\n",
                       rsp, cur_thread->stack, cur_thread->stack_top);
    } else {
        sys_printf("*** context dismatched with thread stack (RSP = %p, stack = %p-%p) ***\n",
                   rsp, cur_thread->stack, cur_thread->stack_top);
    }
}
  880. #ifdef PROFILE
  881. static void print_profile_result (PAL_HANDLE hdl, struct shim_profile * root,
  882. int level)
  883. {
  884. unsigned long total_interval_time = 0;
  885. unsigned long total_interval_count = 0;
  886. for (int i = 0 ; i < N_PROFILE ; i++) {
  887. struct shim_profile * profile = &PROFILES[i];
  888. if (profile->root != root || profile->disabled)
  889. continue;
  890. switch (profile->type) {
  891. case OCCURENCE: {
  892. unsigned int count =
  893. atomic_read(&profile->val.occurence.count);
  894. if (count) {
  895. for (int j = 0 ; j < level ; j++)
  896. __sys_fprintf(hdl, " ");
  897. __sys_fprintf(hdl, "- %s: %u times\n", profile->name, count);
  898. }
  899. break;
  900. }
  901. case INTERVAL: {
  902. unsigned int count =
  903. atomic_read(&profile->val.interval.count);
  904. if (count) {
  905. unsigned long time =
  906. atomic_read(&profile->val.interval.time);
  907. unsigned long ind_time = time / count;
  908. total_interval_time += time;
  909. total_interval_count += count;
  910. for (int j = 0 ; j < level ; j++)
  911. __sys_fprintf(hdl, " ");
  912. __sys_fprintf(hdl, "- (%11.11lu) %s: %u times, %lu msec\n",
  913. time, profile->name, count, ind_time);
  914. }
  915. break;
  916. }
  917. case CATAGORY:
  918. for (int j = 0 ; j < level ; j++)
  919. __sys_fprintf(hdl, " ");
  920. __sys_fprintf(hdl, "- %s:\n", profile->name);
  921. print_profile_result(hdl, profile, level + 1);
  922. break;
  923. }
  924. }
  925. if (total_interval_count) {
  926. __sys_fprintf(hdl, " - (%11.11u) total: %u times, %lu msec\n",
  927. total_interval_time, total_interval_count,
  928. total_interval_time / total_interval_count);
  929. }
  930. }
  931. #endif /* PROFILE */
/* Guard counter: makes process cleanup in shim_clean() run exactly once,
 * even when multiple paths (e.g. a failing assertion during cleanup)
 * re-enter termination. */
static struct atomic_int in_terminate = { .counter = 0, };
  933. int shim_terminate (void)
  934. {
  935. debug("teminating the whole process\n");
  936. /* do last clean-up of the process */
  937. shim_clean();
  938. DkProcessExit(0);
  939. return 0;
  940. }
/*
 * shim_clean - final process cleanup before exit.
 *
 * Idempotent: the in_terminate counter turns every call after the first
 * into a no-op.  Flushes persistent IPC messages, dumps profiling data
 * (PROFILE builds only), tears down IPC ports and shim stdio, and then
 * exits through the PAL with the process's recorded exit code.  Never
 * returns in practice.
 */
int shim_clean (void)
{
    /* preventing multiple cleanup, this is mostly caused by
       assertion in shim_clean */
    atomic_inc(&in_terminate);
    if (atomic_read(&in_terminate) > 1)
        return 0;

    store_all_msg_persist();

#ifdef PROFILE
    /* Attribute the time since syscall entry to the exiting syscall. */
    if (ENTER_TIME) {
        switch (SHIM_GET_TLS()->context.syscall_nr) {
            case __NR_exit_group:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit_group, ENTER_TIME);
                break;
            case __NR_exit:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit, ENTER_TIME);
                break;
        }
    }

    /* If the profile data cannot be handed to the parent over IPC,
       print it to the shim's stdio instead. */
    if (ipc_cld_profile_send()) {
        master_lock();

        PAL_HANDLE hdl = __open_shim_stdio();

        if (hdl) {
            __sys_fprintf(hdl, "******************************\n");
            __sys_fprintf(hdl, "profiling:\n");
            print_profile_result(hdl, &profile_root, 0);
            __sys_fprintf(hdl, "******************************\n");
        }

        master_unlock();
        /* NOTE(review): hdl may be NULL here — confirm DkObjectClose
           tolerates a NULL handle. */
        DkObjectClose(hdl);
    }
#endif

    del_all_ipc_ports(0);

    /* shim_stdio uses (PAL_HANDLE)-1 as a "not available" sentinel. */
    if (shim_stdio && shim_stdio != (PAL_HANDLE) -1)
        DkObjectClose(shim_stdio);

    shim_stdio = NULL;
    debug("process %u successfully terminated\n", cur_process.vmid & 0xFFFF);

    /* Taken but deliberately never released: the process exits while
       holding the master lock so no other thread can interleave. */
    master_lock();
    DkProcessExit(cur_process.exit_code);
    return 0;
}
/*
 * message_confirm - prompt the user on shim stdio and read one character.
 *
 * Writes `message` followed by the options rendered as " [a/b/c] "
 * (one character per entry in `options`), then reads a single-character
 * answer.  Returns the answer character as a non-negative int, or a
 * negative errno when stdio cannot be opened or the I/O fails.
 * Serializes on the master lock for the whole exchange.
 */
int message_confirm (const char * message, const char * options)
{
    char answer;
    int noptions = strlen(options);
    /* " [" + (char + '/') per option, last '/' overwritten by ']', + ' ' */
    char * option_str = __alloca(noptions * 2 + 3), * str = option_str;
    int ret = 0;

    *(str++) = ' ';
    *(str++) = '[';

    for (int i = 0 ; i < noptions ; i++) {
        *(str++) = options[i];
        *(str++) = '/';
    }

    /* step back over the trailing '/' before closing the bracket */
    str--;
    *(str++) = ']';
    *(str++) = ' ';

    master_lock();
    PAL_HANDLE hdl = __open_shim_stdio();
    if (!hdl) {
        master_unlock();
        return -EACCES;
    }

/* Statement-expression helpers (capture `hdl` from the enclosing scope):
   perform the PAL I/O and map a zero result to -PAL_ERRNO. */
#define WRITE(buf, len) \
    ({ int _ret = DkStreamWrite(hdl, 0, len, (void *) buf, NULL); \
       _ret ? : -PAL_ERRNO; })

#define READ(buf, len) \
    ({ int _ret = DkStreamRead(hdl, 0, len, buf, NULL, 0); \
       _ret ? : -PAL_ERRNO; })

    if ((ret = WRITE(message, strlen(message))) < 0)
        goto out;

    if ((ret = WRITE(option_str, noptions * 2 + 3)) < 0)
        goto out;

    if ((ret = READ(&answer, 1)) < 0)
        goto out;

out:
    DkObjectClose(hdl);
    master_unlock();
    return (ret < 0) ? ret : answer;
}