shim_init.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_init.c
 *
 * This file contains entry and exit functions of library OS.
 */
#include <shim_internal.h>
#include <shim_tls.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>
#include <shim_ipc.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_debug.h>
#include <pal_error.h>

#include <sys/mman.h>
#include <asm/unistd.h>
#include <asm/fcntl.h>

unsigned long allocsize;
unsigned long allocshift;
unsigned long allocmask;
/* The following constants help match the glibc version with compatible
   SHIM libraries */
#include "glibc-version.h"

const unsigned int glibc_vers_2_17 = GLIBC_VERSION_2_17;
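
/* PAL_EVENT_FAILURE handler: stash the PAL error number in the current
 * thread's TCB so later code can report it. */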
static void handle_failure (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    SHIM_GET_TLS()->pal_errno = (arg <= PAL_ERROR_BOUND) ? arg : 0;
}
void __assert_fail (const char * assertion, const char * file,
                    unsigned int line, const char * function)
{
    __sys_printf("assert failed %s:%d %s\n", file, line, assertion);
    pause();
    shim_terminate();
}

void __stack_chk_fail (void)
{
}
static int pal_errno_to_unix_errno [PAL_ERROR_BOUND + 1] = {
    /* reserved                 */  0,
    /* PAL_ERROR_NOTIMPLEMENTED */  ENOSYS,
    /* PAL_ERROR_NOTDEFINED     */  ENOSYS,
    /* PAL_ERROR_NOTSUPPORT     */  EACCES,
    /* PAL_ERROR_INVAL          */  EINVAL,
    /* PAL_ERROR_TOOLONG        */  ENAMETOOLONG,
    /* PAL_ERROR_DENIED         */  EACCES,
    /* PAL_ERROR_BADHANDLE      */  EFAULT,
    /* PAL_ERROR_STREAMEXIST    */  EEXIST,
    /* PAL_ERROR_STREAMNOTEXIST */  ENOENT,
    /* PAL_ERROR_STREAMISFILE   */  ENOTDIR,
    /* PAL_ERROR_STREAMISDIR    */  EISDIR,
    /* PAL_ERROR_STREAMISDEVICE */  ESPIPE,
    /* PAL_ERROR_INTERRUPTED    */  EINTR,
    /* PAL_ERROR_OVERFLOW       */  EFAULT,
    /* PAL_ERROR_BADADDR        */  EFAULT,
    /* PAL_ERROR_NOMEM          */  ENOMEM,
    /* PAL_ERROR_NOTKILLABLE    */  EACCES,
    /* PAL_ERROR_INCONSIST      */  EFAULT,
    /* PAL_ERROR_TRYAGAIN       */  EAGAIN,
    /* PAL_ERROR_ENDOFSTREAM    */  0,
    /* PAL_ERROR_NOTSERVER      */  EINVAL,
    /* PAL_ERROR_NOTCONNECTION  */  ENOTCONN,
    /* PAL_ERROR_ZEROSIZE       */  0,
    /* PAL_ERROR_CONNFAILED     */  ECONNRESET,
    /* PAL_ERROR_ADDRNOTEXIST   */  EADDRNOTAVAIL,
};
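
/* Translate a PAL error code into the corresponding Unix errno value;
 * out-of-range codes map to 0. */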
long convert_pal_errno (long err)
{
    return (err >= 0 && err <= PAL_ERROR_BOUND) ?
           pal_errno_to_unix_errno[err] : 0;
}

void * initial_stack = NULL;
const char ** initial_envp __attribute_migratable = NULL;

void * migrated_memory_start = 0;
void * migrated_memory_end = 0;
extern void * migrated_shim_addr;

const char ** library_paths = NULL;

bool in_gdb = false;

LOCKTYPE __master_lock;
bool lock_enabled = false;
void init_tcb (shim_tcb_t * tcb)
{
    tcb->canary = SHIM_TLS_CANARY;
    tcb->self = tcb;
}

void copy_tcb (shim_tcb_t * new_tcb, const shim_tcb_t * old_tcb)
{
    memset(new_tcb, 0, sizeof(shim_tcb_t));
    new_tcb->canary = SHIM_TLS_CANARY;
    new_tcb->self = new_tcb;
    new_tcb->tp = old_tcb->tp;
    memcpy(&new_tcb->context, &old_tcb->context, sizeof(struct shim_context));
    new_tcb->tid = old_tcb->tid;
    new_tcb->debug_buf = old_tcb->debug_buf;
}
/* This function is used to allocate the TLS before the interpreter starts
   running */
void allocate_tls (void * tcb_location, struct shim_thread * thread)
{
    __libc_tcb_t * tcb = tcb_location;
    assert(tcb);
    tcb->tcb = tcb;
    init_tcb(&tcb->shim_tcb);

    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = false;
        tcb->shim_tcb.tp = thread;
        tcb->shim_tcb.tid = thread->tid;
    } else {
        tcb->shim_tcb.tp = NULL;
        tcb->shim_tcb.tid = 0;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(SHIM_TLS_CHECK_CANARY());
}

void populate_tls (void * tcb_location, bool user)
{
    __libc_tcb_t * tcb = (__libc_tcb_t *) tcb_location;
    assert(tcb);
    tcb->tcb = tcb;
    copy_tcb(&tcb->shim_tcb, SHIM_GET_TLS());

    struct shim_thread * thread = (struct shim_thread *) tcb->shim_tcb.tp;
    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(SHIM_TLS_CHECK_CANARY());
}
DEFINE_PROFILE_OCCURENCE(alloc_stack, memory);
DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);

#define STACK_FLAGS     (MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL)
void * allocate_stack (size_t size, size_t protect_size, bool user)
{
    size = ALIGN_UP(size);
    protect_size = ALIGN_UP(protect_size);

    /* preserve a non-readable, non-writable page below the user
       stack to stop the user program from clobbering other VMAs */
    void * stack = user ?
                   get_unmapped_vma(size + protect_size, STACK_FLAGS) :
                   NULL;

    if (user)
        stack = (void *) DkVirtualMemoryAlloc(stack, size + protect_size,
                                              0, PAL_PROT_READ|PAL_PROT_WRITE);
    else
        stack = system_malloc(size + protect_size);

    if (!stack)
        return NULL;

    ADD_PROFILE_OCCURENCE(alloc_stack, size + protect_size);
    INC_PROFILE_OCCURENCE(alloc_stack_count);

    if (protect_size &&
        !DkVirtualMemoryProtect(stack, protect_size, PAL_PROT_NONE))
        return NULL;

    stack += protect_size;

    if (user) {
        if (bkeep_mmap(stack, size, PROT_READ|PROT_WRITE,
                       STACK_FLAGS, NULL, 0, "stack") < 0)
            return NULL;

        if (protect_size &&
            bkeep_mmap(stack - protect_size, protect_size, 0,
                       STACK_FLAGS, NULL, 0, NULL) < 0)
            return NULL;
    }

    debug("allocated stack at %p (size = %d)\n", stack, size);
    return stack;
}
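
/* Lay out argv, envp, and the ELF auxiliary vector on a freshly allocated
 * user stack: the pointer arrays grow up from the bottom of the region
 * while the strings and auxv entries are carved from the top, and the
 * pointer arrays are finally packed against the top of the stack. */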
int populate_user_stack (void * stack, size_t stack_size,
                         int nauxv, elf_auxv_t ** auxpp,
                         const char *** argvp, const char *** envpp)
{
    const char ** argv = *argvp, ** envp = *envpp;
    const char ** new_argv = NULL, ** new_envp = NULL;
    void * stack_bottom = stack;
    void * stack_top = stack + stack_size;

#define ALLOCATE_TOP(size)                                          \
    ({ if ((stack_top -= (size)) < stack_bottom) return -ENOMEM;    \
       stack_top; })

#define ALLOCATE_BOTTOM(size)                                       \
    ({ if ((stack_bottom += (size)) > stack_top) return -ENOMEM;    \
       stack_bottom - (size); })

    if (!argv) {
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;
        goto copy_envp;
    }

    new_argv = stack_bottom;
    while (argv) {
        for (const char ** a = argv ; *a ; a++) {
            const char ** t = ALLOCATE_BOTTOM(sizeof(const char *));
            int len = strlen(*a) + 1;
            char * abuf = ALLOCATE_TOP(len);
            memcpy(abuf, *a, len);
            *t = abuf;
        }

        *((const char **) ALLOCATE_BOTTOM(sizeof(const char *))) = NULL;
copy_envp:
        if (!envp)
            break;
        new_envp = stack_bottom;
        argv = envp;
        envp = NULL;
    }

    if (!new_envp)
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;

    stack_bottom = (void *) ((unsigned long) stack_bottom & ~7UL);
    *((unsigned long *) ALLOCATE_TOP(sizeof(unsigned long))) = 0;

    if (nauxv) {
        elf_auxv_t * old_auxp = *auxpp;
        *auxpp = ALLOCATE_TOP(sizeof(elf_auxv_t) * nauxv);
        if (old_auxp)
            memcpy(*auxpp, old_auxp, nauxv * sizeof(elf_auxv_t));
    }

    memmove(stack_top - (stack_bottom - stack), stack, stack_bottom - stack);

    if (new_argv)
        *argvp = (void *) new_argv + (stack_top - stack_bottom);
    if (new_envp)
        *envpp = (void *) new_envp + (stack_top - stack_bottom);

    return 0;
}
unsigned long sys_stack_size = 0;
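
/* Allocate the initial user stack for the current thread (honoring the
 * optional "sys.stack.size" manifest entry) and populate it with argv,
 * envp, and the auxiliary vector. */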
int init_stack (const char ** argv, const char ** envp, const char *** argpp,
                int nauxv, elf_auxv_t ** auxpp)
{
    if (!sys_stack_size) {
        sys_stack_size = DEFAULT_SYS_STACK_SIZE;

        if (root_config) {
            char stack_cfg[CONFIG_MAX];
            if (get_config(root_config, "sys.stack.size", stack_cfg,
                           CONFIG_MAX) > 0)
                sys_stack_size = ALIGN_UP(atoi(stack_cfg));
        }
    }

    struct shim_thread * cur_thread = get_cur_thread();

    if (!cur_thread || cur_thread->stack)
        return 0;

    void * stack = allocate_stack(sys_stack_size, allocsize, true);
    if (!stack)
        return -ENOMEM;

    if (initial_envp)
        envp = initial_envp;

    int ret = populate_user_stack(stack, sys_stack_size,
                                  nauxv, auxpp, &argv, &envp);
    if (ret < 0)
        return ret;

    *argpp = argv;
    initial_envp = envp;

    cur_thread->stack_top = stack + sys_stack_size;
    cur_thread->stack     = stack;
    cur_thread->stack_red = stack - allocsize;

    return 0;
}
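
/* Scan the environment for variables the shim cares about:
 * LD_LIBRARY_PATH populates library_paths, and IN_GDB=1 sets in_gdb. */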
int read_environs (const char ** envp)
{
    for (const char ** e = envp ; *e ; e++) {
        switch ((*e)[0]) {
            case 'L': {
                if (!memcmp(*e, "LD_LIBRARY_PATH=", 16)) {
                    int npaths = 0;
                    for (const char * s = (*e) + 16 ; *s ; s++)
                        if (*s == ':')
                            npaths++;

                    const char ** paths = malloc(sizeof(const char *) *
                                                 (npaths + 1));
                    if (!paths)
                        return -ENOMEM;

                    const char * s = (*e) + 16, * next;
                    int cnt = 0;
                    while (*s) {
                        for (next = s ; *next && *next != ':' ; next++);
                        int len = next - s;
                        char * str = malloc(len + 1);
                        if (!str)
                            return -ENOMEM;
                        memcpy(str, s, len);
                        str[len] = 0;
                        paths[cnt++] = str;
                        s = *next ? next + 1 : next;
                    }

                    paths[cnt] = NULL;
                    library_paths = paths;
                    break;
                }
                break;
            }

            case 'I': {
                if (!memcmp(*e, "IN_GDB=1", 8)) {
                    in_gdb = true;
                    break;
                }
                break;
            }
        }
    }

    return 0;
}
struct config_store * root_config = NULL;

static void * __malloc (int size)
{
    return malloc(size);
}

static void __free (void * mem)
{
    free(mem);
}

extern bool ask_for_checkpoint;
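
/* Map the manifest handle into memory and parse it into root_config;
 * also honor the "sys.ask_for_checkpoint" manifest option. */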
int init_manifest (PAL_HANDLE manifest_handle)
{
    PAL_STREAM_ATTR attr;

    if (!DkStreamAttributesQuerybyHandle(manifest_handle, &attr))
        return -PAL_ERRNO;

    size_t cfg_size = attr.pending_size;
    void * cfg_addr = (void *) DkStreamMap(manifest_handle, NULL,
                                           PAL_PROT_READ|PAL_PROT_WRITECOPY, 0,
                                           ALIGN_UP(cfg_size));
    if (!cfg_addr)
        return -PAL_ERRNO;

    root_config = malloc(sizeof(struct config_store));
    root_config->raw_data = cfg_addr;
    root_config->raw_size = cfg_size;
    root_config->malloc = __malloc;
    root_config->free = __free;

    const char * errstring = "Unexpected error";
    int ret = 0;

    if ((ret = read_config(root_config, NULL, &errstring)) < 0) {
        root_config = NULL;
        sys_printf("Unable to read manifest file: %s\n", errstring);
        return ret;
    }

    char cfgbuf[CONFIG_MAX];

    if (get_config(root_config, "sys.ask_for_checkpoint", cfgbuf,
                   CONFIG_MAX) > 0 &&
        cfgbuf[0] == '1' && !cfgbuf[1])
        ask_for_checkpoint = true;

    return 0;
}
#ifdef PROFILE
struct shim_profile profile_root;
#endif

# define FIND_ARG_COMPONENTS(cookie, argc, argv, envp, auxp)    \
    do {                                                        \
        void *_tmp = (cookie);                                  \
        (argv) = _tmp;                                          \
        _tmp += sizeof(char *) * ((argc) + 1);                  \
        (envp) = _tmp;                                          \
        for ( ; *(char **) _tmp; _tmp += sizeof(char *));       \
        (auxp) = _tmp + sizeof(char *);                         \
    } while (0)
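
/* Walk the auxiliary vector to its AT_NULL terminator and return the
 * address just past it (the start of the remaining initial stack data). */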
static void * __process_auxv (elf_auxv_t * auxp)
{
    elf_auxv_t * av;

    for (av = auxp; av->a_type != AT_NULL; av++)
        switch (av->a_type) {
            default: break;
        }

    return av + 1;
}
#define FIND_LAST_STACK(stack)                      \
    do {                                            \
        /* check that a NULL terminator exists */   \
        assert(*(uint64_t *) stack == 0);           \
        stack += sizeof(uint64_t);                  \
    } while (0)
#ifdef PROFILE
static void set_profile_enabled (const char ** envp)
{
    const char ** p;
    for (p = envp ; (*p) ; p++)
        if (!memcmp(*p, "PROFILE_ENABLED=", 16))
            break;
    if (!(*p))
        return;

    for (int i = 0 ; i < N_PROFILE ; i++)
        PROFILES[i].disabled = true;

    const char * str = (*p) + 16;
    bool enabled = false;
    while (*str) {
        const char * next = str;
        for ( ; (*next) && (*next) != ',' ; next++);
        if (next > str) {
            int len = next - str;
            for (int i = 0 ; i < N_PROFILE ; i++) {
                struct shim_profile * profile = &PROFILES[i];
                if (!memcmp(profile->name, str, len) && !profile->name[len]) {
                    profile->disabled = false;
                    if (profile->type == CATAGORY)
                        enabled = true;
                }
            }
        }
        str = (*next) ? next + 1 : next;
    }

    while (enabled) {
        enabled = false;
        for (int i = 0 ; i < N_PROFILE ; i++) {
            struct shim_profile * profile = &PROFILES[i];
            if (!profile->disabled || profile->root == &profile_)
                continue;
            if (!profile->root->disabled) {
                profile->disabled = false;
                if (profile->type == CATAGORY)
                    enabled = true;
            }
        }
    }

    for (int i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->type == CATAGORY || profile->disabled)
            continue;
        for (profile = profile->root ;
             profile != &profile_ && profile->disabled ;
             profile = profile->root)
            profile->disabled = false;
    }
}
#endif
DEFINE_PROFILE_CATAGORY(resume, );
DEFINE_PROFILE_INTERVAL(child_created_in_new_process, resume);
DEFINE_PROFILE_INTERVAL(child_receive_header, resume);
DEFINE_PROFILE_INTERVAL(child_total_migration_time, resume);
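
/* In a child process, read the newproc header sent by the parent over the
 * process stream; it carries the checkpoint descriptor used for migration
 * and the parent's timing information. */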
static int init_newproc (struct newproc_header * hdr)
{
    int bytes = DkStreamRead(PAL_CB(parent_process), 0,
                             sizeof(struct newproc_header), hdr,
                             NULL, 0);
    if (!bytes)
        return -PAL_ERRNO;

    SAVE_PROFILE_INTERVAL_SINCE(child_receive_header, hdr->write_proc_time);
    return hdr->failure;
}
DEFINE_PROFILE_CATAGORY(pal, );
DEFINE_PROFILE_INTERVAL(pal_startup_time,               pal);
DEFINE_PROFILE_INTERVAL(pal_host_specific_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_relocation_time,            pal);
DEFINE_PROFILE_INTERVAL(pal_linking_time,               pal);
DEFINE_PROFILE_INTERVAL(pal_manifest_loading_time,      pal);
DEFINE_PROFILE_INTERVAL(pal_allocation_time,            pal);
DEFINE_PROFILE_INTERVAL(pal_tail_startup_time,          pal);
DEFINE_PROFILE_INTERVAL(pal_child_creation_time,        pal);

DEFINE_PROFILE_CATAGORY(init, );
DEFINE_PROFILE_INTERVAL(init_randgen,               init);
DEFINE_PROFILE_INTERVAL(init_heap,                  init);
DEFINE_PROFILE_INTERVAL(init_slab,                  init);
DEFINE_PROFILE_INTERVAL(init_str_mgr,               init);
DEFINE_PROFILE_INTERVAL(init_internal_map,          init);
DEFINE_PROFILE_INTERVAL(init_vma,                   init);
DEFINE_PROFILE_INTERVAL(init_fs,                    init);
DEFINE_PROFILE_INTERVAL(init_handle,                init);
DEFINE_PROFILE_INTERVAL(read_from_checkpoint,       init);
DEFINE_PROFILE_INTERVAL(read_from_file,             init);
DEFINE_PROFILE_INTERVAL(init_newproc,               init);
DEFINE_PROFILE_INTERVAL(do_migration,               init);
DEFINE_PROFILE_INTERVAL(init_mount_root,            init);
DEFINE_PROFILE_INTERVAL(init_from_checkpoint_file,  init);
DEFINE_PROFILE_INTERVAL(restore_from_file,          init);
DEFINE_PROFILE_INTERVAL(restore_checkpoint,         init);
DEFINE_PROFILE_INTERVAL(init_manifest,              init);
DEFINE_PROFILE_INTERVAL(init_ipc,                   init);
DEFINE_PROFILE_INTERVAL(init_thread,                init);
DEFINE_PROFILE_INTERVAL(init_important_handles,     init);
DEFINE_PROFILE_INTERVAL(init_mount,                 init);
DEFINE_PROFILE_INTERVAL(init_async,                 init);
DEFINE_PROFILE_INTERVAL(init_stack,                 init);
DEFINE_PROFILE_INTERVAL(read_environs,              init);
DEFINE_PROFILE_INTERVAL(init_loader,                init);
DEFINE_PROFILE_INTERVAL(init_ipc_helper,            init);
DEFINE_PROFILE_INTERVAL(init_signal,                init);

#define CALL_INIT(func, args ...)   func(args)

#define RUN_INIT(func, ...)                                             \
    do {                                                                \
        int _err = CALL_INIT(func, ##__VA_ARGS__);                      \
        if (_err < 0) {                                                 \
            sys_printf("shim initialization failed in " #func " (%d)",  \
                       _err);                                           \
            shim_terminate();                                           \
        }                                                               \
        SAVE_PROFILE_INTERVAL(func);                                    \
    } while (0)

extern PAL_HANDLE thread_start_event;
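
/* Library OS entry point: set up the initial TCB, locate argv/envp/auxv on
 * the initial stack, run every init_* subsystem in order (optionally
 * restoring from a checkpoint), and finally hand control to the executable
 * or a restored context. */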
int shim_init (int argc, void * args)
{
    debug_handle = PAL_CB(debug_stream);
    cur_process.vmid = (IDTYPE) PAL_CB(process_id);

    /* create the initial TCB; the shim cannot run without a TCB */
    __libc_tcb_t tcb;
    memset(&tcb, 0, sizeof(__libc_tcb_t));
    allocate_tls(&tcb, NULL);
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
#endif

    DkSetExceptionHandler(&handle_failure, PAL_EVENT_FAILURE, 0);

    allocsize = PAL_CB(alloc_align);
    allocshift = allocsize - 1;
    allocmask = ~allocshift;

    create_lock(__master_lock);

    const char ** argv, ** envp, ** argp = NULL;
    elf_auxv_t * auxp;

    /* call to figure out where the arguments are */
    FIND_ARG_COMPONENTS(args, argc, argv, envp, auxp);
    initial_stack = __process_auxv(auxp);
    int nauxv = (elf_auxv_t *) initial_stack - auxp;
    FIND_LAST_STACK(initial_stack);

#ifdef PROFILE
    set_profile_enabled(envp);
#endif

    struct newproc_header hdr;
    void * cpaddr = NULL;
#ifdef PROFILE
    unsigned long begin_create_time = 0;
#endif

    BEGIN_PROFILE_INTERVAL();
    RUN_INIT(init_randgen);
    RUN_INIT(init_heap);
    RUN_INIT(init_slab);
    RUN_INIT(init_str_mgr);
    RUN_INIT(init_internal_map);
    RUN_INIT(init_vma);
    RUN_INIT(init_fs);
    RUN_INIT(init_handle);

    debug("shim loaded at %p, ready to initialize\n", &__load_address);

    if (argc && argv[0][0] == '-') {
        if (!memcmp(argv[0], "-resume", 8) && argc >= 2) {
            const char * filename = *(argv + 1);
            argc -= 2;
            argv += 2;
            RUN_INIT(init_mount_root);
            RUN_INIT(init_from_checkpoint_file, filename, &hdr.checkpoint,
                     &cpaddr);
            goto restore;
        }
    }

    if (PAL_CB(parent_process)) {
        RUN_INIT(init_newproc, &hdr);
        SAVE_PROFILE_INTERVAL_SET(child_created_in_new_process,
                                  hdr.create_time, begin_time);
#ifdef PROFILE
        begin_create_time = hdr.begin_create_time;
#endif

        if (hdr.checkpoint.data.cpsize)
            RUN_INIT(do_migration, &hdr.checkpoint, &cpaddr);
    }

    if (cpaddr) {
restore:
        thread_start_event = DkNotificationEventCreate(0);
        RUN_INIT(restore_checkpoint, cpaddr, &hdr.checkpoint.data, 0);
    }

    if (PAL_CB(manifest_handle))
        RUN_INIT(init_manifest, PAL_CB(manifest_handle));

    RUN_INIT(init_mount_root);
    RUN_INIT(init_ipc);
    RUN_INIT(init_thread);
    RUN_INIT(init_important_handles);
    RUN_INIT(init_mount);
    RUN_INIT(init_async);
    RUN_INIT(init_stack, argv, envp, &argp, nauxv, &auxp);
    RUN_INIT(read_environs, envp);
    RUN_INIT(init_loader);
    RUN_INIT(init_ipc_helper);
    RUN_INIT(init_signal);

    debug("shim process initialized\n");

#ifdef PROFILE
    if (begin_create_time)
        SAVE_PROFILE_INTERVAL_SINCE(child_total_migration_time,
                                    begin_create_time);
#endif

    SAVE_PROFILE_INTERVAL_SET(pal_startup_time, 0, pal_control.startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_host_specific_startup_time, 0,
                              pal_control.host_specific_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_relocation_time, 0,
                              pal_control.relocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_linking_time, 0, pal_control.linking_time);
    SAVE_PROFILE_INTERVAL_SET(pal_manifest_loading_time, 0,
                              pal_control.manifest_loading_time);
    SAVE_PROFILE_INTERVAL_SET(pal_allocation_time, 0,
                              pal_control.allocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_tail_startup_time, 0,
                              pal_control.tail_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_child_creation_time, 0,
                              pal_control.child_creation_time);

    if (thread_start_event)
        DkEventSet(thread_start_event);

    shim_tcb_t * cur_tcb = SHIM_GET_TLS();
    struct shim_thread * cur_thread = (struct shim_thread *) cur_tcb->tp;

    if (cur_tcb->context.sp)
        restore_context(&cur_tcb->context);

    if (cur_thread->exec)
        execute_elf_object(cur_thread->exec,
                           argc, argp, nauxv, auxp);

    return 0;
}
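
/* Generate a unique name with mkname(), try to create the object with
 * create(), and retry on a name collision (create() returning a positive
 * value); on success report the final name through output() or qstr. */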
static int create_unique (int (*mkname) (char *, size_t, void *),
                          int (*create) (const char *, void *),
                          int (*output) (char *, size_t, const void *,
                                         struct shim_qstr *),
                          char * name, size_t size, void * id, void * obj,
                          struct shim_qstr * qstr)
{
    int ret, len;
    while (1) {
        len = mkname(name, size, id);
        if (len < 0)
            return len;
        if ((ret = create(name, obj)) < 0)
            return ret;
        if (ret)
            continue;
        if (output)
            return output(name, size, id, qstr);
        if (qstr)
            qstrsetstr(qstr, name, len);
        return len;
    }
}
static int name_pipe (char * uri, size_t size, void * id)
{
    IDTYPE pipeid;
    int len;
    if (getrand(&pipeid, sizeof(IDTYPE)) < sizeof(IDTYPE))
        return -EACCES;
    if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) == size)
        return -ERANGE;
    *((IDTYPE *) id) = pipeid;
    return len;
}

static int open_pipe (const char * uri, void * obj)
{
    PAL_HANDLE pipe = DkStreamOpen(uri, 0, 0, 0, 0);
    if (!pipe)
        return PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST ? 1 :
               -PAL_ERRNO;
    if (obj)
        *((PAL_HANDLE *) obj) = pipe;
    else
        DkObjectClose(pipe);
    return 0;
}

static int pipe_addr (char * uri, size_t size, const void * id,
                      struct shim_qstr * qstr)
{
    IDTYPE pipeid = *((IDTYPE *) id);
    int len;
    if ((len = snprintf(uri, size, "pipe:%u", pipeid)) == size)
        return -ERANGE;
    if (qstr)
        qstrsetstr(qstr, uri, len);
    return len;
}

int create_pipe (IDTYPE * id, char * uri, size_t size, PAL_HANDLE * hdl,
                 struct shim_qstr * qstr)
{
    IDTYPE pipeid;
    int ret = create_unique(&name_pipe, &open_pipe, &pipe_addr,
                            uri, size, &pipeid, hdl, qstr);
    if (ret > 0 && id)
        *id = pipeid;
    return ret;
}
static int name_path (char * path, size_t size, void * id)
{
    unsigned int suffix;
    int prefix_len = strlen(path);
    int len;
    if (getrand(&suffix, sizeof(unsigned int)) < sizeof(unsigned int))
        return -EACCES;
    len = snprintf(path + prefix_len, size - prefix_len, "%08x", suffix);
    if (len == size)
        return -ERANGE;
    *((unsigned int *) id) = suffix;
    return prefix_len + len;
}
static int open_dir (const char * path, void * obj)
{
    struct shim_handle * dir = NULL;

    if (obj) {
        dir = get_new_handle();
        if (!dir)
            return -ENOMEM;
    }

    int ret = open_namei(dir, NULL, path, O_CREAT|O_EXCL|O_DIRECTORY, 0700,
                         NULL);
    if (ret < 0)
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = dir;
    return 0;
}

static int open_file (const char * path, void * obj)
{
    struct shim_handle * file = NULL;

    if (obj) {
        file = get_new_handle();
        if (!file)
            return -ENOMEM;
    }

    int ret = open_namei(file, NULL, path, O_CREAT|O_EXCL|O_RDWR, 0600,
                         NULL);
    if (ret < 0)
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = file;
    return 0;
}
static int open_pal_handle (const char * uri, void * obj)
{
    PAL_HANDLE hdl;

    if (!memcmp(uri, "dir:", 4))
        hdl = DkStreamOpen(uri, 0,
                           PAL_SHARE_OWNER_X|PAL_SHARE_OWNER_W|
                           PAL_SHARE_OWNER_R,
                           PAL_CREAT_TRY|PAL_CREAT_ALWAYS,
                           0);
    else
        hdl = DkStreamOpen(uri, PAL_ACCESS_RDWR,
                           PAL_SHARE_OWNER_W|PAL_SHARE_OWNER_R,
                           PAL_CREAT_TRY|PAL_CREAT_ALWAYS,
                           0);

    if (!hdl) {
        if (PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST)
            return 0;
        else
            return -PAL_ERRNO;
    }

    if (obj)
        *((PAL_HANDLE *) obj) = hdl;

    return 0;
}

static int output_path (char * path, size_t size, const void * id,
                        struct shim_qstr * qstr)
{
    int len = strlen(path);
    if (qstr)
        qstrsetstr(qstr, path, len);
    return len;
}
int create_dir (const char * prefix, char * path, size_t size,
                struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        int len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_dir, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_file (const char * prefix, char * path, size_t size,
                 struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        int len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_file, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_handle (const char * prefix, char * uri, size_t size,
                   PAL_HANDLE * hdl, unsigned int * id)
{
    unsigned int suffix;

    if (prefix) {
        int len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(uri, prefix, len + 1);
    }

    return create_unique(&name_path, &open_pal_handle, &output_path, uri, size,
                         id ? : &suffix, hdl, NULL);
}
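
/* Debug hook: warn when the current RSP is within one page of the bottom
 * of the thread's stack, or lies outside the recorded stack range. */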
void check_stack_hook (void)
{
    struct shim_thread * cur_thread = get_cur_thread();

    void * rsp;
    asm volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack) {
        if (rsp - cur_thread->stack < PAL_CB(pagesize))
            sys_printf("*** stack is almost drained (RSP = %p, stack = %p-%p) ***\n",
                       rsp, cur_thread->stack, cur_thread->stack_top);
    } else {
        sys_printf("*** context mismatched with thread stack (RSP = %p, stack = %p-%p) ***\n",
                   rsp, cur_thread->stack, cur_thread->stack_top);
    }
}
#ifdef PROFILE
static void print_profile_result (PAL_HANDLE hdl, struct shim_profile * root,
                                  int level)
{
    unsigned long total_interval_time = 0;
    unsigned long total_interval_count = 0;
    for (int i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->root != root || profile->disabled)
            continue;
        switch (profile->type) {
            case OCCURENCE: {
                unsigned int count =
                    atomic_read(&profile->val.occurence.count);
                if (count) {
                    for (int j = 0 ; j < level ; j++)
                        __sys_fprintf(hdl, " ");
                    __sys_fprintf(hdl, "- %s: %u times\n", profile->name, count);
                }
                break;
            }
            case INTERVAL: {
                unsigned int count =
                    atomic_read(&profile->val.interval.count);
                if (count) {
                    unsigned long time =
                        atomic_read(&profile->val.interval.time);
                    unsigned long ind_time = time / count;
                    total_interval_time += time;
                    total_interval_count += count;
                    for (int j = 0 ; j < level ; j++)
                        __sys_fprintf(hdl, " ");
                    __sys_fprintf(hdl, "- (%11.11lu) %s: %u times, %lu msec\n",
                                  time, profile->name, count, ind_time);
                }
                break;
            }
            case CATAGORY:
                for (int j = 0 ; j < level ; j++)
                    __sys_fprintf(hdl, " ");
                __sys_fprintf(hdl, "- %s:\n", profile->name);
                print_profile_result(hdl, profile, level + 1);
                break;
        }
    }
    if (total_interval_count) {
        __sys_fprintf(hdl, "- (%11.11u) total: %u times, %lu msec\n",
                      total_interval_time, total_interval_count,
                      total_interval_time / total_interval_count);
    }
}
#endif /* PROFILE */
static struct shim_atomic in_terminate = { .counter = 0, };

int shim_terminate (void)
{
    debug("terminating the whole process\n");

    /* do last clean-up of the process */
    shim_clean();

    DkProcessExit(0);
    return 0;
}
int shim_clean (void)
{
    /* prevent multiple cleanups; reentry is mostly caused by an
       assertion failure inside shim_clean itself */
    atomic_inc(&in_terminate);
    if (atomic_read(&in_terminate) > 1)
        return 0;

    store_all_msg_persist();

#ifdef PROFILE
    if (ENTER_TIME) {
        switch (SHIM_GET_TLS()->context.syscall_nr) {
            case __NR_exit_group:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit_group, ENTER_TIME);
                break;
            case __NR_exit:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit, ENTER_TIME);
                break;
        }
    }

    if (ipc_cld_profile_send()) {
        master_lock();

        PAL_HANDLE hdl = __open_shim_stdio();

        if (hdl) {
            __sys_fprintf(hdl, "******************************\n");
            __sys_fprintf(hdl, "profiling:\n");
            print_profile_result(hdl, &profile_root, 0);
            __sys_fprintf(hdl, "******************************\n");
        }

        master_unlock();
    }
#endif

    del_all_ipc_ports(0);

    if (shim_stdio && shim_stdio != (PAL_HANDLE) -1)
        DkObjectClose(shim_stdio);

    shim_stdio = NULL;
    debug("process %u successfully terminated\n", cur_process.vmid);
    master_lock();
    DkProcessExit(cur_process.exit_code);
    return 0;
}
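
/* Print a message followed by a list of single-character options (e.g.
 * " [y/n] ") on the shim's stdio stream and return the character read
 * back, or a negative errno on failure. */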
int message_confirm (const char * message, const char * options)
{
    char answer;
    int noptions = strlen(options);
    char * option_str = __alloca(noptions * 2 + 3), * str = option_str;
    int ret = 0;

    *(str++) = ' ';
    *(str++) = '[';

    for (int i = 0 ; i < noptions ; i++) {
        *(str++) = options[i];
        *(str++) = '/';
    }

    str--;
    *(str++) = ']';
    *(str++) = ' ';

    master_lock();

    PAL_HANDLE hdl = __open_shim_stdio();
    if (!hdl) {
        master_unlock();
        return -EACCES;
    }

#define WRITE(buf, len)                                     \
    ({  int _ret = DkStreamWrite(hdl, 0, len, buf, NULL);   \
        _ret ? : -PAL_ERRNO; })

#define READ(buf, len)                                      \
    ({  int _ret = DkStreamRead(hdl, 0, len, buf, NULL, 0); \
        _ret ? : -PAL_ERRNO; })

    if ((ret = WRITE(message, strlen(message))) < 0)
        goto out;

    if ((ret = WRITE(option_str, noptions * 2 + 3)) < 0)
        goto out;

    if ((ret = READ(&answer, 1)) < 0)
        goto out;

out:
    master_unlock();
    return (ret < 0) ? ret : answer;
}