
/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_init.c
 *
 * This file contains the entry and exit functions of the library OS.
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_tls.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_checkpoint.h>
#include <shim_fs.h>
#include <shim_ipc.h>
#include <shim_profile.h>
#include <shim_vdso.h>

#include <pal.h>
#include <pal_debug.h>
#include <pal_error.h>
#include <sys/mman.h>
#include <asm/unistd.h>
#include <asm/fcntl.h>

unsigned long allocsize;
unsigned long allocshift;
unsigned long allocmask;

/* The following constants help match the glibc version with compatible
   SHIM libraries. */
#include "glibc-version.h"

const unsigned int glibc_version = GLIBC_VERSION;
static void handle_failure (PAL_PTR event, PAL_NUM arg, PAL_CONTEXT * context)
{
    __UNUSED(event);
    __UNUSED(context);
    shim_get_tls()->pal_errno = (arg <= PAL_ERROR_BOUND) ? arg : 0;
}

noreturn void __abort(void) {
    PAUSE();
    shim_terminate(-ENOTRECOVERABLE);
}

void warn (const char *format, ...)
{
    va_list args;
    va_start (args, format);
    __SYS_VPRINTF(format, args);
    va_end (args);
}

void __stack_chk_fail (void)
{
}
static int pal_errno_to_unix_errno [PAL_ERROR_BOUND + 1] = {
    /* reserved */ 0,
    /* PAL_ERROR_NOTIMPLEMENTED */ ENOSYS,
    /* PAL_ERROR_NOTDEFINED */ ENOSYS,
    /* PAL_ERROR_NOTSUPPORT */ EACCES,
    /* PAL_ERROR_INVAL */ EINVAL,
    /* PAL_ERROR_TOOLONG */ ENAMETOOLONG,
    /* PAL_ERROR_DENIED */ EACCES,
    /* PAL_ERROR_BADHANDLE */ EFAULT,
    /* PAL_ERROR_STREAMEXIST */ EEXIST,
    /* PAL_ERROR_STREAMNOTEXIST */ ENOENT,
    /* PAL_ERROR_STREAMISFILE */ ENOTDIR,
    /* PAL_ERROR_STREAMISDIR */ EISDIR,
    /* PAL_ERROR_STREAMISDEVICE */ ESPIPE,
    /* PAL_ERROR_INTERRUPTED */ EINTR,
    /* PAL_ERROR_OVERFLOW */ EFAULT,
    /* PAL_ERROR_BADADDR */ EFAULT,
    /* PAL_ERROR_NOMEM */ ENOMEM,
    /* PAL_ERROR_NOTKILLABLE */ EACCES,
    /* PAL_ERROR_INCONSIST */ EFAULT,
    /* PAL_ERROR_TRYAGAIN */ EAGAIN,
    /* PAL_ERROR_ENDOFSTREAM */ 0,
    /* PAL_ERROR_NOTSERVER */ EINVAL,
    /* PAL_ERROR_NOTCONNECTION */ ENOTCONN,
    /* PAL_ERROR_ZEROSIZE */ 0,
    /* PAL_ERROR_CONNFAILED */ ECONNRESET,
    /* PAL_ERROR_ADDRNOTEXIST */ EADDRNOTAVAIL,
};

long convert_pal_errno (long err)
{
    return (err >= 0 && err <= PAL_ERROR_BOUND) ?
           pal_errno_to_unix_errno[err] : 0;
}
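
/* For illustration, based on the table above:
 *     convert_pal_errno(PAL_ERROR_STREAMEXIST) == EEXIST
 *     convert_pal_errno(PAL_ERROR_DENIED)      == EACCES
 * Out-of-range or unknown PAL error codes fold to 0. */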

unsigned long parse_int (const char * str)
{
    unsigned long num = 0;
    int radix = 10;
    char c;

    if (str[0] == '0') {
        str++;
        radix = 8;
        if (str[0] == 'x') {
            str++;
            radix = 16;
        }
    }

    while ((c = *(str++))) {
        int val;
        if (c >= 'A' && c <= 'F')
            val = c - 'A' + 10;
        else if (c >= 'a' && c <= 'f')
            val = c - 'a' + 10;
        else if (c >= '0' && c <= '9')
            val = c - '0';
        else
            break;
        if (val >= radix)
            break;
        num = num * radix + val;
    }

    if (c == 'G' || c == 'g')
        num *= 1024 * 1024 * 1024;
    else if (c == 'M' || c == 'm')
        num *= 1024 * 1024;
    else if (c == 'K' || c == 'k')
        num *= 1024;

    return num;
}
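
/* For illustration, parse_int() accepts decimal, octal ("0" prefix), and hex
 * ("0x" prefix) input with an optional K/M/G suffix, e.g.:
 *     parse_int("4096") == 4096
 *     parse_int("0x10") == 16
 *     parse_int("16K")  == 16384
 *     parse_int("4M")   == 4194304 */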

long int glibc_option (const char * opt)
{
    char cfg[CONFIG_MAX];

    if (strcmp_static(opt, "heap_size")) {
        ssize_t ret = get_config(root_config, "glibc.heap_size", cfg, CONFIG_MAX);
        if (ret <= 0) {
            debug("no glibc option: %s (err=%ld)\n", opt, ret);
            return -ENOENT;
        }

        long int heap_size = parse_int(cfg);
        debug("glibc option: heap_size = %ld\n", heap_size);
        return (long int) heap_size;
    }

    return -EINVAL;
}
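
/* For illustration, glibc_option("heap_size") looks up "glibc.heap_size" in
 * the manifest, so a manifest would typically carry a line such as the
 * hypothetical example below, which parse_int() turns into 16777216:
 *     glibc.heap_size = 16M */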

void * migrated_memory_start;
void * migrated_memory_end;

const char ** initial_envp __attribute_migratable;

/* library_paths is populated with LD_LIBRARY_PATH entries once during LibOS
 * initialization and is used in __load_interp_object() to search for the ELF
 * program interpreter in specific paths. Once allocated, its memory is
 * never freed or updated. */
char ** library_paths = NULL;

struct shim_lock __master_lock;
bool lock_enabled;

void init_tcb (shim_tcb_t * tcb)
{
    tcb->canary = SHIM_TLS_CANARY;
    tcb->self = tcb;
}

void copy_tcb (shim_tcb_t * new_tcb, const shim_tcb_t * old_tcb)
{
    memset(new_tcb, 0, sizeof(shim_tcb_t));
    new_tcb->canary = SHIM_TLS_CANARY;
    new_tcb->self = new_tcb;
    new_tcb->tp = old_tcb->tp;
    memcpy(&new_tcb->context, &old_tcb->context, sizeof(struct shim_context));
    new_tcb->tid = old_tcb->tid;
    new_tcb->debug_buf = old_tcb->debug_buf;
}

/* This function is used to allocate the TLS before the interpreter starts
 * running. */
void allocate_tls (__libc_tcb_t * tcb, bool user, struct shim_thread * thread)
{
    assert(tcb);
    tcb->tcb = tcb;
    init_tcb(&tcb->shim_tcb);

    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
        tcb->shim_tcb.tp = thread;
        tcb->shim_tcb.tid = thread->tid;
    } else {
        tcb->shim_tcb.tp = NULL;
        tcb->shim_tcb.tid = 0;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(shim_tls_check_canary());
}

void populate_tls (__libc_tcb_t * tcb, bool user)
{
    assert(tcb);
    tcb->tcb = tcb;
    copy_tcb(&tcb->shim_tcb, shim_get_tls());

    struct shim_thread * thread = (struct shim_thread *) tcb->shim_tcb.tp;
    if (thread) {
        thread->tcb = tcb;
        thread->user_tcb = user;
    }

    DkSegmentRegister(PAL_SEGMENT_FS, tcb);
    assert(shim_tls_check_canary());
}
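
/* Both allocate_tls() and populate_tls() install the TCB as the FS segment
 * base via DkSegmentRegister(PAL_SEGMENT_FS, tcb); later shim_get_tls()
 * lookups resolve through that FS-based pointer, which is why the canary is
 * checked right after the segment register is set. */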

DEFINE_PROFILE_OCCURENCE(alloc_stack, memory);
DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);

#define STACK_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)

void * allocate_stack (size_t size, size_t protect_size, bool user)
{
    size = ALIGN_UP(size);
    protect_size = ALIGN_UP(protect_size);

    /* preserve a non-readable, non-writable page below the user stack to
     * prevent the user program from clobbering other VMAs */
    void * stack = NULL;
    int flags = STACK_FLAGS|(user ? 0 : VMA_INTERNAL);

    if (user) {
        stack = bkeep_unmapped_heap(size + protect_size, PROT_NONE,
                                    flags, NULL, 0, "stack");
        if (!stack)
            return NULL;

        stack = (void *) DkVirtualMemoryAlloc(stack, size + protect_size,
                                              0, PAL_PROT_NONE);
    } else {
        stack = system_malloc(size + protect_size);
    }

    if (!stack)
        return NULL;

    ADD_PROFILE_OCCURENCE(alloc_stack, size + protect_size);
    INC_PROFILE_OCCURENCE(alloc_stack_count);

    stack += protect_size;
    // Ensure proper alignment for the process' initial stack pointer value.
    stack += (16 - (uintptr_t)stack % 16) % 16;
    DkVirtualMemoryProtect(stack, size, PAL_PROT_READ|PAL_PROT_WRITE);

    if (bkeep_mprotect(stack, size, PROT_READ|PROT_WRITE, flags) < 0)
        return NULL;

    debug("allocated stack at %p (size = %ld)\n", stack, size);
    return stack;
}

static int populate_user_stack (void * stack, size_t stack_size,
                                elf_auxv_t ** auxpp, int ** argcpp,
                                const char *** argvp, const char *** envpp)
{
    const int argc = **argcpp;
    const char ** argv = *argvp, ** envp = *envpp;
    const char ** new_argv = NULL, ** new_envp = NULL;
    elf_auxv_t *new_auxp = NULL;
    void * stack_bottom = stack;
    void * stack_top = stack + stack_size;

#define ALLOCATE_TOP(size)                                          \
    ({ if ((stack_top -= (size)) < stack_bottom) return -ENOMEM;    \
       stack_top; })

#define ALLOCATE_BOTTOM(size)                                       \
    ({ if ((stack_bottom += (size)) > stack_top) return -ENOMEM;    \
       stack_bottom - (size); })

    /* ld.so expects argc as a long on the stack, not an int. */
    long * argcp = ALLOCATE_BOTTOM(sizeof(long));
    *argcp = **argcpp;

    if (!argv) {
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;
        goto copy_envp;
    }

    new_argv = stack_bottom;
    while (argv) {
        for (const char ** a = argv ; *a ; a++) {
            const char ** t = ALLOCATE_BOTTOM(sizeof(const char *));
            int len = strlen(*a) + 1;
            char * abuf = ALLOCATE_TOP(len);
            memcpy(abuf, *a, len);
            *t = abuf;
        }

        *((const char **) ALLOCATE_BOTTOM(sizeof(const char *))) = NULL;

copy_envp:
        if (!envp)
            break;
        new_envp = stack_bottom;
        argv = envp;
        envp = NULL;
    }

    if (!new_envp)
        *(const char **) ALLOCATE_BOTTOM(sizeof(const char *)) = NULL;

    /* reserve space for ELF aux vectors, populated later by LibOS */
    new_auxp = ALLOCATE_BOTTOM(REQUIRED_ELF_AUXV * sizeof(elf_auxv_t) +
                               REQUIRED_ELF_AUXV_SPACE);

    /* the x86_64 ABI requires 16-byte stack alignment at every function
     * call */
    size_t move_size = stack_bottom - stack;
    *argcpp = stack_top - move_size;
    *argcpp = ALIGN_DOWN_PTR(*argcpp, 16UL);
    **argcpp = argc;
    size_t shift = (void*)(*argcpp) - stack;

    memmove(*argcpp, stack, move_size);
    *argvp = new_argv ? (void *) new_argv + shift : NULL;
    *envpp = new_envp ? (void *) new_envp + shift : NULL;
    *auxpp = new_auxp ? (void *) new_auxp + shift : NULL;

    /* clear the working area at the bottom */
    memset(stack, 0, shift);
    return 0;
}
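
/* For illustration, after populate_user_stack() the region from the
 * 16-byte-aligned *argcpp up to the original stack top looks like:
 *
 *     *argcpp -> [ argc (long) ][ argv[0..argc-1], NULL ][ envp[...], NULL ]
 *                [ auxv reserve ][ argument/environment strings ... top ]
 *
 * The pointer arrays point into the string area near the top, and the old
 * working area below *argcpp is zeroed out. */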

unsigned long sys_stack_size = 0;

int init_stack (const char ** argv, const char ** envp,
                int ** argcpp, const char *** argpp,
                elf_auxv_t ** auxpp)
{
    if (!sys_stack_size) {
        sys_stack_size = DEFAULT_SYS_STACK_SIZE;
        if (root_config) {
            char stack_cfg[CONFIG_MAX];
            if (get_config(root_config, "sys.stack.size", stack_cfg,
                           CONFIG_MAX) > 0)
                sys_stack_size = ALIGN_UP(parse_int(stack_cfg));
        }
    }

    struct shim_thread * cur_thread = get_cur_thread();
    if (!cur_thread || cur_thread->stack)
        return 0;

    void * stack = allocate_stack(sys_stack_size, allocsize, true);
    if (!stack)
        return -ENOMEM;

    if (initial_envp)
        envp = initial_envp;

    int ret = populate_user_stack(stack, sys_stack_size, auxpp, argcpp, &argv, &envp);
    if (ret < 0)
        return ret;

    *argpp = argv;
    initial_envp = envp;

    cur_thread->stack_top = stack + sys_stack_size;
    cur_thread->stack     = stack;
    cur_thread->stack_red = stack - allocsize;
    return 0;
}
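
/* For illustration, the stack size can be overridden from the manifest with a
 * line such as the hypothetical value below, which is parsed by parse_int()
 * and rounded up to the allocation alignment:
 *     sys.stack.size = 256K */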

int read_environs (const char ** envp)
{
    for (const char ** e = envp ; *e ; e++) {
        if (strpartcmp_static(*e, "LD_LIBRARY_PATH=")) {
            /* populate library_paths with entries from the LD_LIBRARY_PATH
             * environment variable */
            const char * s = *e + static_strlen("LD_LIBRARY_PATH=");
            size_t npaths = 2; // one for the first entry, one for the
                               // terminating NULL
            for (const char * tmp = s ; *tmp ; tmp++)
                if (*tmp == ':')
                    npaths++;

            char** paths = malloc(sizeof(const char *) * npaths);
            if (!paths)
                return -ENOMEM;

            size_t cnt = 0;
            while (*s) {
                const char * next;
                for (next = s ; *next && *next != ':' ; next++);
                size_t len = next - s;
                char * str = malloc(len + 1);
                if (!str) {
                    for (size_t i = 0; i < cnt; i++)
                        free(paths[i]);
                    free(paths);
                    return -ENOMEM;
                }
                memcpy(str, s, len);
                str[len] = 0;
                paths[cnt++] = str;
                s = *next ? next + 1 : next;
            }

            paths[cnt] = NULL;

            assert(!library_paths);
            library_paths = paths;
            return 0;
        }
    }

    return 0;
}
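
/* For illustration, with LD_LIBRARY_PATH=/lib:/usr/lib in the environment,
 * read_environs() leaves library_paths = { "/lib", "/usr/lib", NULL }. */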

struct config_store * root_config = NULL;

static void * __malloc (size_t size)
{
    return malloc(size);
}

static void __free (void * mem)
{
    free(mem);
}

int init_manifest (PAL_HANDLE manifest_handle)
{
    int ret = 0;
    void * addr = NULL;
    size_t size = 0, map_size = 0;

#define MAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL)

    if (PAL_CB(manifest_preload.start)) {
        addr = PAL_CB(manifest_preload.start);
        size = PAL_CB(manifest_preload.end) - PAL_CB(manifest_preload.start);
    } else {
        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQueryByHandle(manifest_handle, &attr))
            return -PAL_ERRNO;

        size = attr.pending_size;
        map_size = ALIGN_UP(size);
        addr = bkeep_unmapped_any(map_size, PROT_READ, MAP_FLAGS,
                                  NULL, 0, "manifest");
        if (!addr)
            return -ENOMEM;

        void * ret_addr = DkStreamMap(manifest_handle, addr,
                                      PAL_PROT_READ, 0,
                                      ALIGN_UP(size));
        if (!ret_addr) {
            bkeep_munmap(addr, map_size, MAP_FLAGS);
            return -ENOMEM;
        } else {
            assert(addr == ret_addr);
        }
    }

    struct config_store * new_root_config = malloc(sizeof(struct config_store));
    if (!new_root_config) {
        ret = -ENOMEM;
        goto fail;
    }

    new_root_config->raw_data = addr;
    new_root_config->raw_size = size;
    new_root_config->malloc = __malloc;
    new_root_config->free = __free;

    const char * errstring = "Unexpected error";

    if ((ret = read_config(new_root_config, NULL, &errstring)) < 0) {
        SYS_PRINTF("Unable to read manifest file: %s\n", errstring);
        goto fail;
    }

    root_config = new_root_config;
    return 0;

fail:
    if (map_size) {
        DkStreamUnmap(addr, map_size);
        if (bkeep_munmap(addr, map_size, MAP_FLAGS) < 0)
            BUG();
    }
    free(new_root_config);
    return ret;
}

#ifdef PROFILE
struct shim_profile profile_root;
#endif

#define FIND_ARG_COMPONENTS(cookie, argc, argv, envp, auxp)     \
    do {                                                        \
        void *_tmp = (cookie);                                  \
        (argv) = _tmp;                                          \
        _tmp += sizeof(char *) * ((argc) + 1);                  \
        (envp) = _tmp;                                          \
        for ( ; *(char **) _tmp; _tmp += sizeof(char *));       \
        (auxp) = _tmp + sizeof(char *);                         \
    } while (0)
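
/* For illustration, FIND_ARG_COMPONENTS() assumes the cookie points at a
 * contiguous initial-argument block laid out as:
 *
 *     cookie -> argv[0] ... argv[argc-1], NULL, envp[0] ... NULL, auxv...
 *
 * and derives the envp and auxv pointers by walking past the NULL markers. */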

#ifdef PROFILE
static void set_profile_enabled (const char ** envp)
{
    const char ** p;
    for (p = envp ; (*p) ; p++)
        if (strpartcmp_static(*p, "PROFILE_ENABLED="))
            break;
    if (!(*p))
        return;

    for (size_t i = 0 ; i < N_PROFILE ; i++)
        PROFILES[i].disabled = true;

    const char * str = (*p) + 16;
    bool enabled = false;
    while (*str) {
        const char * next = str;
        for ( ; (*next) && (*next) != ',' ; next++);
        if (next > str) {
            size_t len = next - str;
            for (size_t i = 0 ; i < N_PROFILE ; i++) {
                struct shim_profile * profile = &PROFILES[i];
                if (!memcmp(profile->name, str, len) && !profile->name[len]) {
                    profile->disabled = false;
                    if (profile->type == CATEGORY)
                        enabled = true;
                }
            }
        }
        str = (*next) ? next + 1 : next;
    }

    while (enabled) {
        enabled = false;
        for (size_t i = 0 ; i < N_PROFILE ; i++) {
            struct shim_profile * profile = &PROFILES[i];
            if (!profile->disabled || profile->root == &profile_)
                continue;
            if (!profile->root->disabled) {
                profile->disabled = false;
                if (profile->type == CATEGORY)
                    enabled = true;
            }
        }
    }

    for (size_t i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->type == CATEGORY || profile->disabled)
            continue;
        for (profile = profile->root ;
             profile != &profile_ && profile->disabled ;
             profile = profile->root)
            profile->disabled = false;
    }
}
#endif
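
/* For illustration (only relevant when built with PROFILE), setting e.g.
 * PROFILE_ENABLED=pal,init in the environment first disables all profiles and
 * then re-enables the listed ones, with members and ancestors re-enabled by
 * the propagation loops above. */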

static int init_newproc (struct newproc_header * hdr)
{
    BEGIN_PROFILE_INTERVAL();

    int bytes = DkStreamRead(PAL_CB(parent_process), 0,
                             sizeof(struct newproc_header), hdr,
                             NULL, 0);
    if (!bytes)
        return -PAL_ERRNO;

    SAVE_PROFILE_INTERVAL(child_wait_header);
    SAVE_PROFILE_INTERVAL_SINCE(child_receive_header, hdr->write_proc_time);
    return hdr->failure;
}

DEFINE_PROFILE_CATEGORY(pal, );
DEFINE_PROFILE_INTERVAL(pal_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_host_specific_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_relocation_time, pal);
DEFINE_PROFILE_INTERVAL(pal_linking_time, pal);
DEFINE_PROFILE_INTERVAL(pal_manifest_loading_time, pal);
DEFINE_PROFILE_INTERVAL(pal_allocation_time, pal);
DEFINE_PROFILE_INTERVAL(pal_tail_startup_time, pal);
DEFINE_PROFILE_INTERVAL(pal_child_creation_time, pal);

DEFINE_PROFILE_CATEGORY(init, );
DEFINE_PROFILE_INTERVAL(init_vma, init);
DEFINE_PROFILE_INTERVAL(init_slab, init);
DEFINE_PROFILE_INTERVAL(init_str_mgr, init);
DEFINE_PROFILE_INTERVAL(init_internal_map, init);
DEFINE_PROFILE_INTERVAL(init_fs, init);
DEFINE_PROFILE_INTERVAL(init_dcache, init);
DEFINE_PROFILE_INTERVAL(init_handle, init);
DEFINE_PROFILE_INTERVAL(read_from_checkpoint, init);
DEFINE_PROFILE_INTERVAL(read_from_file, init);
DEFINE_PROFILE_INTERVAL(init_newproc, init);
DEFINE_PROFILE_INTERVAL(init_mount_root, init);
DEFINE_PROFILE_INTERVAL(init_from_checkpoint_file, init);
DEFINE_PROFILE_INTERVAL(restore_from_file, init);
DEFINE_PROFILE_INTERVAL(init_manifest, init);
DEFINE_PROFILE_INTERVAL(init_ipc, init);
DEFINE_PROFILE_INTERVAL(init_thread, init);
DEFINE_PROFILE_INTERVAL(init_important_handles, init);
DEFINE_PROFILE_INTERVAL(init_mount, init);
DEFINE_PROFILE_INTERVAL(init_async, init);
DEFINE_PROFILE_INTERVAL(init_stack, init);
DEFINE_PROFILE_INTERVAL(read_environs, init);
DEFINE_PROFILE_INTERVAL(init_loader, init);
DEFINE_PROFILE_INTERVAL(init_ipc_helper, init);
DEFINE_PROFILE_INTERVAL(init_signal, init);

#define CALL_INIT(func, args ...)   func(args)

#define RUN_INIT(func, ...)                                         \
    do {                                                            \
        int _err = CALL_INIT(func, ##__VA_ARGS__);                  \
        if (_err < 0) {                                             \
            SYS_PRINTF("shim_init() in " #func " (%d)\n", _err);    \
            shim_terminate(_err);                                   \
        }                                                           \
        SAVE_PROFILE_INTERVAL(func);                                \
    } while (0)

extern PAL_HANDLE thread_start_event;

noreturn void* shim_init (int argc, void * args)
{
    debug_handle = PAL_CB(debug_stream);
    cur_process.vmid = (IDTYPE) PAL_CB(process_id);

    /* create the initial TCB; the shim cannot run without a TCB */
    __libc_tcb_t tcb;
    memset(&tcb, 0, sizeof(__libc_tcb_t));
    allocate_tls(&tcb, false, NULL);
    __disable_preempt(&tcb.shim_tcb); // Temporarily disable preemption to delay
                                      // any signal that arrives during initialization
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
#endif

    debug("host: %s\n", PAL_CB(host_type));

    DkSetExceptionHandler(&handle_failure, PAL_EVENT_FAILURE);

    allocsize  = PAL_CB(alloc_align);
    allocshift = allocsize - 1;
    allocmask  = ~allocshift;

    create_lock(&__master_lock);

    int * argcp = &argc;
    const char ** argv, ** envp, ** argp = NULL;
    elf_auxv_t * auxp;

    /* call to figure out where the arguments are */
    FIND_ARG_COMPONENTS(args, argc, argv, envp, auxp);

#ifdef PROFILE
    set_profile_enabled(envp);
#endif

    struct newproc_header hdr;
    void * cpaddr = NULL;
#ifdef PROFILE
    unsigned long begin_create_time = 0;
#endif

    BEGIN_PROFILE_INTERVAL();
    RUN_INIT(init_vma);
    RUN_INIT(init_slab);
    RUN_INIT(read_environs, envp);
    RUN_INIT(init_str_mgr);
    RUN_INIT(init_internal_map);
    RUN_INIT(init_fs);
    RUN_INIT(init_dcache);
    RUN_INIT(init_handle);

    debug("shim loaded at %p, ready to initialize\n", &__load_address);

    if (argc && argv[0][0] == '-') {
        if (strcmp_static(argv[0], "-resume") && argc >= 2) {
            const char * filename = *(argv + 1);
            argc -= 2;
            argv += 2;
            RUN_INIT(init_mount_root);
            RUN_INIT(init_from_checkpoint_file, filename, &hdr.checkpoint,
                     &cpaddr);
            goto restore;
        }
    }
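
    /* For illustration, the branch above is taken when the LibOS is started
     * with "-resume <checkpoint-file>" as its first two arguments; the two
     * arguments are consumed and process state is restored from the named
     * checkpoint file via the "restore:" path below. */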

    if (PAL_CB(parent_process)) {
        RUN_INIT(init_newproc, &hdr);
        SAVE_PROFILE_INTERVAL_SET(child_created_in_new_process,
                                  hdr.create_time, begin_time);
#ifdef PROFILE
        begin_create_time = hdr.begin_create_time;
#endif

        if (hdr.checkpoint.hdr.size)
            RUN_INIT(do_migration, &hdr.checkpoint, &cpaddr);
    }

    if (cpaddr) {
restore:
        thread_start_event = DkNotificationEventCreate(PAL_FALSE);
        RUN_INIT(restore_checkpoint,
                 &hdr.checkpoint.hdr, &hdr.checkpoint.mem,
                 (ptr_t) cpaddr, 0);
    }

    if (PAL_CB(manifest_handle))
        RUN_INIT(init_manifest, PAL_CB(manifest_handle));

    RUN_INIT(init_mount_root);
    RUN_INIT(init_ipc);
    RUN_INIT(init_thread);
    RUN_INIT(init_mount);
    RUN_INIT(init_important_handles);
    RUN_INIT(init_async);
    RUN_INIT(init_stack, argv, envp, &argcp, &argp, &auxp);
    RUN_INIT(init_loader);
    RUN_INIT(init_ipc_helper);
    RUN_INIT(init_signal);

    if (PAL_CB(parent_process)) {
        /* Notify the parent process */
        struct newproc_response res;
        res.child_vmid = cur_process.vmid;
        res.failure = 0;
        if (!DkStreamWrite(PAL_CB(parent_process), 0,
                           sizeof(struct newproc_response),
                           &res, NULL))
            shim_do_exit(-PAL_ERRNO);
    }

    debug("shim process initialized\n");

#ifdef PROFILE
    if (begin_create_time)
        SAVE_PROFILE_INTERVAL_SINCE(child_total_migration_time,
                                    begin_create_time);
#endif

    SAVE_PROFILE_INTERVAL_SET(pal_startup_time, 0, pal_control.startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_host_specific_startup_time, 0,
                              pal_control.host_specific_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_relocation_time, 0,
                              pal_control.relocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_linking_time, 0, pal_control.linking_time);
    SAVE_PROFILE_INTERVAL_SET(pal_manifest_loading_time, 0,
                              pal_control.manifest_loading_time);
    SAVE_PROFILE_INTERVAL_SET(pal_allocation_time, 0,
                              pal_control.allocation_time);
    SAVE_PROFILE_INTERVAL_SET(pal_tail_startup_time, 0,
                              pal_control.tail_startup_time);
    SAVE_PROFILE_INTERVAL_SET(pal_child_creation_time, 0,
                              pal_control.child_creation_time);

    if (thread_start_event)
        DkEventSet(thread_start_event);

    shim_tcb_t * cur_tcb = shim_get_tls();
    struct shim_thread * cur_thread = (struct shim_thread *) cur_tcb->tp;

    if (cur_tcb->context.regs && cur_tcb->context.regs->rsp) {
        vdso_map_migrate();
        restore_context(&cur_tcb->context);
    }

    if (cur_thread->exec)
        execute_elf_object(cur_thread->exec, argcp, argp, auxp);
    shim_do_exit(0);
}

static int create_unique (int (*mkname) (char *, size_t, void *),
                          int (*create) (const char *, void *),
                          int (*output) (char *, size_t, const void *,
                                         struct shim_qstr *),
                          char * name, size_t size, void * id, void * obj,
                          struct shim_qstr * qstr)
{
    int ret, len;
    while (1) {
        len = mkname(name, size, id);
        if (len < 0)
            return len;
        if ((ret = create(name, obj)) < 0)
            return ret;
        if (ret)
            continue;
        if (output)
            return output(name, size, id, qstr);
        if (qstr)
            qstrsetstr(qstr, name, len);
        return len;
    }
}
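
/* For illustration of the create_unique() contract: mkname() generates a
 * candidate name; create() returns a negative errno on a hard failure, a
 * positive value when the name is already taken (so another candidate is
 * generated), and 0 on success; output()/qstr then record the final name. */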

static int name_pipe_rand (char * uri, size_t size, void * id)
{
    IDTYPE pipeid;
    size_t len;
    int ret = DkRandomBitsRead(&pipeid, sizeof(pipeid));
    if (ret < 0)
        return -convert_pal_errno(-ret);
    debug("creating pipe: pipe.srv:%u\n", pipeid);
    if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) >= size)
        return -ERANGE;
    *((IDTYPE *)id) = pipeid;
    return len;
}

static int name_pipe_vmid (char * uri, size_t size, void * id)
{
    IDTYPE pipeid = cur_process.vmid;
    size_t len;
    debug("creating pipe: pipe.srv:%u\n", pipeid);
    if ((len = snprintf(uri, size, "pipe.srv:%u", pipeid)) >= size)
        return -ERANGE;
    *((IDTYPE *)id) = pipeid;
    return len;
}

static int open_pipe (const char * uri, void * obj)
{
    PAL_HANDLE pipe = DkStreamOpen(uri, 0, 0, 0, 0);
    if (!pipe)
        return PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST ? 1 :
               -PAL_ERRNO;

    if (obj)
        *((PAL_HANDLE *) obj) = pipe;
    else
        DkObjectClose(pipe);
    return 0;
}

static int pipe_addr (char * uri, size_t size, const void * id,
                      struct shim_qstr * qstr)
{
    IDTYPE pipeid = *((IDTYPE *) id);
    size_t len;
    if ((len = snprintf(uri, size, "pipe:%u", pipeid)) == size)
        return -ERANGE;
    if (qstr)
        qstrsetstr(qstr, uri, len);
    return len;
}

int create_pipe (IDTYPE * id, char * uri, size_t size, PAL_HANDLE * hdl,
                 struct shim_qstr * qstr, bool use_vmid_for_name)
{
    IDTYPE pipeid;
    int ret;

    if (use_vmid_for_name)
        ret = create_unique(&name_pipe_vmid, &open_pipe, &pipe_addr,
                            uri, size, &pipeid, hdl, qstr);
    else
        ret = create_unique(&name_pipe_rand, &open_pipe, &pipe_addr,
                            uri, size, &pipeid, hdl, qstr);

    if (ret > 0 && id)
        *id = pipeid;
    return ret;
}

static int name_path (char * path, size_t size, void * id)
{
    unsigned int suffix;
    int prefix_len = strlen(path);
    size_t len;
    int ret = DkRandomBitsRead(&suffix, sizeof(suffix));
    if (ret < 0)
        return -convert_pal_errno(-ret);
    len = snprintf(path + prefix_len, size - prefix_len, "%08x", suffix);
    if (len == size)
        return -ERANGE;
    *((unsigned int *) id) = suffix;
    return prefix_len + len;
}

static int open_dir (const char * path, void * obj)
{
    struct shim_handle * dir = NULL;

    if (obj) {
        dir = get_new_handle();
        if (!dir)
            return -ENOMEM;
    }

    int ret = open_namei(dir, NULL, path, O_CREAT|O_EXCL|O_DIRECTORY, 0700,
                         NULL);
    if (ret < 0)
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = dir;
    return 0;
}

static int open_file (const char * path, void * obj)
{
    struct shim_handle * file = NULL;

    if (obj) {
        file = get_new_handle();
        if (!file)
            return -ENOMEM;
    }

    int ret = open_namei(file, NULL, path, O_CREAT|O_EXCL|O_RDWR, 0600,
                         NULL);
    if (ret < 0)
        return ret == -EEXIST ? 1 : ret;
    if (obj)
        *((struct shim_handle **) obj) = file;
    return 0;
}

static int open_pal_handle (const char * uri, void * obj)
{
    PAL_HANDLE hdl;

    if (strpartcmp_static(uri, "dev:"))
        hdl = DkStreamOpen(uri, 0,
                           PAL_SHARE_OWNER_X|PAL_SHARE_OWNER_W|
                           PAL_SHARE_OWNER_R,
                           PAL_CREATE_TRY|PAL_CREATE_ALWAYS,
                           0);
    else
        hdl = DkStreamOpen(uri, PAL_ACCESS_RDWR,
                           PAL_SHARE_OWNER_W|PAL_SHARE_OWNER_R,
                           PAL_CREATE_TRY|PAL_CREATE_ALWAYS,
                           0);

    if (!hdl) {
        if (PAL_NATIVE_ERRNO == PAL_ERROR_STREAMEXIST)
            return 0;
        else
            return -PAL_ERRNO;
    }

    if (obj) {
        *((PAL_HANDLE *) obj) = hdl;
    } else {
        DkObjectClose(hdl);
    }

    return 0;
}

static int output_path (char * path, size_t size, const void * id,
                        struct shim_qstr * qstr)
{
    size_t len = strlen(path);
    // API compatibility
    __UNUSED(size);
    __UNUSED(id);

    if (qstr)
        qstrsetstr(qstr, path, len);
    return len;
}

int create_dir (const char * prefix, char * path, size_t size,
                struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        size_t len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_dir, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_file (const char * prefix, char * path, size_t size,
                 struct shim_handle ** hdl)
{
    unsigned int suffix;

    if (prefix) {
        size_t len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(path, prefix, len + 1);
    }

    return create_unique(&name_path, &open_file, &output_path, path, size,
                         &suffix, hdl, NULL);
}

int create_handle (const char * prefix, char * uri, size_t size,
                   PAL_HANDLE * hdl, unsigned int * id)
{
    unsigned int suffix;

    if (prefix) {
        size_t len = strlen(prefix);
        if (len >= size)
            return -ERANGE;
        memcpy(uri, prefix, len + 1);
    }

    return create_unique(&name_path, &open_pal_handle, &output_path, uri, size,
                         id ? : &suffix, hdl, NULL);
}
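
/* For illustration (hypothetical prefix): create_dir("/tmp/dir-", buf,
 * sizeof(buf), &hdl) appends an 8-digit random hex suffix via name_path(),
 * producing a path such as "/tmp/dir-1a2b3c4d", and retries with a new suffix
 * if that path already exists. */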

void check_stack_hook (void)
{
    struct shim_thread * cur_thread = get_cur_thread();

    void * rsp;
    __asm__ volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack) {
        if ((uintptr_t) rsp - (uintptr_t) cur_thread->stack < PAL_CB(pagesize))
            SYS_PRINTF("*** stack is almost drained (RSP = %p, stack = %p-%p) ***\n",
                       rsp, cur_thread->stack, cur_thread->stack_top);
    } else {
        SYS_PRINTF("*** RSP is outside the thread stack (RSP = %p, stack = %p-%p) ***\n",
                   rsp, cur_thread->stack, cur_thread->stack_top);
    }
}

#ifdef PROFILE
static void print_profile_result (PAL_HANDLE hdl, struct shim_profile * root,
                                  int level)
{
    unsigned long total_interval_time  = 0;
    unsigned long total_interval_count = 0;
    for (size_t i = 0 ; i < N_PROFILE ; i++) {
        struct shim_profile * profile = &PROFILES[i];
        if (profile->root != root || profile->disabled)
            continue;
        switch (profile->type) {
            case OCCURENCE: {
                unsigned int count =
                    atomic_read(&profile->val.occurence.count);
                if (count) {
                    for (int j = 0 ; j < level ; j++)
                        __SYS_FPRINTF(hdl, " ");
                    __SYS_FPRINTF(hdl, "- %s: %u times\n", profile->name, count);
                }
                break;
            }
            case INTERVAL: {
                unsigned int count =
                    atomic_read(&profile->val.interval.count);
                if (count) {
                    unsigned long time =
                        atomic_read(&profile->val.interval.time);
                    unsigned long ind_time = time / count;
                    total_interval_time  += time;
                    total_interval_count += count;
                    for (int j = 0 ; j < level ; j++)
                        __SYS_FPRINTF(hdl, " ");
                    __SYS_FPRINTF(hdl, "- (%11.11lu) %s: %u times, %lu msec\n",
                                  time, profile->name, count, ind_time);
                }
                break;
            }
            case CATEGORY:
                for (int j = 0 ; j < level ; j++)
                    __SYS_FPRINTF(hdl, " ");
                __SYS_FPRINTF(hdl, "- %s:\n", profile->name);
                print_profile_result(hdl, profile, level + 1);
                break;
        }
    }
    if (total_interval_count) {
        __SYS_FPRINTF(hdl, " - (%11.11lu) total: %lu times, %lu msec\n",
                      total_interval_time, total_interval_count,
                      total_interval_time / total_interval_count);
    }
}
#endif /* PROFILE */

static struct atomic_int in_terminate = { .counter = 0, };

noreturn void shim_terminate (int err)
{
    debug("terminating the whole process (%d)\n", err);

    /* do the last clean-up of the process */
    shim_clean(err);
    DkProcessExit(err);
}

/* cleanup and terminate process, preserve exit code if err == 0 */
int shim_clean (int err)
{
    /* prevent multiple cleanups; re-entry here is mostly caused by an
       assertion failure inside shim_clean itself */
    if (atomic_inc_return(&in_terminate) > 1)
        return 0;

    if (err != 0)
        cur_process.exit_code = err;
    store_all_msg_persist();

#ifdef PROFILE
    if (ENTER_TIME) {
        switch (shim_get_tls()->context.orig_rax) {
            case __NR_exit_group:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit_group, ENTER_TIME);
                break;
            case __NR_exit:
                SAVE_PROFILE_INTERVAL_SINCE(syscall_exit, ENTER_TIME);
                break;
        }
    }

    if (ipc_cld_profile_send()) {
        MASTER_LOCK();

        PAL_HANDLE hdl = __open_shim_stdio();
        if (hdl) {
            __SYS_FPRINTF(hdl, "******************************\n");
            __SYS_FPRINTF(hdl, "profiling:\n");
            print_profile_result(hdl, &profile_root, 0);
            __SYS_FPRINTF(hdl, "******************************\n");
        }

        MASTER_UNLOCK();
        DkObjectClose(hdl);
    }
#endif

    del_all_ipc_ports();

    if (shim_stdio && shim_stdio != (PAL_HANDLE) -1)
        DkObjectClose(shim_stdio);

    shim_stdio = NULL;
    debug("process %u exited with status %d\n", cur_process.vmid & 0xFFFF, cur_process.exit_code);
    MASTER_LOCK();
    DkProcessExit(cur_process.exit_code);
    return 0;
}

int message_confirm (const char * message, const char * options)
{
    char answer;
    int noptions = strlen(options);
    char * option_str = __alloca(noptions * 2 + 3), * str = option_str;
    int ret = 0;

    *(str++) = ' ';
    *(str++) = '[';

    for (int i = 0 ; i < noptions ; i++) {
        *(str++) = options[i];
        *(str++) = '/';
    }

    str--;
    *(str++) = ']';
    *(str++) = ' ';

    MASTER_LOCK();

    PAL_HANDLE hdl = __open_shim_stdio();
    if (!hdl) {
        MASTER_UNLOCK();
        return -EACCES;
    }

#define WRITE(buf, len)                                             \
    ({  int _ret = DkStreamWrite(hdl, 0, len, (void*)(buf), NULL);  \
        _ret ? : -PAL_ERRNO; })

#define READ(buf, len)                                              \
    ({  int _ret = DkStreamRead(hdl, 0, len, buf, NULL, 0);         \
        _ret ? : -PAL_ERRNO; })

    if ((ret = WRITE(message, strlen(message))) < 0)
        goto out;
    if ((ret = WRITE(option_str, noptions * 2 + 3)) < 0)
        goto out;
    if ((ret = READ(&answer, 1)) < 0)
        goto out;

out:
    DkObjectClose(hdl);
    MASTER_UNLOCK();
    return (ret < 0) ? ret : answer;
}
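
/* For illustration: message_confirm("Proceed?", "yn") prints "Proceed? [y/n] "
 * on the shim's stdio stream and returns the first byte read (e.g. 'y'), or a
 * negative errno on failure. */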