/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_exec.c
 *
 * Implementation of system call "execve".
 */
/* LibOS-internal interfaces. */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_thread.h>
#include <shim_fs.h>
#include <shim_ipc.h>
#include <shim_profile.h>

/* PAL (host abstraction layer). */
#include <pal.h>
#include <pal_error.h>

/* Host/Linux definitions. */
#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/mman.h>
#include <asm/prctl.h>
  31. static int close_on_exec (struct shim_fd_handle * fd_hdl,
  32. struct shim_handle_map * map)
  33. {
  34. if (fd_hdl->flags & FD_CLOEXEC) {
  35. struct shim_handle * hdl = __detach_fd_handle(fd_hdl, NULL, map);
  36. put_handle(hdl);
  37. }
  38. return 0;
  39. }
  40. static int close_cloexec_handle (struct shim_handle_map * map)
  41. {
  42. return walk_handle_map(&close_on_exec, map);
  43. }
/* Profiling buckets for the in-place (RTLD) execve path. */
DEFINE_PROFILE_CATEGORY(exec_rtld, exec);
DEFINE_PROFILE_INTERVAL(alloc_new_stack_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(arrange_arguments_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_executable_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_all_vmas_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(load_new_executable_for_exec, exec_rtld);

/* Defined elsewhere in the LibOS; sets up the brk region for `exec`. */
int init_brk_from_executable (struct shim_handle * exec);

/* Arguments handed from shim_do_execve_rtld() to __shim_do_execve_rtld()
 * across the stack switch.  The callee copies this struct by value first,
 * because it lives on the old stack, which is freed shortly after. */
struct execve_rtld_arg
{
    void * old_stack_top;   /* top of the stack being discarded */
    void * old_stack;       /* base of the stack being discarded */
    void * old_stack_red;   /* base of the old stack's red/guard zone */
    const char ** new_argp; /* argv on the freshly built stack */
    int * new_argcp;        /* argc for the new image */
    elf_auxv_t * new_auxp;  /* auxiliary vector for the new image */
};
/*
 * Second half of the in-place execve: runs on the NEW user stack after
 * __SWITCH_STACK.  Frees the old stack, tears down the old address space,
 * loads the new executable, and jumps to its entry point.  On any failure
 * the process is terminated -- there is no address space to return to.
 */
noreturn static void __shim_do_execve_rtld (struct execve_rtld_arg * __arg)
{
    /* Copy the argument block immediately: *__arg lives on the old stack,
     * which is freed below. */
    struct execve_rtld_arg arg;
    memcpy(&arg, __arg, sizeof(arg));
    void * old_stack_top = arg.old_stack_top;
    void * old_stack = arg.old_stack;
    void * old_stack_red = arg.old_stack_red;
    const char ** new_argp = arg.new_argp;
    int * new_argcp = arg.new_argcp;
    elf_auxv_t * new_auxp = arg.new_auxp;

    struct shim_thread * cur_thread = get_cur_thread();
    int ret = 0;
#ifdef SHIM_TCB_USE_GS
    /* libc tcb is not needed because PAL provides storage for shim_tcb */
    __libc_tcb_t* tcb = NULL;
#else
# define LIBC_TCB_ALLOC_SIZE (sizeof(__libc_tcb_t) + __alignof__(__libc_tcb_t))
    /* Carve an aligned __libc_tcb_t out of the reserved space at the top of
     * the new stack (see the `reserve` argument in shim_do_execve_rtld). */
    __libc_tcb_t* tcb = ALIGN_UP_PTR(
        cur_thread->stack_top - LIBC_TCB_ALLOC_SIZE,
        __alignof__(*tcb));
    memset(tcb, 0, sizeof(*tcb));
#endif
    populate_tls(tcb, false);
    debug("set tcb to %p\n", tcb);

    UPDATE_PROFILE_INTERVAL();

    /* Release the old stack and its red (guard) zone, then drop the
     * corresponding VMA bookkeeping; failure here means the bookkeeping is
     * inconsistent with reality, which is unrecoverable. */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        BUG();

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    /* Snapshot all VMAs, doubling the buffer until dump_all_vmas() fits. */
    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val * vmas = malloc(sizeof(struct shim_vma_val) * count);

    if (!vmas) {
        ret = -ENOMEM;
        goto error;
    }

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        struct shim_vma_val * new_vmas
                = malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            ret = -ENOMEM;
            goto error;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        goto error;
    }

    count = ret;
    for (struct shim_vma_val * vma = vmas ; vma < vmas + count ; vma++) {
        /* Don't free the current stack */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* Map the new executable, initialize its brk, and load its ELF
     * interpreter (dynamic loader) if it requests one. */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        goto error;

    if ((ret = init_brk_from_executable(cur_thread->exec)) < 0)
        goto error;

    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    /* The old image's robust futex list is meaningless in the new image. */
    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    /* Transfers control to the new image; does not return. */
    execute_elf_object(cur_thread->exec, new_argcp, new_argp, new_auxp);
    /* NOTREACHED */

error:
    debug("execve: failed %d\n", ret);
    shim_terminate(ret);
}
/*
 * In-place execve (EXECVE_RTLD path): replace this process's image without
 * forking a new host process.  Closes FD_CLOEXEC handles, installs `hdl` as
 * the thread's executable, builds a fresh user stack carrying argv/envp/auxv,
 * then pivots onto it; the teardown/reload finishes in
 * __shim_do_execve_rtld().  Returns (negative errno) only on early failure.
 */
static int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                                const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Swap the thread's executable handle for the new one. */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Remember the old stack so the second half can free it after we have
     * switched off of it; clear the fields so init_stack() installs a new
     * stack on this thread. */
    void * old_stack_top = cur_thread->stack_top;
    void * old_stack = cur_thread->stack;
    void * old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    int new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    int * new_argcp = &new_argc;
    const char ** new_argp;
    elf_auxv_t * new_auxp;
#ifdef SHIM_TCB_USE_GS
    size_t reserve = 0;
#else
    /* reserve __libc_tcb_t for startup use. see __shim_do_execve_rtld() */
    size_t reserve = LIBC_TCB_ALLOC_SIZE;
#endif
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp, &new_auxp,
                          reserve)) < 0)
        return ret;

    __disable_preempt(shim_get_tls()); // Temporarily disable preemption
                                       // during execve().
    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    struct execve_rtld_arg arg = {
        .old_stack_top = old_stack_top,
        .old_stack = old_stack,
        .old_stack_red = old_stack_red,
        .new_argp = new_argp,
        .new_argcp = new_argcp,
        .new_auxp = new_auxp
    };
    /* Pivot onto the new stack; control continues in __shim_do_execve_rtld()
     * and never comes back here on success. */
    __SWITCH_STACK(new_argcp, &__shim_do_execve_rtld, &arg);
    return 0;
}
#include <shim_checkpoint.h>

/* Profiling buckets shared by both execve emulation paths. */
DEFINE_PROFILE_CATEGORY(exec, );
DEFINE_PROFILE_INTERVAL(search_and_check_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(open_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec, exec);
/* thread is cur_thread stripped off stack & tcb (see below func);
 * process is new process which is forked and waits for checkpoint. */
static int migrate_execve (struct shim_cp_store * cpstore,
                           struct shim_thread * thread,
                           struct shim_process * process, va_list ap)
{
    struct shim_handle_map * handle_map;
    const char ** envp = va_arg(ap, const char **);
    int ret;

    BEGIN_PROFILE_INTERVAL();

    /* Give the migrating thread its own copy of the handle map, with
     * FD_CLOEXEC descriptors already closed -- the new image must not
     * inherit them. */
    if ((ret = dup_handle_map(&handle_map, thread->handle_map)) < 0)
        return ret;

    set_handle_map(thread, handle_map);

    if ((ret = close_cloexec_handle(handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Now we start to migrate bookkeeping for exec.
       The data we need to migrate are:
            1. current thread
            2. current filesystem
            3. handle mapping
            4. each handle */
    BEGIN_MIGRATION_DEF(execve,
                        struct shim_thread * thread,
                        struct shim_process * proc,
                        const char ** envp)
    {
        DEFINE_MIGRATE(process, proc, sizeof(struct shim_process));
        DEFINE_MIGRATE(all_mounts, NULL, 0);
        DEFINE_MIGRATE(running_thread, thread, sizeof(struct shim_thread));
        DEFINE_MIGRATE(handle_map, thread->handle_map,
                       sizeof (struct shim_handle_map));
        DEFINE_MIGRATE(migratable, NULL, 0);
        DEFINE_MIGRATE(environ, envp, 0);
    }
    END_MIGRATION_DEF(execve)

    return START_MIGRATE(cpstore, execve, thread, process, envp);
}
/*
 * execve() entry point.  Validates the user-supplied pointers, resolves and
 * opens the target file, follows shebang ("#!") chains, and then either
 * performs an in-place exec (RTLD path, single-threaded non-SGX case) or
 * emulates execve by checkpointing into a freshly forked process and letting
 * this "temporary" process exit quietly.
 */
int shim_do_execve (const char * file, const char ** argv,
                    const char ** envp)
{
    struct shim_thread * cur_thread = get_cur_thread();
    struct shim_dentry * dent = NULL;
    int ret = 0, argc = 0;

    /* Validate every user pointer and string before dereferencing it. */
    if (test_user_string(file))
        return -EFAULT;

    for (const char** a = argv; /* no condition*/; a++, argc++) {
        if (test_user_memory(a, sizeof(*a), false))
            return -EFAULT;
        if (*a == NULL)
            break;
        if (test_user_string(*a))
            return -EFAULT;
    }

    if (!envp)
        envp = initial_envp;

    for (const char** e = envp; /* no condition*/; e++) {
        if (test_user_memory(e, sizeof(*e), false))
            return -EFAULT;
        if (*e == NULL)
            break;
        if (test_user_string(*e))
            return -EFAULT;
    }

    BEGIN_PROFILE_INTERVAL();

    /* Interpreter arguments accumulated from shebang lines; prepended to
     * argv later if the target turns out to be a script. */
    DEFINE_LIST(sharg);
    struct sharg {
        LIST_TYPE(sharg) list;
        int len;            /* length of arg, excluding the terminating 0 */
        char arg[0];        /* trailing variable-length argument text */
    };
    DEFINE_LISTP(sharg);
    LISTP_TYPE(sharg) shargs;
    INIT_LISTP(&shargs);

reopen:
    /* XXX: Not sure what to do here yet */
    assert(cur_thread);
    if ((ret = path_lookupat(NULL, file, LOOKUP_OPEN, &dent, NULL)) < 0)
        return ret;

    struct shim_mount * fs = dent->fs;
    get_dentry(dent);

    if (!fs->d_ops->open) {
        ret = -EACCES;
err:    /* shared error exit: drops the dentry reference taken above */
        put_dentry(dent);
        return ret;
    }

    if (fs->d_ops->mode) {
        __kernel_mode_t mode;
        if ((ret = fs->d_ops->mode(dent, &mode)) < 0)
            goto err;
    }

    SAVE_PROFILE_INTERVAL(search_and_check_file_for_exec);

    struct shim_handle * exec = NULL;

    if (!(exec = get_new_handle())) {
        ret = -ENOMEM;
        goto err;
    }

    set_handle_fs(exec, fs);
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;
    /* NOTE(review): the return value of open() is stored in ret but never
     * checked directly -- only an empty URI is treated as failure below.
     * An open() failure that still leaves a non-empty URI would slip
     * through; worth confirming against the d_ops contract. */
    ret = fs->d_ops->open(exec, dent, O_RDONLY);

    if (qstrempty(&exec->uri)) {
        put_handle(exec);
        return -EACCES;
    }

    size_t pathlen;
    char *path = dentry_get_path(dent, true, &pathlen);
    qstrsetstr(&exec->path, path, pathlen);

    if ((ret = check_elf_object(exec)) < 0 && ret != -EINVAL) {
        put_handle(exec);
        return ret;
    }

    if (ret == -EINVAL) { /* it's a shebang */
        /* Parse the "#!interpreter [args...]" first line, reading in
         * 80-byte chunks; tokens may span chunk boundaries. */
        LISTP_TYPE(sharg) new_shargs = LISTP_INIT;
        struct sharg * next = NULL;
        bool ended = false, started = false;
        char buf[80];

        do {
            ret = do_handle_read(exec, buf, 80);
            if (ret <= 0)
                break;

            char * s = buf, * c = buf, * e = buf + ret;

            if (!started) {
                if (ret < 2 || buf[0] != '#' || buf[1] != '!')
                    break;
                s += 2;
                c += 2;
                started = true;
            }

            for (; c < e ; c++) {
                if (*c == ' ' || *c == '\n' || c == e - 1) {
                    /* Token length: up to the delimiter, or to the chunk's
                     * end when the token continues into the next read. */
                    int l = (*c == ' ' || * c == '\n') ? c - s : e - s;
                    if (next) {
                        /* Token spans chunks: rebuild it as a larger sharg
                         * joining the partial text with this fragment. */
                        struct sharg * sh =
                            __alloca(sizeof(struct sharg) + next->len + l + 1);
                        sh->len = next->len + l;
                        memcpy(sh->arg, next->arg, next->len);
                        memcpy(sh->arg + next->len, s, l);
                        sh->arg[next->len + l] = 0;
                        next = sh;
                    } else {
                        next = __alloca(sizeof(struct sharg) + l + 1);
                        next->len = l;
                        memcpy(next->arg, s, l);
                        next->arg[l] = 0;
                    }

                    if (*c == ' ' || *c == '\n') {
                        INIT_LIST_HEAD(next, list);
                        LISTP_ADD_TAIL(next, &new_shargs, list);
                        next = NULL;
                        s = c + 1;
                        if (*c == '\n') {
                            ended = true;
                            break;
                        }
                    }
                }
            }
        } while (!ended);

        if (started) {
            if (next) {
                INIT_LIST_HEAD(next, list);
                LISTP_ADD_TAIL(next, &new_shargs, list);
            }

            struct sharg * first =
                LISTP_FIRST_ENTRY(&new_shargs, struct sharg, list);
            assert(first);
            debug("detected as script: run by %s\n", first->arg);
            /* Restart the lookup with the interpreter as the target. */
            file = first->arg;
            LISTP_SPLICE(&new_shargs, &shargs, list, sharg);
            put_handle(exec);
            goto reopen;
        }
    }

    SAVE_PROFILE_INTERVAL(open_file_for_exec);

#if EXECVE_RTLD == 1
    /* On non-SGX hosts, a process whose only thread is this one can exec
     * in place instead of forking a replacement process. */
    if (!strcmp_static(PAL_CB(host_type), "Linux-SGX")) {
        int is_last = check_last_thread(cur_thread) == 0;
        if (is_last) {
            debug("execve() in the same process\n");
            return shim_do_execve_rtld(exec, argv, envp);
        }
        debug("execve() in a new process\n");
    }
#endif

    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    /* Prepend the collected shebang interpreter arguments to argv. */
    if (!LISTP_EMPTY(&shargs)) {
        struct sharg * sh;
        int shargc = 0, cnt = 0;
        LISTP_FOR_EACH_ENTRY(sh, &shargs, list)
            shargc++;

        const char ** new_argv =
                __alloca(sizeof(const char *) * (argc + shargc + 1));

        LISTP_FOR_EACH_ENTRY(sh, &shargs, list)
            new_argv[cnt++] = sh->arg;

        for (cnt = 0 ; cnt < argc ; cnt++)
            new_argv[shargc + cnt] = argv[cnt];

        new_argv[shargc + argc] = NULL;
        argv = new_argv;
    }

    lock(&cur_thread->lock);
    put_handle(cur_thread->exec);
    cur_thread->exec = exec;

    /* Temporarily strip the thread of its stack/tcb so they are not
     * checkpointed into the new process; restored after migration. */
    void * stack = cur_thread->stack;
    void * stack_top = cur_thread->stack_top;
    __libc_tcb_t * tcb = cur_thread->tcb;
    bool user_tcb = cur_thread->user_tcb;
    void * frameptr = cur_thread->frameptr;

    cur_thread->stack = NULL;
    cur_thread->stack_top = NULL;
    cur_thread->frameptr = NULL;
    cur_thread->tcb = NULL;
    cur_thread->user_tcb = false;
    cur_thread->in_vm = false;
    unlock(&cur_thread->lock);

    ret = do_migrate_process(&migrate_execve, exec, argv, cur_thread, envp);

    lock(&cur_thread->lock);
    cur_thread->stack = stack;
    cur_thread->stack_top = stack_top;
    cur_thread->frameptr = frameptr;
    cur_thread->tcb = tcb;
    cur_thread->user_tcb = user_tcb;

    if (ret < 0) {
        /* execve failed, so reanimate this thread as if nothing happened */
        cur_thread->in_vm = true;
        unlock(&cur_thread->lock);
        return ret;
    }

    /* This "temporary" process must die quietly, not sending any messages
     * to not confuse the parent and the execve'ed child */
    debug("Temporary process %u exited after emulating execve (by forking new process to replace this one)\n",
          cur_process.vmid & 0xFFFF);
    MASTER_LOCK();
    DkProcessExit(0);

    return 0;
}