shim_exec.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540
  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * shim_exec.c
  15. *
  16. * Implementation of system call "execve".
  17. */
  18. #include <asm/prctl.h>
  19. #include <errno.h>
  20. #include <linux/futex.h>
  21. #include <sys/mman.h>
  22. #include <sys/syscall.h>
  23. #include <pal.h>
  24. #include <pal_error.h>
  25. #include <shim_fs.h>
  26. #include <shim_internal.h>
  27. #include <shim_ipc.h>
  28. #include <shim_profile.h>
  29. #include <shim_table.h>
  30. #include <shim_thread.h>
  31. /* returns 0 if normalized URIs are the same; assumes file URIs */
  32. static int normalize_and_cmp_uris(const char* uri1, const char* uri2) {
  33. char norm1[STR_SIZE];
  34. char norm2[STR_SIZE];
  35. size_t len;
  36. int ret;
  37. if (!strstartswith_static(uri1, URI_PREFIX_FILE) ||
  38. !strstartswith_static(uri2, URI_PREFIX_FILE))
  39. return -1;
  40. uri1 += URI_PREFIX_FILE_LEN;
  41. len = sizeof(norm1);
  42. ret = get_norm_path(uri1, norm1, &len);
  43. if (ret < 0)
  44. return ret;
  45. uri2 += URI_PREFIX_FILE_LEN;
  46. len = sizeof(norm2);
  47. ret = get_norm_path(uri2, norm2, &len);
  48. if (ret < 0)
  49. return ret;
  50. return memcmp(norm1, norm2, len + 1);
  51. }
  52. static int close_on_exec(struct shim_fd_handle* fd_hdl, struct shim_handle_map* map) {
  53. if (fd_hdl->flags & FD_CLOEXEC) {
  54. struct shim_handle* hdl = __detach_fd_handle(fd_hdl, NULL, map);
  55. put_handle(hdl);
  56. }
  57. return 0;
  58. }
  59. static int close_cloexec_handle(struct shim_handle_map* map) {
  60. return walk_handle_map(&close_on_exec, map);
  61. }
/* Profiling counters for the in-place (RTLD) execve path below. */
DEFINE_PROFILE_CATEGORY(exec_rtld, exec);
DEFINE_PROFILE_INTERVAL(alloc_new_stack_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(arrange_arguments_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_executable_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_all_vmas_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(load_new_executable_for_exec, exec_rtld);

/* Sets up the brk region for a freshly loaded executable; defined in another
 * translation unit (presumably the ELF loader — TODO confirm). */
int init_brk_from_executable(struct shim_handle* exec);
/* Arguments handed from shim_do_execve_rtld() to __shim_do_execve_rtld()
 * across the stack switch. The callee copies this struct by value first,
 * because it lives on the old stack which is freed during the teardown. */
struct execve_rtld_arg {
    void* old_stack_top;   /* top of the stack being retired */
    void* old_stack;       /* base of the stack being retired */
    void* old_stack_red;   /* base of the guard ("red") zone below the old stack */
    const char** new_argp; /* argv placed on the newly built stack */
    int* new_argcp;        /* location of argc for the new image */
    elf_auxv_t* new_auxp;  /* ELF auxiliary vector on the new stack */
};
/* Final stage of in-place execve(), entered on the NEW stack after
 * __SWITCH_STACK: frees the old stack, tears down the old address space
 * (loaded libraries, brk, every VMA except the current stack), loads the new
 * executable plus its interpreter and jumps into it. Never returns; on any
 * error the whole process exits via shim_clean_and_exit(). */
noreturn static void __shim_do_execve_rtld(struct execve_rtld_arg* __arg) {
    /* __arg points into the old stack, which is freed below — copy it out */
    struct execve_rtld_arg arg;
    memcpy(&arg, __arg, sizeof(arg));
    void* old_stack_top = arg.old_stack_top;
    void* old_stack = arg.old_stack;
    void* old_stack_red = arg.old_stack_red;
    const char** new_argp = arg.new_argp;
    int* new_argcp = arg.new_argcp;
    elf_auxv_t* new_auxp = arg.new_auxp;

    struct shim_thread* cur_thread = get_cur_thread();
    int ret = 0;

    /* zero the fs segment base; presumably the new image's runtime installs
     * its own TLS afterwards — TODO confirm */
    unsigned long fs_base = 0;
    update_fs_base(fs_base);
    debug("set fs_base to 0x%lx\n", fs_base);

    UPDATE_PROFILE_INTERVAL();

    /* release the old stack and its guard zone, then drop their bookkeeping */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        BUG();

    /* old executable and its shared libraries do not survive execve() */
    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    /* snapshot all VMAs, doubling the buffer until the dump fits */
    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val* vmas = malloc(sizeof(struct shim_vma_val) * count);
    if (!vmas) {
        ret = -ENOMEM;
        goto error;
    }

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        struct shim_vma_val* new_vmas = malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            ret = -ENOMEM;
            goto error;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        goto error;
    }

    count = ret;
    for (struct shim_vma_val* vma = vmas; vma < vmas + count; vma++) {
        /* Don't free the current stack */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* map the new executable, set up its brk, then map its ELF interpreter */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        goto error;

    if ((ret = init_brk_from_executable(cur_thread->exec)) < 0)
        goto error;

    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    /* the robust futex list does not survive execve() */
    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    /* transfers control to the new image's entry point; never returns */
    execute_elf_object(cur_thread->exec, new_argcp, new_argp, new_auxp);
    /* NOTREACHED */

error:
    debug("execve: failed %d\n", ret);
    shim_clean_and_exit(ret);
}
/* In-place execve(): closes FD_CLOEXEC descriptors, installs `hdl` as the
 * thread's executable, builds a brand-new stack holding argv/envp/auxv, and
 * switches onto it, continuing in __shim_do_execve_rtld(). On success this
 * never returns to the caller; a negative error code is returned only when
 * preparation fails before the stack switch. */
static int shim_do_execve_rtld(struct shim_handle* hdl, const char** argv, const char** envp) {
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread* cur_thread = get_cur_thread();
    int ret;

    /* POSIX: close-on-exec descriptors do not survive execve() */
    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* swap the thread's executable handle for the new one */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* detach the old stack from the thread; it is freed after the switch */
    void* old_stack_top = cur_thread->stack_top;
    void* old_stack = cur_thread->stack;
    void* old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    /* count argv entries for the new image */
    int new_argc = 0;
    for (const char** a = argv; *a; a++, new_argc++)
        ;

    int* new_argcp = &new_argc;
    const char** new_argp;
    elf_auxv_t* new_auxp;
    /* allocate and populate the new stack (argc/argv/envp/auxv) */
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp, &new_auxp)) < 0)
        return ret;

    __disable_preempt(shim_get_tcb());  // Temporarily disable preemption during execve().

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    struct execve_rtld_arg arg = {
        .old_stack_top = old_stack_top,
        .old_stack = old_stack,
        .old_stack_red = old_stack_red,
        .new_argp = new_argp,
        .new_argcp = new_argcp,
        .new_auxp = new_auxp
    };
    /* jump onto the new stack; execution resumes in __shim_do_execve_rtld() */
    __SWITCH_STACK(new_argcp, &__shim_do_execve_rtld, &arg);
    return 0;
}
#include <shim_checkpoint.h>

/* Profiling counters for the checkpoint-based (new process) execve path. */
DEFINE_PROFILE_CATEGORY(exec, );
DEFINE_PROFILE_INTERVAL(search_and_check_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(open_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec, exec);
/* thread is cur_thread stripped off stack & tcb (see below func);
 * process is new process which is forked and waits for checkpoint. */
/* Checkpoint callback for the new-process execve path: gives the migrated
 * thread its own handle map with FD_CLOEXEC descriptors dropped, then defines
 * which state is checkpointed into the child. The single variadic argument
 * is `envp` (const char**). */
static int migrate_execve(struct shim_cp_store* cpstore, struct shim_thread* thread,
                          struct shim_process* process, va_list ap) {
    struct shim_handle_map* handle_map;
    const char** envp = va_arg(ap, const char**);
    int ret;

    BEGIN_PROFILE_INTERVAL();

    /* duplicate the handle map so closing CLOEXEC descriptors does not
     * disturb the parent's descriptors */
    if ((ret = dup_handle_map(&handle_map, thread->handle_map)) < 0)
        return ret;

    set_handle_map(thread, handle_map);

    if ((ret = close_cloexec_handle(handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Now we start to migrate bookkeeping for exec.
       The data we need to migrate are:
       1. current thread
       2. current filesystem
       3. handle mapping
       4. each handle */
    BEGIN_MIGRATION_DEF(execve, struct shim_thread* thread, struct shim_process* proc,
                        const char** envp) {
        DEFINE_MIGRATE(process, proc, sizeof(struct shim_process));
        DEFINE_MIGRATE(all_mounts, NULL, 0);
        DEFINE_MIGRATE(running_thread, thread, sizeof(struct shim_thread));
        DEFINE_MIGRATE(handle_map, thread->handle_map, sizeof(struct shim_handle_map));
        DEFINE_MIGRATE(migratable, NULL, 0);
        DEFINE_MIGRATE(environ, envp, 0);
    }
    END_MIGRATION_DEF(execve)

    return START_MIGRATE(cpstore, execve, thread, process, envp);
}
  232. int shim_do_execve(const char* file, const char** argv, const char** envp) {
  233. struct shim_thread* cur_thread = get_cur_thread();
  234. struct shim_dentry* dent = NULL;
  235. int ret = 0, argc = 0;
  236. if (test_user_string(file))
  237. return -EFAULT;
  238. for (const char** a = argv; /* no condition*/; a++, argc++) {
  239. if (test_user_memory(a, sizeof(*a), false))
  240. return -EFAULT;
  241. if (*a == NULL)
  242. break;
  243. if (test_user_string(*a))
  244. return -EFAULT;
  245. }
  246. if (!envp)
  247. envp = initial_envp;
  248. for (const char** e = envp; /* no condition*/; e++) {
  249. if (test_user_memory(e, sizeof(*e), false))
  250. return -EFAULT;
  251. if (*e == NULL)
  252. break;
  253. if (test_user_string(*e))
  254. return -EFAULT;
  255. }
  256. BEGIN_PROFILE_INTERVAL();
  257. DEFINE_LIST(sharg);
  258. struct sharg {
  259. LIST_TYPE(sharg) list;
  260. int len;
  261. char arg[0];
  262. };
  263. DEFINE_LISTP(sharg);
  264. LISTP_TYPE(sharg) shargs;
  265. INIT_LISTP(&shargs);
  266. reopen:
  267. /* XXX: Not sure what to do here yet */
  268. assert(cur_thread);
  269. if ((ret = path_lookupat(NULL, file, LOOKUP_OPEN, &dent, NULL)) < 0)
  270. return ret;
  271. struct shim_mount* fs = dent->fs;
  272. get_dentry(dent);
  273. if (!fs->d_ops->open) {
  274. ret = -EACCES;
  275. err:
  276. put_dentry(dent);
  277. return ret;
  278. }
  279. if (fs->d_ops->mode) {
  280. __kernel_mode_t mode;
  281. if ((ret = fs->d_ops->mode(dent, &mode)) < 0)
  282. goto err;
  283. }
  284. SAVE_PROFILE_INTERVAL(search_and_check_file_for_exec);
  285. struct shim_handle* exec = NULL;
  286. if (!(exec = get_new_handle())) {
  287. ret = -ENOMEM;
  288. goto err;
  289. }
  290. set_handle_fs(exec, fs);
  291. exec->flags = O_RDONLY;
  292. exec->acc_mode = MAY_READ;
  293. ret = fs->d_ops->open(exec, dent, O_RDONLY);
  294. if (qstrempty(&exec->uri)) {
  295. put_handle(exec);
  296. return -EACCES;
  297. }
  298. size_t pathlen;
  299. char* path = dentry_get_path(dent, true, &pathlen);
  300. qstrsetstr(&exec->path, path, pathlen);
  301. if ((ret = check_elf_object(exec)) < 0 && ret != -EINVAL) {
  302. put_handle(exec);
  303. return ret;
  304. }
  305. if (ret == -EINVAL) { /* it's a shebang */
  306. LISTP_TYPE(sharg) new_shargs = LISTP_INIT;
  307. struct sharg* next = NULL;
  308. bool ended = false, started = false;
  309. char buf[80];
  310. do {
  311. ret = do_handle_read(exec, buf, 80);
  312. if (ret <= 0)
  313. break;
  314. char* s = buf;
  315. char* c = buf;
  316. char* e = buf + ret;
  317. if (!started) {
  318. if (ret < 2 || buf[0] != '#' || buf[1] != '!')
  319. break;
  320. s += 2;
  321. c += 2;
  322. started = true;
  323. }
  324. for (; c < e; c++) {
  325. if (*c == ' ' || *c == '\n' || c == e - 1) {
  326. int l = (*c == ' ' || *c == '\n') ? c - s : e - s;
  327. if (next) {
  328. struct sharg* sh = __alloca(sizeof(struct sharg) + next->len + l + 1);
  329. sh->len = next->len + l;
  330. memcpy(sh->arg, next->arg, next->len);
  331. memcpy(sh->arg + next->len, s, l);
  332. sh->arg[next->len + l] = 0;
  333. next = sh;
  334. } else {
  335. next = __alloca(sizeof(struct sharg) + l + 1);
  336. next->len = l;
  337. memcpy(next->arg, s, l);
  338. next->arg[l] = 0;
  339. }
  340. if (*c == ' ' || *c == '\n') {
  341. INIT_LIST_HEAD(next, list);
  342. LISTP_ADD_TAIL(next, &new_shargs, list);
  343. next = NULL;
  344. s = c + 1;
  345. if (*c == '\n') {
  346. ended = true;
  347. break;
  348. }
  349. }
  350. }
  351. }
  352. } while (!ended);
  353. if (started) {
  354. if (next) {
  355. INIT_LIST_HEAD(next, list);
  356. LISTP_ADD_TAIL(next, &new_shargs, list);
  357. }
  358. struct sharg* first = LISTP_FIRST_ENTRY(&new_shargs, struct sharg, list);
  359. assert(first);
  360. debug("detected as script: run by %s\n", first->arg);
  361. file = first->arg;
  362. LISTP_SPLICE(&new_shargs, &shargs, list, sharg);
  363. put_handle(exec);
  364. goto reopen;
  365. }
  366. }
  367. SAVE_PROFILE_INTERVAL(open_file_for_exec);
  368. bool use_same_process = check_last_thread(cur_thread) == 0;
  369. if (use_same_process && !strcmp_static(PAL_CB(host_type), "Linux-SGX")) {
  370. /* for SGX PALs, can use same process only if it is the same executable (because a different
  371. * executable has a different measurement and thus requires a new enclave); this special
  372. * case is to correctly handle e.g. Bash process replacing itself */
  373. assert(cur_thread->exec);
  374. if (normalize_and_cmp_uris(qstrgetstr(&cur_thread->exec->uri), qstrgetstr(&exec->uri))) {
  375. /* it is not the same executable, definitely cannot use same process */
  376. use_same_process = false;
  377. }
  378. }
  379. if (use_same_process) {
  380. debug("execve() in the same process\n");
  381. return shim_do_execve_rtld(exec, argv, envp);
  382. }
  383. debug("execve() in a new process\n");
  384. INC_PROFILE_OCCURENCE(syscall_use_ipc);
  385. if (!LISTP_EMPTY(&shargs)) {
  386. struct sharg* sh;
  387. int shargc = 0, cnt = 0;
  388. LISTP_FOR_EACH_ENTRY(sh, &shargs, list) {
  389. shargc++;
  390. }
  391. const char** new_argv = __alloca(sizeof(const char*) * (argc + shargc + 1));
  392. LISTP_FOR_EACH_ENTRY(sh, &shargs, list) {
  393. new_argv[cnt++] = sh->arg;
  394. }
  395. for (cnt = 0; cnt < argc; cnt++)
  396. new_argv[shargc + cnt] = argv[cnt];
  397. new_argv[shargc + argc] = NULL;
  398. argv = new_argv;
  399. }
  400. lock(&cur_thread->lock);
  401. put_handle(cur_thread->exec);
  402. cur_thread->exec = exec;
  403. void* stack = cur_thread->stack;
  404. void* stack_top = cur_thread->stack_top;
  405. shim_tcb_t* shim_tcb = cur_thread->shim_tcb;
  406. void* frameptr = cur_thread->frameptr;
  407. cur_thread->stack = NULL;
  408. cur_thread->stack_top = NULL;
  409. cur_thread->frameptr = NULL;
  410. cur_thread->shim_tcb = NULL;
  411. cur_thread->in_vm = false;
  412. unlock(&cur_thread->lock);
  413. ret = do_migrate_process(&migrate_execve, exec, argv, cur_thread, envp);
  414. lock(&cur_thread->lock);
  415. cur_thread->stack = stack;
  416. cur_thread->stack_top = stack_top;
  417. cur_thread->frameptr = frameptr;
  418. cur_thread->shim_tcb = shim_tcb;
  419. if (ret < 0) {
  420. /* execve failed, so reanimate this thread as if nothing happened */
  421. cur_thread->in_vm = true;
  422. unlock(&cur_thread->lock);
  423. return ret;
  424. }
  425. /* This "temporary" process must die quietly, not sending any messages to not confuse the parent
  426. * and the execve'ed child */
  427. debug(
  428. "Temporary process %u exited after emulating execve (by forking new process to replace this"
  429. " one)\n",
  430. cur_process.vmid & 0xFFFF);
  431. MASTER_LOCK();
  432. DkProcessExit(0);
  433. return 0;
  434. }