shim_exec.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU Lesser General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU Lesser General Public License for more details.
  13. You should have received a copy of the GNU Lesser General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_exec.c
  17. *
  18. * Implementation of system call "execve".
  19. */
  20. #include <shim_internal.h>
  21. #include <shim_table.h>
  22. #include <shim_thread.h>
  23. #include <shim_fs.h>
  24. #include <shim_ipc.h>
  25. #include <shim_profile.h>
  26. #include <pal.h>
  27. #include <pal_error.h>
  28. #include <errno.h>
  29. #include <linux/futex.h>
  30. #include <sys/syscall.h>
  31. #include <sys/mman.h>
  32. #include <asm/prctl.h>
  33. static int close_on_exec (struct shim_fd_handle * fd_hdl,
  34. struct shim_handle_map * map, void * arg)
  35. {
  36. if (fd_hdl->flags & FD_CLOEXEC) {
  37. struct shim_handle * hdl = __detach_fd_handle(fd_hdl, NULL, map);
  38. close_handle(hdl);
  39. }
  40. return 0;
  41. }
  42. static int close_cloexec_handle (struct shim_handle_map * map)
  43. {
  44. return walk_handle_map(&close_on_exec, map, NULL);
  45. }
/* Profiling buckets for the in-place ("RTLD") execve path below. */
DEFINE_PROFILE_CATAGORY(exec_rtld, exec);
DEFINE_PROFILE_INTERVAL(alloc_new_stack_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(arrange_arguments_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_executable_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_all_vmas_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(load_new_executable_for_exec, exec_rtld);

/* shim_do_execve_rtld() calls switch_stack() halfway through, after which
 * its own stack frame (and every local variable) is gone.  Anything it
 * still needs after the switch must therefore live in file-scope globals:
 *   old_stack_top/old_stack/old_stack_red - the user stack being replaced
 *   new_argp / new_argc / new_argcp       - argv layout on the new stack
 *   new_auxp                              - ELF auxiliary vector slots     */
static void * old_stack_top, * old_stack, * old_stack_red;
static const char ** new_argp;
static int new_argc;
static int * new_argcp;
static elf_auxv_t * new_auxp;

/* Number of ELF auxv entries passed to init_stack()/execute_elf_object(). */
#define REQUIRED_ELF_AUXV 6

int init_brk_from_executable (struct shim_handle * exec);
/*
 * Execute a new program image inside the *current* process ("RTLD" path):
 * close CLOEXEC handles, build a fresh user stack with the new argv/envp/
 * auxv, switch to it, tear down the old address space (old stack, loaded
 * libraries, brk, all other VMAs), then load and run the new executable.
 *
 * hdl  - open handle for the new executable (refcounted; this function
 *        takes its own reference and installs it as cur_thread->exec)
 * argv - new argument vector (NULL-terminated)
 * envp - new environment vector
 *
 * Returns a negative errno on early failure; on success control transfers
 * to the new image via execute_elf_object() and the final `return 0` is
 * not expected to be reached.
 */
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    /* POSIX execve semantics: descriptors marked FD_CLOEXEC are closed. */
    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Fresh TLS block for the new image.
     * NOTE(review): if init_stack() below fails, this allocation is not
     * freed - but populate_tls() has already pointed the TLS at it, so a
     * plain free() here would be worse; confirm intended cleanup. */
    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;
    populate_tls(tcb, false);
    __disable_preempt(&((__libc_tcb_t *) tcb)->shim_tcb); // Temporarily disable preemption
                                                          // during execve().
    debug("set tcb to %p\n", tcb);

    /* Swap the thread's exec handle: drop the old image, hold the new. */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Stash the old stack bounds in globals - locals will not survive
     * the switch_stack() below. */
    old_stack_top = cur_thread->stack_top;
    old_stack     = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack     = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    new_argcp = &new_argc;
    /* Lay out argv/envp/auxv on a brand-new user stack. */
    if ((ret = init_stack(argv, envp, &new_argcp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    /* Point of no return: after this, the old stack frame is gone and
     * only globals (old_stack*, new_arg*) are meaningful. */
    switch_stack(new_argp);
    cur_thread = get_cur_thread();

    UPDATE_PROFILE_INTERVAL();

    /* Release the old stack (and its red/guard zone) and its bookkeeping. */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    if (bkeep_munmap(old_stack, old_stack_top - old_stack, 0) < 0 ||
        bkeep_munmap(old_stack_red, old_stack - old_stack_red, 0) < 0)
        bug();

    /* Drop the previously loaded ELF objects and the brk region. */
    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    reset_brk();

    /* Snapshot all remaining VMAs; grow the buffer and retry on overflow. */
    size_t count = DEFAULT_VMA_COUNT;
    struct shim_vma_val * vmas = malloc(sizeof(struct shim_vma_val) * count);
    if (!vmas)
        return -ENOMEM;

retry_dump_vmas:
    ret = dump_all_vmas(vmas, count);

    if (ret == -EOVERFLOW) {
        struct shim_vma_val * new_vmas
                = malloc(sizeof(struct shim_vma_val) * count * 2);
        if (!new_vmas) {
            free(vmas);
            return -ENOMEM;
        }
        free(vmas);
        vmas = new_vmas;
        count *= 2;
        goto retry_dump_vmas;
    }

    if (ret < 0) {
        free(vmas);
        return ret;
    }

    count = ret;
    for (struct shim_vma_val * vma = vmas ; vma < vmas + count ; vma++) {
        /* Don't free the current stack */
        if (vma->addr == cur_thread->stack)
            continue;

        /* Free all the mapped VMAs */
        if (!(vma->flags & VMA_UNMAPPED))
            DkVirtualMemoryFree(vma->addr, vma->length);

        /* Remove the VMAs */
        bkeep_munmap(vma->addr, vma->length, vma->flags);
    }

    free_vma_val_array(vmas, count);

    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* Load the new executable; at this point the old image is gone, so a
     * load failure is unrecoverable and terminates the library OS. */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate();

    init_brk_from_executable(cur_thread->exec);
    load_elf_interp(cur_thread->exec);

    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    /* The robust-futex list of the old image is meaningless now. */
    cur_thread->robust_list = NULL;

#ifdef PROFILE
    if (ENTER_TIME)
        SAVE_PROFILE_INTERVAL_SINCE(syscall_execve, ENTER_TIME);
#endif

    debug("execve: start execution\n");
    /* Transfers control to the new image (or its ELF interpreter). */
    execute_elf_object(cur_thread->exec, new_argcp, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);
    return 0;
}
/* Checkpoint/migration support is only needed by the process-spawning
 * execve path below, hence the mid-file include. */
#include <shim_checkpoint.h>

/* Profiling buckets for the generic execve path. */
DEFINE_PROFILE_CATAGORY(exec, );
DEFINE_PROFILE_INTERVAL(search_and_check_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(open_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec, exec);
/*
 * Checkpoint callback for execve-into-a-new-process: duplicate the handle
 * map (so CLOEXEC closing does not disturb the parent), close CLOEXEC
 * descriptors, then define and start the migration of the process state
 * the new process needs.
 *
 * cpstore - checkpoint store to write into
 * thread  - the thread performing execve
 * process - process descriptor to migrate
 * ap      - variadic tail from do_migrate_process(); carries envp
 *
 * Returns 0 on success or a negative errno.
 */
static int migrate_execve (struct shim_cp_store * cpstore,
                           struct shim_thread * thread,
                           struct shim_process * process, va_list ap)
{
    struct shim_handle_map * handle_map;
    const char ** envp = va_arg(ap, const char **);
    int ret;

    BEGIN_PROFILE_INTERVAL();

    /* Work on a private copy of the handle map so closing CLOEXEC
     * descriptors only affects the migrated state. */
    if ((ret = dup_handle_map(&handle_map, thread->handle_map)) < 0)
        return ret;

    set_handle_map(thread, handle_map);

    if ((ret = close_cloexec_handle(handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Now we start to migrate bookkeeping for exec.
       The data we need to migrate are:
            1. current thread
            2. current filesystem
            3. handle mapping
            4. each handle */
    BEGIN_MIGRATION_DEF(execve,
                        struct shim_thread * thread,
                        struct shim_process * proc,
                        const char ** envp)
    {
        DEFINE_MIGRATE(process, proc, sizeof(struct shim_process));
        DEFINE_MIGRATE(all_mounts, NULL, 0);
        DEFINE_MIGRATE(running_thread, thread, sizeof(struct shim_thread));
        DEFINE_MIGRATE(handle_map, thread->handle_map,
                       sizeof (struct shim_handle_map));
        DEFINE_MIGRATE(migratable, NULL, 0);
        DEFINE_MIGRATE(environ, envp, 0);
    }
    END_MIGRATION_DEF(execve)

    return START_MIGRATE(cpstore, execve, thread, process, envp);
}
/*
 * System call entry point for execve(file, argv, envp).
 *
 * Looks up and opens `file`, verifies it is an ELF binary (following
 * shebang ("#!") interpreter lines, possibly recursively, prepending the
 * interpreter arguments to argv), then either executes it in-place via
 * shim_do_execve_rtld() or migrates to a freshly spawned process via
 * do_migrate_process()/migrate_execve().
 *
 * Returns a negative errno on failure; on success it does not return to
 * the caller's image.
 */
int shim_do_execve (const char * file, const char ** argv,
                    const char ** envp)
{
    struct shim_thread * cur_thread = get_cur_thread();
    struct shim_dentry * dent = NULL;
    int ret = 0, argc = 0;

    /* Count the caller's argv entries (NULL-terminated). */
    for (const char ** a = argv ; *a ; a++, argc++);

    if (!envp)
        envp = initial_envp;

    BEGIN_PROFILE_INTERVAL();

    /* Shebang arguments collected across `reopen` iterations; each node
     * holds one interpreter-line word in a trailing flexible buffer. */
    DEFINE_LIST(sharg);
    struct sharg {
        LIST_TYPE(sharg) list;
        int len;
        char arg[0];
    };
    DEFINE_LISTP(sharg);
    LISTP_TYPE(sharg) shargs;
    INIT_LISTP(&shargs);

reopen:
    /* XXX: Not sure what to do here yet */
    assert(cur_thread);

    /* Resolve the path; on shebang scripts we jump back here with `file`
     * pointing at the interpreter instead.
     * NOTE(review): each pass takes a new dentry reference via
     * get_dentry() below but the previous pass's reference is not
     * released on the reopen path - confirm whether this leaks. */
    if ((ret = path_lookupat(NULL, file, LOOKUP_OPEN, &dent, NULL)) < 0)
        return ret;

    struct shim_mount * fs = dent->fs;
    get_dentry(dent);

    if (!fs->d_ops->open) {
        ret = -EACCES;
err:    /* common error exit: drop the dentry reference taken above */
        put_dentry(dent);
        return ret;
    }

    /* If the filesystem can report a mode, use it as an access check. */
    if (fs->d_ops->mode) {
        __kernel_mode_t mode;
        if ((ret = fs->d_ops->mode(dent, &mode, 1)) < 0)
            goto err;
    }

    SAVE_PROFILE_INTERVAL(search_and_check_file_for_exec);

    struct shim_handle * exec = NULL;

    if (!(exec = get_new_handle())) {
        ret = -ENOMEM;
        goto err;
    }

    /* Open the executable read-only. */
    set_handle_fs(exec, fs);
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;
    ret = fs->d_ops->open(exec, dent, O_RDONLY);
    /* NOTE(review): the open() return code is not checked directly; only
     * an empty URI is treated as failure - confirm this is intentional. */

    if (qstrempty(&exec->uri)) {
        put_handle(exec);
        return -EACCES;
    }

    int pathlen;
    char *path = dentry_get_path(dent, true, &pathlen);
    qstrsetstr(&exec->path, path, pathlen);

    /* check_elf_object() returning -EINVAL means "not ELF" - treat that
     * as a potential shebang script; any other error is fatal. */
    if ((ret = check_elf_object(exec)) < 0 && ret != -EINVAL) {
        put_handle(exec);
        return ret;
    }

    if (ret == -EINVAL) { /* it's a shebang */
        LISTP_TYPE(sharg) new_shargs = LISTP_INIT;
        struct sharg * next = NULL;
        bool ended = false, started = false;
        char buf[80];

        /* Scan the first line ("#!interp arg ...") in 80-byte chunks,
         * splitting on spaces/newline.  A word may straddle a chunk
         * boundary, so a partial word is carried in `next` and extended
         * on the following chunk.
         * NOTE(review): __alloca() inside this loop grows the stack for
         * every carried-over word - bounded by the interpreter line
         * length, but worth confirming. */
        do {
            ret = do_handle_read(exec, buf, 80);
            if (ret <= 0)
                break;

            char * s = buf, * c = buf, * e = buf + ret;

            /* The very first chunk must begin with "#!". */
            if (!started) {
                if (ret < 2 || buf[0] != '#' || buf[1] != '!')
                    break;
                s += 2;
                c += 2;
                started = true;
            }

            for (; c < e ; c++) {
                if (*c == ' ' || *c == '\n' || c == e - 1) {
                    int l = (*c == ' ' || * c == '\n') ? c - s : e - s;
                    if (next) {
                        /* Extend the word carried from the last chunk. */
                        struct sharg * sh =
                            __alloca(sizeof(struct sharg) + next->len + l + 1);
                        sh->len = next->len + l;
                        memcpy(sh->arg, next->arg, next->len);
                        memcpy(sh->arg + next->len, s, l);
                        sh->arg[next->len + l] = 0;
                        next = sh;
                    } else {
                        /* Start a new word. */
                        next = __alloca(sizeof(struct sharg) + l + 1);
                        next->len = l;
                        memcpy(next->arg, s, l);
                        next->arg[l] = 0;
                    }

                    /* A delimiter finishes the word; newline finishes
                     * the whole shebang line. */
                    if (*c == ' ' || *c == '\n') {
                        INIT_LIST_HEAD(next, list);
                        listp_add_tail(next, &new_shargs, list);
                        next = NULL;
                        s = c + 1;
                        if (*c == '\n') {
                            ended = true;
                            break;
                        }
                    }
                }
            }
        } while (!ended);

        if (started) {
            /* Flush a trailing word that had no delimiter after it. */
            if (next) {
                INIT_LIST_HEAD(next, list);
                listp_add_tail(next, &new_shargs, list);
            }

            struct sharg * first =
                listp_first_entry(&new_shargs, struct sharg, list);
            assert(first);
            debug("detected as script: run by %s\n", first->arg);
            /* Re-run the lookup with the interpreter as the target,
             * keeping the accumulated interpreter args in `shargs`. */
            file = first->arg;
            listp_splice(&new_shargs, &shargs, list, sharg);
            put_handle(exec);
            goto reopen;
        }
    }

    SAVE_PROFILE_INTERVAL(open_file_for_exec);

#if EXECVE_RTLD == 1
    /* On this host type, if no other thread remains, run the new image
     * in-place instead of spawning a process (cheaper on SGX).
     * NOTE(review): confirm strcmp_static() polarity - as written the
     * in-place path is taken when host_type matches "Linux-SGX". */
    if (!strcmp_static(PAL_CB(host_type), "Linux-SGX")) {
        int is_last = check_last_thread(cur_thread) == 0;
        if (is_last) {
            debug("execve() in the same process\n");
            return shim_do_execve_rtld(exec, argv, envp);
        }
        debug("execve() in a new process\n");
    }
#endif

    INC_PROFILE_OCCURENCE(syscall_use_ipc);

    /* Prepend the interpreter words gathered from shebang lines to argv:
     * new_argv = [shebang args..., original argv..., NULL]. */
    if (!listp_empty(&shargs)) {
        struct sharg * sh;
        int shargc = 0, cnt = 0;
        listp_for_each_entry(sh, &shargs, list)
            shargc++;

        const char ** new_argv =
                __alloca(sizeof(const char *) * (argc + shargc + 1));

        listp_for_each_entry(sh, &shargs, list)
            new_argv[cnt++] = sh->arg;

        for (cnt = 0 ; cnt < argc ; cnt++)
            new_argv[shargc + cnt] = argv[cnt];

        new_argv[shargc + argc] = NULL;
        argv = new_argv;
    }

    /* Detach this thread's execution state while the checkpoint is
     * taken, so the migrated image owns the stack/TCB; restore it below
     * so this (soon-to-exit) thread can still unwind. */
    lock(cur_thread->lock);
    put_handle(cur_thread->exec);
    cur_thread->exec = exec;

    void * stack     = cur_thread->stack;
    void * stack_top = cur_thread->stack_top;
    void * tcb       = cur_thread->tcb;
    bool   user_tcb  = cur_thread->user_tcb;
    void * frameptr  = cur_thread->frameptr;

    cur_thread->stack     = NULL;
    cur_thread->stack_top = NULL;
    cur_thread->frameptr  = NULL;
    cur_thread->tcb       = NULL;
    cur_thread->user_tcb  = false;
    cur_thread->in_vm     = false;
    unlock(cur_thread->lock);

    /* Spawn the new process and migrate state via migrate_execve(). */
    ret = do_migrate_process(&migrate_execve, exec, argv, cur_thread, envp);

    lock(cur_thread->lock);
    cur_thread->stack     = stack;
    cur_thread->stack_top = stack_top;
    cur_thread->frameptr  = frameptr;
    cur_thread->tcb       = tcb;
    cur_thread->user_tcb  = user_tcb;

    if (ret < 0) {
        /* Migration failed: this thread keeps running in this process. */
        cur_thread->in_vm = true;
        unlock(cur_thread->lock);
        return ret;
    }

    /* Migration succeeded: release our handle map and retire the thread;
     * the new process carries on as the exec'ed image. */
    struct shim_handle_map * handle_map = cur_thread->handle_map;
    cur_thread->handle_map = NULL;
    unlock(cur_thread->lock);
    if (handle_map)
        put_handle_map(handle_map);

    if (cur_thread->dummy)
        switch_dummy_thread(cur_thread);

    try_process_exit(0, 0);
    return 0;
}