shim_exec.c 9.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 OSCAR lab, Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_exec.c
  17. *
  18. * Implementation of system call "execve".
  19. */
  20. #include <shim_internal.h>
  21. #include <shim_table.h>
  22. #include <shim_thread.h>
  23. #include <shim_fs.h>
  24. #include <shim_ipc.h>
  25. #include <shim_profile.h>
  26. #include <pal.h>
  27. #include <pal_error.h>
  28. #include <fcntl.h>
  29. #include <sys/syscall.h>
  30. #include <sys/mman.h>
  31. #include <asm/prctl.h>
  32. #include <linux/futex.h>
  33. #include <errno.h>
  34. static int close_cloexec_handle (struct shim_handle_map * map)
  35. {
  36. auto int close_on_exec (struct shim_fd_handle * fd_hdl,
  37. struct shim_handle_map * map, void * arg)
  38. {
  39. if (fd_hdl->flags & FD_CLOEXEC) {
  40. struct shim_handle * hdl = __detach_fd_handle(fd_hdl, NULL, map);
  41. close_handle(hdl);
  42. }
  43. return 0;
  44. }
  45. return walk_handle_map(&close_on_exec, map, NULL);
  46. }
/* Profiling buckets for the in-place ("rtld") execve path below. */
DEFINE_PROFILE_CATAGORY(exec_rtld, exec);
DEFINE_PROFILE_INTERVAL(alloc_new_stack_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(arrange_arguments_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_executable_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_all_vmas_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(load_new_executable_for_exec, exec_rtld);

/* State that must survive the switch_stack() call in
   shim_do_execve_rtld(): automatic variables on the old stack become
   invalid once we switch, so the old stack bounds and the new
   argv/auxv pointers live at file scope instead. */
static void * old_stack_top, * old_stack, * old_stack_red;
static const char ** new_argp;
static int new_argc;
static elf_auxv_t * new_auxp;

/* Number of ELF auxiliary-vector slots reserved on the new user stack. */
#define REQUIRED_ELF_AUXV 6
/*
 * shim_do_execve_rtld: carry out execve() in-place, without creating a
 * new host process.  Tears down the current image (CLOEXEC handles, old
 * stack, loaded binaries, all VMAs) and loads + starts the executable
 * HDL with ARGV/ENVP.  Only called when this thread is the last thread
 * of the process (checked by the caller, shim_do_execve()).
 *
 * Returns a negative errno on early failure; on success it does not
 * return (execute_elf_object() transfers control to the new program).
 */
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    /* execve() closes every descriptor opened with FD_CLOEXEC. */
    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Fresh TLS control block for the new image.
       NOTE(review): tcb is not freed on later failure paths — presumably
       acceptable since the old image is being discarded; confirm. */
    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;

    populate_tls(tcb, false);
    debug("set tcb to %p\n", tcb);

    /* Swap the thread's executable handle: release the old one, take a
       reference on the new one. */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Stash the old stack bounds in the file-scope statics: locals do
       not survive the switch_stack() below. */
    old_stack_top = cur_thread->stack_top;
    old_stack = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    /* Count the NULL-terminated argv entries. */
    for (const char ** a = argv ; *a ; a++, new_argc++);

    /* Build the new user stack (argv, envp, auxv); results go into the
       statics so they remain valid after the stack switch. */
    if ((ret = init_stack(argv, envp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    /* From here on we execute on the new stack; re-fetch the thread
       pointer rather than trusting the pre-switch local. */
    switch_stack(new_argp);
    cur_thread = get_cur_thread();

    UPDATE_PROFILE_INTERVAL();

    /* Release the old stack and its red zone, both in the PAL and in
       our own VMA bookkeeping. */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);

    int flags = VMA_INTERNAL;
    bkeep_munmap(old_stack, old_stack_top - old_stack, &flags);
    bkeep_munmap(old_stack_red, old_stack - old_stack_red, &flags);

    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    init_brk();
    unmap_all_vmas();
    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* Past the point of no return: the old image is gone, so a failed
       load can only terminate the process. */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate();

    load_elf_interp(cur_thread->exec);
    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    cur_thread->robust_list = NULL;

    debug("execve: start execution\n");
    /* Does not return on success. */
    execute_elf_object(cur_thread->exec, new_argc, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);

    return 0;
}
  113. static void * __malloc (size_t size)
  114. {
  115. int flags = MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL;
  116. size = ALIGN_UP(size);
  117. void * addr = get_unmapped_vma(size, flags);
  118. addr = (void *)
  119. DkVirtualMemoryAlloc(addr, size, 0, PAL_PROT_READ|PAL_PROT_WRITE);
  120. if (!addr)
  121. return NULL;
  122. bkeep_mmap(addr, size, PROT_READ|PROT_WRITE, flags, NULL, 0, NULL);
  123. return addr;
  124. }
  125. #define malloc_method __malloc
  126. #include <shim_checkpoint.h>
/* Profiling buckets shared by both execve paths (in-place and IPC). */
DEFINE_PROFILE_CATAGORY(exec, );
DEFINE_PROFILE_INTERVAL(search_and_check_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(open_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec, exec);
/*
 * migrate_execve: checkpoint-definition callback used when execve() is
 * carried out by migrating to a new process (i.e. other threads still
 * exist in this one).  Duplicates the handle map so closing FD_CLOEXEC
 * handles does not disturb threads sharing the original, then defines
 * what gets checkpointed and starts the migration.
 *
 * Variadic tail (AP): const char ** envp, size_t envsize — the flattened
 * environment block to carry over.
 */
static int migrate_execve (struct shim_cp_store * cpstore,
                           struct shim_process * process,
                           struct shim_thread * thread, va_list ap)
{
    struct shim_handle_map * handle_map = NULL;
    int ret;
    const char ** envp = va_arg (ap, const char **);
    size_t envsize = va_arg (ap, size_t);

    BEGIN_PROFILE_INTERVAL();

    /* Private copy of the handle map for this thread. */
    if ((ret = dup_handle_map(&handle_map, thread->handle_map)) < 0)
        return ret;

    set_handle_map(thread, handle_map);

    /* execve() closes FD_CLOEXEC descriptors — do it in the copy. */
    if ((ret = close_cloexec_handle(handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Now we start to migrate bookkeeping for exec.
       The data we need to migrate are:
            1. the current thread
            2. the current filesystem
            3. handle mapping
            4. each handle */
    BEGIN_MIGRATION_DEF(execve, struct shim_process * proc,
                        struct shim_thread * thread,
                        const char ** envp, size_t envsize)
    {
        /* Use gipc (bulk IPC) for transferring the checkpoint. */
        store->use_gipc = true;
        DEFINE_MIGRATE(process, proc, sizeof(struct shim_process), false);
        DEFINE_MIGRATE(all_mounts, NULL, 0, false);
        DEFINE_MIGRATE(running_thread, thread, sizeof(struct shim_thread),
                       false);
        DEFINE_MIGRATE(handle_map, thread->handle_map,
                       sizeof (struct shim_handle_map), true);
        DEFINE_MIGRATE(migratable, NULL, 0, false);
        DEFINE_MIGRATE(environ, envp, envsize, true);
    }
    END_MIGRATION_DEF

    return START_MIGRATE(cpstore, execve, 0, process, thread, envp, envsize);
}
  169. int shim_do_execve (const char * file, const char ** argv,
  170. const char ** envp)
  171. {
  172. struct shim_thread * cur_thread = get_cur_thread();
  173. struct shim_dentry * dent = NULL;
  174. int ret = 0;
  175. if (!envp)
  176. envp = initial_envp;
  177. BEGIN_PROFILE_INTERVAL();
  178. if ((ret = path_lookupat(NULL, file, LOOKUP_OPEN, &dent)) < 0)
  179. return ret;
  180. struct shim_mount * fs = dent->fs;
  181. get_dentry(dent);
  182. if (!fs->d_ops->open) {
  183. ret = -EACCES;
  184. err:
  185. put_dentry(dent);
  186. return ret;
  187. }
  188. if (fs->d_ops->mode) {
  189. mode_t mode;
  190. if ((ret = fs->d_ops->mode(dent, &mode, 1)) < 0)
  191. goto err;
  192. }
  193. SAVE_PROFILE_INTERVAL(search_and_check_file_for_exec);
  194. struct shim_handle * exec = NULL;
  195. if (!(exec = get_new_handle())) {
  196. ret = -ENOMEM;
  197. goto err;
  198. }
  199. set_handle_fs(exec, fs);
  200. exec->flags = O_RDONLY;
  201. exec->acc_mode = MAY_READ;
  202. ret = fs->d_ops->open(exec, dent, O_RDONLY);
  203. if (qstrempty(&exec->uri)) {
  204. put_handle(exec);
  205. return -EACCES;
  206. }
  207. int sz;
  208. char *path = dentry_get_path(dent, true, &sz);
  209. qstrsetstr(&exec->path, path, sz);
  210. if ((ret = check_elf_object(&exec)) < 0) {
  211. put_handle(exec);
  212. return ret;
  213. }
  214. SAVE_PROFILE_INTERVAL(open_file_for_exec);
  215. int is_last = check_last_thread(cur_thread) == 0;
  216. if (is_last)
  217. return shim_do_execve_rtld(exec, argv, envp);
  218. INC_PROFILE_OCCURENCE(syscall_use_ipc);
  219. size_t envsize = allocsize;
  220. void * envptr = NULL;
  221. const char ** empty_argv = NULL;
  222. retry:
  223. envptr = system_malloc(envsize);
  224. if (!envptr)
  225. return -ENOMEM;
  226. ret = populate_user_stack(envptr, envsize, 0, NULL, &empty_argv, &envp);
  227. if (ret == -ENOMEM) {
  228. system_free(envptr, envsize);
  229. envsize += allocsize;
  230. goto retry;
  231. }
  232. lock(cur_thread->lock);
  233. put_handle(cur_thread->exec);
  234. cur_thread->exec = exec;
  235. void * stack = cur_thread->stack;
  236. void * stack_top = cur_thread->stack_top;
  237. void * tcb = cur_thread->tcb;
  238. bool user_tcb = cur_thread->user_tcb;
  239. void * frameptr = cur_thread->frameptr;
  240. cur_thread->stack = NULL;
  241. cur_thread->stack_top = NULL;
  242. cur_thread->frameptr = NULL;
  243. cur_thread->tcb = NULL;
  244. cur_thread->user_tcb = false;
  245. cur_thread->in_vm = false;
  246. unlock(cur_thread->lock);
  247. ret = do_migrate_process(&migrate_execve, exec, argv, cur_thread, envp,
  248. envptr + envsize - (void *) envp);
  249. system_free(envptr, envsize);
  250. lock(cur_thread->lock);
  251. cur_thread->stack = stack;
  252. cur_thread->stack_top = stack_top;
  253. cur_thread->frameptr = frameptr;
  254. cur_thread->tcb = tcb;
  255. cur_thread->user_tcb = user_tcb;
  256. if (ret < 0) {
  257. cur_thread->in_vm = true;
  258. unlock(cur_thread->lock);
  259. return ret;
  260. }
  261. struct shim_handle_map * handle_map = cur_thread->handle_map;
  262. cur_thread->handle_map = NULL;
  263. unlock(cur_thread->lock);
  264. if (handle_map)
  265. put_handle_map(handle_map);
  266. if (cur_thread->dummy)
  267. switch_dummy_thread(cur_thread);
  268. try_process_exit(0);
  269. return 0;
  270. }