shim_exec.c 9.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 OSCAR lab, Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU General Public License for more details.
  13. You should have received a copy of the GNU General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
 * shim_exec.c
  17. *
  18. * Implementation of system call "execve".
  19. */
  20. #include <shim_internal.h>
  21. #include <shim_table.h>
  22. #include <shim_thread.h>
  23. #include <shim_fs.h>
  24. #include <shim_ipc.h>
  25. #include <shim_profile.h>
  26. #include <pal.h>
  27. #include <pal_error.h>
  28. #include <fcntl.h>
  29. #include <sys/syscall.h>
  30. #include <sys/mman.h>
  31. #include <asm/prctl.h>
  32. #include <linux/futex.h>
  33. #include <errno.h>
  34. static int close_cloexec_handle (struct shim_handle_map * map)
  35. {
  36. auto int close_on_exec (struct shim_fd_handle * fd_hdl,
  37. struct shim_handle_map * map, void * arg)
  38. {
  39. if (fd_hdl->flags & FD_CLOEXEC) {
  40. struct shim_handle * hdl = __detach_fd_handle(fd_hdl, NULL, map);
  41. close_handle(hdl);
  42. }
  43. return 0;
  44. }
  45. return walk_handle_map(&close_on_exec, map, NULL);
  46. }
/* Profiling instrumentation for the in-place ("RTLD") execve path; each
 * interval brackets one phase of shim_do_execve_rtld() below. */
DEFINE_PROFILE_CATAGORY(exec_rtld, exec);
DEFINE_PROFILE_INTERVAL(alloc_new_stack_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(arrange_arguments_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_executable_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(unmap_all_vmas_for_exec, exec_rtld);
DEFINE_PROFILE_INTERVAL(load_new_executable_for_exec, exec_rtld);

/* These live at file scope on purpose: shim_do_execve_rtld() calls
 * switch_stack() mid-function, after which locals that lived on the old
 * stack are invalid.  Anything needed across the switch is stashed here. */
static void * old_stack_top, * old_stack, * old_stack_red;
static const char ** new_argp;   /* argv block laid out on the new stack */
static int new_argc;             /* number of entries in argv */
static elf_auxv_t * new_auxp;    /* ELF auxiliary vector on the new stack */

/* Number of ELF auxv slots reserved when building the new stack. */
#define REQUIRED_ELF_AUXV 6
/*
 * Perform execve in-place (the "RTLD" path): tear down this process's
 * address space and load the new executable into the same process, without
 * migrating to a fresh one.  Callers only take this path when this is the
 * last live thread (see shim_do_execve()).
 *
 * hdl  - handle of the new executable (already opened and ELF-checked)
 * argv - NULL-terminated argument vector
 * envp - NULL-terminated environment vector
 *
 * On success this does not return to the old image: control transfers to
 * the new ELF object via execute_elf_object().
 */
int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
                         const char ** envp)
{
    BEGIN_PROFILE_INTERVAL();

    struct shim_thread * cur_thread = get_cur_thread();
    int ret;

    /* execve semantics: drop all FD_CLOEXEC handles first. */
    if ((ret = close_cloexec_handle(cur_thread->handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Fresh TLS control block for the post-exec image.
     * NOTE(review): 'tcb' is handed to populate_tls() and not stored
     * locally afterwards -- presumably populate_tls() installs it as the
     * current TCB; confirm against its definition. */
    void * tcb = malloc(sizeof(__libc_tcb_t));
    if (!tcb)
        return -ENOMEM;

    populate_tls(tcb, false);
    debug("set tcb to %p\n", tcb);

    /* Swap the thread's executable handle over to the new binary. */
    put_handle(cur_thread->exec);
    get_handle(hdl);
    cur_thread->exec = hdl;

    /* Stash the old stack bounds in file-scope statics: locals on the old
     * stack become unusable once switch_stack() runs below. */
    old_stack_top = cur_thread->stack_top;
    old_stack = cur_thread->stack;
    old_stack_red = cur_thread->stack_red;
    cur_thread->stack_top = NULL;
    cur_thread->stack = NULL;
    cur_thread->stack_red = NULL;

    initial_envp = NULL;
    new_argc = 0;
    for (const char ** a = argv ; *a ; a++, new_argc++);

    /* Lay out argv/envp/auxv on a brand-new user stack. */
    if ((ret = init_stack(argv, envp, &new_argp,
                          REQUIRED_ELF_AUXV, &new_auxp)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(alloc_new_stack_for_exec);

    /* Point of no return: hop onto the new stack.  From here on, only the
     * file-scope statics may be used to refer to pre-switch values. */
    switch_stack(new_argp);
    cur_thread = get_cur_thread();

    UPDATE_PROFILE_INTERVAL();

    /* Free and un-bookkeep the old stack and its red (guard) zone. */
    DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
    DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);
    int flags = VMA_INTERNAL;
    bkeep_munmap(old_stack, old_stack_top - old_stack, &flags);
    bkeep_munmap(old_stack_red, old_stack - old_stack_red, &flags);

    /* Unload the previous executable and any libraries it pulled in. */
    remove_loaded_libraries();
    clean_link_map_list();
    SAVE_PROFILE_INTERVAL(unmap_loaded_binaries_for_exec);

    init_brk();
    unmap_all_vmas();
    SAVE_PROFILE_INTERVAL(unmap_all_vmas_for_exec);

    /* Load the new binary.  On failure the old image is already torn
     * down, so there is nothing to return to -- terminate the process. */
    if ((ret = load_elf_object(cur_thread->exec, NULL, 0)) < 0)
        shim_terminate();

    load_elf_interp(cur_thread->exec);
    SAVE_PROFILE_INTERVAL(load_new_executable_for_exec);

    cur_thread->robust_list = NULL;

    debug("execve: start execution\n");
    execute_elf_object(cur_thread->exec, new_argc, new_argp,
                       REQUIRED_ELF_AUXV, new_auxp);
    return 0;
}
  113. static void * __malloc (size_t size)
  114. {
  115. int flags = MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL;
  116. size = ALIGN_UP(size);
  117. void * addr = get_unmapped_vma(size, flags);
  118. addr = DkVirtualMemoryAlloc(addr, size, 0, PAL_PROT_READ|PAL_PROT_WRITE);
  119. if (addr)
  120. bkeep_mmap(addr, size, PROT_READ|PROT_WRITE, flags, NULL, 0, NULL);
  121. return addr;
  122. }
/* NOTE(review): malloc_method is presumably consumed by shim_checkpoint.h
 * below so the checkpoint machinery allocates through __malloc() (which
 * keeps VMA bookkeeping consistent); the #define must precede the
 * #include -- confirm against shim_checkpoint.h. */
#define malloc_method __malloc
#include <shim_checkpoint.h>

/* Profiling instrumentation for the top-level execve path. */
DEFINE_PROFILE_CATAGORY(exec, );
DEFINE_PROFILE_INTERVAL(search_and_check_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(open_file_for_exec, exec);
DEFINE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec, exec);
/*
 * Checkpoint callback for the cross-process execve path: duplicates the
 * thread's handle map, strips FD_CLOEXEC handles from the copy, then
 * defines and runs the migration that snapshots everything the new
 * process needs into 'cpstore'.
 *
 * Variadic tail (passed by do_migrate_process()): envp, then envsize.
 */
static int migrate_execve (struct shim_cp_store * cpstore,
                           struct shim_process * process,
                           struct shim_thread * thread, va_list ap)
{
    struct shim_handle_map * handle_map = NULL;
    int ret;
    const char ** envp = va_arg (ap, const char **);
    size_t envsize = va_arg (ap, size_t);

    BEGIN_PROFILE_INTERVAL();

    /* Work on a private copy of the handle map so dropping CLOEXEC
     * handles does not disturb the original map. */
    if ((ret = dup_handle_map(&handle_map, thread->handle_map)) < 0)
        return ret;

    set_handle_map(thread, handle_map);

    if ((ret = close_cloexec_handle(handle_map)) < 0)
        return ret;

    SAVE_PROFILE_INTERVAL(close_CLOEXEC_files_for_exec);

    /* Now we start to migrate bookkeeping for exec.
       The data we need to migrate are:
            1. current thread
            2. current filesystem
            3. handle mapping
            4. each handle */
    /* BEGIN_MIGRATION_DEF expands to a definition that brings 'store'
     * into scope for the DEFINE_MIGRATE entries below. */
    BEGIN_MIGRATION_DEF(execve, struct shim_process * proc,
                        struct shim_thread * thread,
                        const char ** envp, size_t envsize)
    {
        store->use_gipc = true;
        DEFINE_MIGRATE(process, proc, sizeof(struct shim_process), false);
        DEFINE_MIGRATE(all_mounts, NULL, 0, false);
        DEFINE_MIGRATE(running_thread, thread, sizeof(struct shim_thread),
                       false);
        DEFINE_MIGRATE(handle_map, thread->handle_map,
                       sizeof (struct shim_handle_map), true);
        DEFINE_MIGRATE(migratable, NULL, 0, false);
        DEFINE_MIGRATE(environ, envp, envsize, true);
    }
    END_MIGRATION_DEF

    return START_MIGRATE(cpstore, execve, 0, process, thread, envp, envsize);
}
/*
 * System call handler for execve(2).
 *
 * Resolves 'file', opens it, verifies it is an ELF object, then either:
 *   - replaces the image in-place via shim_do_execve_rtld() when this is
 *     the last live thread, or
 *   - checkpoints the current state and migrates into a brand-new process
 *     via do_migrate_process(), after which this thread retires.
 *
 * Returns a negative errno on failure; on success control never returns
 * to the old program image.
 */
int shim_do_execve (const char * file, const char ** argv,
                    const char ** envp)
{
    struct shim_thread * cur_thread = get_cur_thread();
    struct shim_dentry * dent = NULL;
    int ret = 0;

    if (!envp)
        envp = initial_envp;

    BEGIN_PROFILE_INTERVAL();

    if ((ret = path_lookupat(NULL, file, LOOKUP_OPEN, &dent)) < 0)
        return ret;

    struct shim_mount * fs = dent->fs;
    get_dentry(dent);

    if (!fs->d_ops->open) {
        ret = -EACCES;
err:    /* common exit: drop the dentry reference taken above */
        put_dentry(dent);
        return ret;
    }

    /* NOTE(review): only checks that the mode query itself succeeds; the
     * returned mode bits (e.g. the executable bit) are never inspected --
     * confirm whether that check belongs here. */
    if (fs->d_ops->mode) {
        mode_t mode;
        if ((ret = fs->d_ops->mode(dent, &mode, 1)) < 0)
            goto err;
    }

    SAVE_PROFILE_INTERVAL(search_and_check_file_for_exec);

    struct shim_handle * exec = NULL;

    if (!(exec = get_new_handle())) {
        ret = -ENOMEM;
        goto err;
    }

    set_handle_fs(exec, fs);
    exec->flags = O_RDONLY;
    exec->acc_mode = MAY_READ;
    ret = fs->d_ops->open(exec, dent, O_RDONLY);
    /* NOTE(review): the open() return value is not examined directly;
     * failure is inferred only from the URI staying empty -- verify this
     * covers all of the fs's open() error paths. */
    if (qstrempty(&exec->uri)) {
        put_handle(exec);
        return -EACCES;
    }

    int sz;
    char *path = dentry_get_path(dent, true, &sz);
    qstrsetstr(&exec->path, path, sz);

    /* Reject anything that is not a loadable ELF object. */
    if ((ret = check_elf_object(&exec)) < 0) {
        put_handle(exec);
        return ret;
    }

    SAVE_PROFILE_INTERVAL(open_file_for_exec);

    /* Last thread standing: replace the image in this very process. */
    int is_last = check_last_thread(cur_thread) == 0;
    if (is_last)
        return shim_do_execve_rtld(exec, argv, envp);

    /* Otherwise: spawn a new process and migrate the state over IPC. */
    INC_PROFILE_OCCURENCE(syscall_use_ipc);

#ifdef PROFILE
    unsigned long create_time = GET_PROFILE_INTERVAL();
#endif

    /* Copy envp into a private buffer, growing it until everything fits. */
    size_t envsize = allocsize;
    void * envptr = NULL;
    const char ** empty_argv = NULL;
retry:
    envptr = system_malloc(envsize);
    if (!envptr)
        return -ENOMEM;

    ret = populate_user_stack(envptr, envsize, 0, NULL, &empty_argv, &envp);
    /* NOTE(review): only -ENOMEM triggers a retry; any other negative
     * return falls through with the migration still attempted -- confirm
     * this is intentional. */
    if (ret == -ENOMEM) {
        system_free(envptr, envsize);
        envsize += allocsize;
        goto retry;
    }

    /* Detach this thread's execution state while the migration is in
     * flight; it is restored below so a failed migration can resume. */
    lock(cur_thread->lock);
    put_handle(cur_thread->exec);
    cur_thread->exec = exec;

    void * stack = cur_thread->stack;
    void * stack_top = cur_thread->stack_top;
    void * tcb = cur_thread->tcb;
    bool user_tcb = cur_thread->user_tcb;
    void * frameptr = cur_thread->frameptr;

    cur_thread->stack = NULL;
    cur_thread->stack_top = NULL;
    cur_thread->frameptr = NULL;
    cur_thread->tcb = NULL;
    cur_thread->user_tcb = false;
    cur_thread->in_vm = false;
    unlock(cur_thread->lock);

    /* Final argument is the size of the used tail of the env buffer. */
    ret = do_migrate_process(&migrate_execve, exec, argv, cur_thread, envp,
                             envptr + envsize - (void *) envp);

    system_free(envptr, envsize);

    lock(cur_thread->lock);
    cur_thread->stack = stack;
    cur_thread->stack_top = stack_top;
    cur_thread->frameptr = frameptr;
    cur_thread->tcb = tcb;
    cur_thread->user_tcb = user_tcb;

    if (ret < 0) {
        /* Migration failed: this thread stays alive in this process. */
        cur_thread->in_vm = true;
        unlock(cur_thread->lock);
        return ret;
    }

    /* The new process has taken over; release our handle map ... */
    struct shim_handle_map * handle_map = cur_thread->handle_map;
    cur_thread->handle_map = NULL;
    unlock(cur_thread->lock);
    if (handle_map)
        put_handle_map(handle_map);

    /* ... and retire this thread/process. */
    if (cur_thread->dummy)
        switch_dummy_thread(cur_thread);

    try_process_exit(0);
    return 0;
}