/* shim_thread.h */

#ifndef _SHIM_THREAD_H_
#define _SHIM_THREAD_H_

#include <shim_defs.h>
#include <shim_internal.h>
#include <shim_tcb.h>
#include <shim_utils.h>
#include <shim_signal.h>
#include <shim_handle.h>
#include <shim_vma.h>

#include <pal.h>
#include <list.h>

struct shim_handle;
struct shim_fd_map;
struct shim_dentry;
struct shim_signal_log;

DEFINE_LIST(shim_thread);
DEFINE_LISTP(shim_thread);

struct shim_thread {
    /* thread identifiers */
    IDTYPE vmid;
    IDTYPE pgid, ppid, tgid, tid;
    bool in_vm;
    LEASETYPE tid_lease;

    /* credentials */
    IDTYPE uid, gid, euid, egid;

    /* thread pal handle */
    PAL_HANDLE pal_handle;

    /* parent handle */
    struct shim_thread * parent;
    /* thread leader */
    struct shim_thread * leader;
#ifndef ALIAS_VFORK_AS_FORK
    /* dummy thread: stores blocked parent thread for vfork */
    struct shim_thread * dummy;
#endif
    /* child handles; protected by thread->lock */
    LISTP_TYPE(shim_thread) children;
    /* nodes in child handles; protected by the parent's lock */
    LIST_TYPE(shim_thread) siblings;
    /* nodes in global handles; protected by thread_list_lock */
    LIST_TYPE(shim_thread) list;

    struct shim_handle_map * handle_map;

    /* child tid */
    int * set_child_tid, * clear_child_tid;

    /* signal handling */
    __sigset_t signal_mask;
    struct shim_signal_handle signal_handles[NUM_SIGS];
    struct atomic_int has_signal;
    struct shim_signal_log * signal_logs;
    bool suspend_on_signal;
    stack_t signal_altstack;

    /* futex robust list */
    void * robust_list;

    PAL_HANDLE scheduler_event;

    PAL_HANDLE exit_event;
    int exit_code;
    int term_signal; // Store the terminating signal, if any; needed for
                     // wait() and friends
    bool is_alive;

    PAL_HANDLE child_exit_event;
    LISTP_TYPE(shim_thread) exited_children;

    /* file system */
    struct shim_dentry * root, * cwd;
    mode_t umask;

    /* executable */
    struct shim_handle * exec;

    void * stack, * stack_top, * stack_red;
    shim_tcb_t * shim_tcb;
    void * frameptr;

    REFTYPE ref_count;
    struct shim_lock lock;

#ifdef PROFILE
    unsigned long exit_time;
#endif
};

DEFINE_LIST(shim_simple_thread);
struct shim_simple_thread {
    /* VMID and PIDs */
    IDTYPE vmid;
    IDTYPE pgid, tgid, tid;

    /* exit event and status */
    PAL_HANDLE exit_event;
    int exit_code;
    int term_signal;
    bool is_alive;

    /* nodes in global handles */
    LIST_TYPE(shim_simple_thread) list;

    REFTYPE ref_count;
    struct shim_lock lock;

#ifdef PROFILE
    unsigned long exit_time;
#endif
};

int init_thread (void);

static inline struct shim_thread * shim_thread_self(void)
{
    /* TODO: optimize to use single movq %gs:<offset> */
    shim_tcb_t * shim_tcb = shim_get_tcb();
    return shim_tcb->tp;
}

static inline struct shim_thread * save_shim_thread_self(struct shim_thread * __self)
{
    /* TODO: optimize to use single movq %gs:<offset> */
    shim_tcb_t * shim_tcb = shim_get_tcb();
    shim_tcb->tp = __self;
    return __self;
}

static inline bool is_internal(struct shim_thread *thread)
{
    return thread->tid >= INTERNAL_TID_BASE;
}

void get_thread (struct shim_thread * thread);
void put_thread (struct shim_thread * thread);
void get_simple_thread (struct shim_simple_thread * thread);
void put_simple_thread (struct shim_simple_thread * thread);

void init_fs_base (unsigned long fs_base, struct shim_thread * thread);
void update_fs_base (unsigned long fs_base);

void debug_setprefix (shim_tcb_t * tcb);

static inline
__attribute__((always_inline))
void debug_setbuf (shim_tcb_t * tcb, bool on_stack)
{
    if (!debug_handle)
        return;

    tcb->debug_buf = on_stack ? __alloca(sizeof(struct debug_buf)) :
                                malloc(sizeof(struct debug_buf));
    debug_setprefix(tcb);
}

static inline
__attribute__((always_inline))
struct shim_thread * get_cur_thread (void)
{
    return shim_thread_self();
}

static inline
__attribute__((always_inline))
bool cur_thread_is_alive (void)
{
    struct shim_thread * thread = get_cur_thread();
    return thread ? thread->is_alive : false;
}

static inline
__attribute__((always_inline))
void set_cur_thread (struct shim_thread * thread)
{
    shim_tcb_t * tcb = shim_get_tcb();
    IDTYPE tid = 0;

    if (thread) {
        if (tcb->tp && tcb->tp != thread)
            put_thread(tcb->tp);

        if (tcb->tp != thread)
            get_thread(thread);

        tcb->tp = thread;
        thread->shim_tcb = tcb;
        tid = thread->tid;

        if (!is_internal(thread) && !thread->signal_logs)
            thread->signal_logs = malloc(sizeof(struct shim_signal_log) *
                                         NUM_SIGS);
    } else if (tcb->tp) {
        put_thread(tcb->tp);
        tcb->tp = NULL;
    } else {
        BUG();
    }

    if (tcb->tid != tid) {
        tcb->tid = tid;
        debug_setprefix(tcb);
    }
}

static inline void thread_setwait (struct shim_thread ** queue,
                                   struct shim_thread * thread)
{
    if (!thread)
        thread = get_cur_thread();

    DkEventClear(thread->scheduler_event);

    if (queue) {
        get_thread(thread);
        *queue = thread;
    }
}

static inline int thread_sleep (uint64_t timeout_us)
{
    struct shim_thread * cur_thread = get_cur_thread();

    if (!cur_thread)
        return -EINVAL;

    PAL_HANDLE event = cur_thread->scheduler_event;
    if (!event)
        return -EINVAL;

    if (NULL == DkObjectsWaitAny(1, &event, timeout_us))
        return -PAL_ERRNO;

    return 0;
}

static inline void thread_wakeup (struct shim_thread * thread)
{
    DkEventSet(thread->scheduler_event);
}
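
/* A typical sleep/wakeup handshake built on these helpers (illustrative
 * sketch, not a quote of any particular call site):
 *
 *     // waiter side: register the current thread and block on its event
 *     struct shim_thread * waiter = NULL;
 *     thread_setwait(&waiter, NULL);   // clears the event, takes a reference
 *     ...make `waiter` visible to the waker...
 *     thread_sleep(NO_TIMEOUT);        // or an explicit timeout in microseconds
 *
 *     // waker side: signal the waiter's scheduler_event
 *     thread_wakeup(waiter);
 *
 * Whoever consumes the queued pointer is responsible for dropping the
 * reference taken by thread_setwait() via put_thread(waiter).
 */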

extern struct shim_lock thread_list_lock;

/*!
 * \brief Look up the thread for a given id.
 *
 * \param tid Thread id to look for.
 *
 * Searches the global thread list for a thread with id equal to \p tid.
 * Returns NULL if no such thread is found.
 * Increases the refcount of the returned thread.
 */
struct shim_thread* lookup_thread(IDTYPE tid);
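
/* The caller owns the reference returned by lookup_thread() and must drop it
 * with put_thread() once done, e.g. (sketch):
 *
 *     struct shim_thread * thread = lookup_thread(tid);
 *     if (!thread)
 *         return -ESRCH;
 *     ...use thread...
 *     put_thread(thread);
 */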

struct shim_simple_thread * __lookup_simple_thread (IDTYPE tid);
struct shim_simple_thread * lookup_simple_thread (IDTYPE tid);

void set_as_child (struct shim_thread * parent, struct shim_thread * child);

/* creating and revoking thread objects */
struct shim_thread * get_new_thread (IDTYPE new_tid);
struct shim_thread * get_new_internal_thread (void);
struct shim_simple_thread * get_new_simple_thread (void);

/* thread list utilities */
void add_thread (struct shim_thread * thread);
void del_thread (struct shim_thread * thread);
void add_simple_thread (struct shim_simple_thread * thread);
void del_simple_thread (struct shim_simple_thread * thread);

int check_last_thread (struct shim_thread * self);

#ifndef ALIAS_VFORK_AS_FORK
void switch_dummy_thread (struct shim_thread * thread);
#endif

int walk_thread_list (int (*callback) (struct shim_thread *, void *, bool *),
                      void * arg);
int walk_simple_thread_list (int (*callback) (struct shim_simple_thread *,
                                              void *, bool *),
                             void * arg);

/* reference counting of handle maps */
void get_handle_map (struct shim_handle_map * map);
void put_handle_map (struct shim_handle_map * map);

/* retrieving handle mappings */
static inline __attribute__((always_inline))
struct shim_handle_map * get_cur_handle_map (struct shim_thread * thread)
{
    if (!thread)
        thread = get_cur_thread();

    return thread ? thread->handle_map : NULL;
}
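
/* Replaces the thread's handle map: takes a reference on the new map and drops
 * the reference on the previously installed map, if any. With thread == NULL
 * the current thread is used. */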

static inline __attribute__((always_inline))
void set_handle_map (struct shim_thread * thread,
                     struct shim_handle_map * map)
{
    get_handle_map(map);

    if (!thread)
        thread = get_cur_thread();

    if (thread->handle_map)
        put_handle_map(thread->handle_map);

    thread->handle_map = map;
}

/* shim exit callback */
int thread_exit(struct shim_thread* self, bool send_ipc, int** clear_child_tid_pal_ptr);

/* If the process was killed by a signal, pass it in the second
 * argument, else pass zero */
noreturn void thread_or_process_exit(int error_code, int term_signal);

/* thread cloning helpers */
struct shim_clone_args {
    PAL_HANDLE create_event;
    PAL_HANDLE initialize_event;
    struct shim_thread * parent, * thread;
    void * stack;
    unsigned long fs_base;
};

void * allocate_stack (size_t size, size_t protect_size, bool user);

static inline __attribute__((always_inline))
bool check_stack_size (struct shim_thread * cur_thread, int size)
{
    if (!cur_thread)
        cur_thread = get_cur_thread();

    void * rsp;
    __asm__ volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack)
        return size < rsp - cur_thread->stack;

    return false;
}
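
/* Illustrative use (sketch; the size is arbitrary): before placing a large
 * object on the current stack, a caller can check that enough room remains:
 *
 *     if (!check_stack_size(NULL, 2 * 4096))
 *         return -ENOMEM;          // or fall back to a heap allocation
 *     char buf[2 * 4096];
 */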

static inline __attribute__((always_inline))
bool check_on_stack (struct shim_thread * cur_thread, void * mem)
{
    if (!cur_thread)
        cur_thread = get_cur_thread();

    return (mem <= cur_thread->stack_top && mem > cur_thread->stack);
}

int init_stack (const char ** argv, const char ** envp,
                int ** argcpp, const char *** argpp,
                elf_auxv_t ** auxpp);

#endif /* _SHIM_THREAD_H_ */