/* shim_thread.h */

#ifndef _SHIM_THREAD_H_
#define _SHIM_THREAD_H_

#include <shim_defs.h>
#include <shim_internal.h>
#include <shim_tls.h>
#include <shim_utils.h>
#include <shim_signal.h>
#include <shim_handle.h>
#include <shim_vma.h>

#include <pal.h>
#include <list.h>

struct shim_handle;
struct shim_fd_map;
struct shim_dentry;
struct shim_signal_log;

DEFINE_LIST(shim_thread);
DEFINE_LISTP(shim_thread);
struct shim_thread {
    /* thread identifiers */
    IDTYPE vmid;
    IDTYPE pgid, ppid, tgid, tid;
    bool in_vm;
    LEASETYPE tid_lease;

    /* credentials */
    IDTYPE uid, gid, euid, egid;

    /* thread pal handle */
    PAL_HANDLE pal_handle;

    /* parent handle */
    struct shim_thread * parent;
    /* thread leader */
    struct shim_thread * leader;
#ifndef ALIAS_VFORK_AS_FORK
    /* dummy thread: stores blocked parent thread for vfork */
    struct shim_thread * dummy;
#endif
    /* child handles; protected by thread->lock */
    LISTP_TYPE(shim_thread) children;
    /* nodes in child handles; protected by the parent's lock */
    LIST_TYPE(shim_thread) siblings;
    /* nodes in global handles; protected by thread_list_lock */
    LIST_TYPE(shim_thread) list;

    struct shim_handle_map * handle_map;

    /* child tid */
    int * set_child_tid, * clear_child_tid;

    /* signal handling */
    __sigset_t signal_mask;
    struct shim_signal_handle signal_handles[NUM_SIGS];
    struct atomic_int has_signal;
    struct shim_signal_log * signal_logs;
    bool suspend_on_signal;
    stack_t signal_altstack;

    /* futex robust list */
    void * robust_list;

    PAL_HANDLE scheduler_event;

    PAL_HANDLE exit_event;
    int exit_code;
    int term_signal; /* the terminating signal, if any; needed for wait() and friends */
    bool is_alive;

    PAL_HANDLE child_exit_event;
    LISTP_TYPE(shim_thread) exited_children;

    /* file system */
    struct shim_dentry * root, * cwd;
    mode_t umask;

    /* executable */
    struct shim_handle * exec;

    void * stack, * stack_top, * stack_red;
    unsigned long fs_base;
    shim_tcb_t * shim_tcb;
    void * frameptr;

    REFTYPE ref_count;
    struct shim_lock lock;

#ifdef PROFILE
    unsigned long exit_time;
#endif
};
DEFINE_LIST(shim_simple_thread);
struct shim_simple_thread {
    /* VMID and PIDs */
    IDTYPE vmid;
    IDTYPE pgid, tgid, tid;

    /* exit event and status */
    PAL_HANDLE exit_event;
    int exit_code;
    int term_signal;
    bool is_alive;

    /* nodes in global handles */
    LIST_TYPE(shim_simple_thread) list;

    REFTYPE ref_count;
    struct shim_lock lock;

#ifdef PROFILE
    unsigned long exit_time;
#endif
};
int init_thread (void);

static inline struct shim_thread * shim_thread_self(void)
{
    /* TODO: optimize to use single movq %gs:<offset> */
    shim_tcb_t * shim_tcb = shim_get_tls();
    return shim_tcb->tp;
}

static inline struct shim_thread * save_shim_thread_self(struct shim_thread * __self)
{
    /* TODO: optimize to use single movq %gs:<offset> */
    shim_tcb_t * shim_tcb = shim_get_tls();
    shim_tcb->tp = __self;
    return __self;
}

static inline bool is_internal(struct shim_thread * thread)
{
    return thread->tid >= INTERNAL_TID_BASE;
}

void get_thread (struct shim_thread * thread);
void put_thread (struct shim_thread * thread);
void get_simple_thread (struct shim_simple_thread * thread);
void put_simple_thread (struct shim_simple_thread * thread);

void allocate_tls (unsigned long fs_base, struct shim_thread * thread);
void populate_tls (unsigned long fs_base);

void debug_setprefix (shim_tcb_t * tcb);
static inline
__attribute__((always_inline))
void debug_setbuf (shim_tcb_t * tcb, bool on_stack)
{
    if (!debug_handle)
        return;

    tcb->debug_buf = on_stack ? __alloca(sizeof(struct debug_buf)) :
                     malloc(sizeof(struct debug_buf));
    debug_setprefix(tcb);
}
static inline
__attribute__((always_inline))
struct shim_thread * get_cur_thread (void)
{
    return shim_thread_self();
}

static inline
__attribute__((always_inline))
bool cur_thread_is_alive (void)
{
    struct shim_thread * thread = get_cur_thread();
    return thread ? thread->is_alive : false;
}
static inline
__attribute__((always_inline))
void set_cur_thread (struct shim_thread * thread)
{
    shim_tcb_t * tcb = shim_get_tls();
    IDTYPE tid = 0;

    if (thread) {
        unsigned long fs_base = tcb->tp ? tcb->tp->fs_base : 0;
        if (tcb->tp && tcb->tp != thread)
            put_thread(tcb->tp);

        if (tcb->tp != thread)
            get_thread(thread);

        tcb->tp = thread;
        thread->fs_base = fs_base;
        thread->shim_tcb = tcb;
        tid = thread->tid;

        if (!is_internal(thread) && !thread->signal_logs)
            thread->signal_logs = malloc(sizeof(struct shim_signal_log) *
                                         NUM_SIGS);
    } else if (tcb->tp) {
        put_thread(tcb->tp);
        tcb->tp = NULL;
    } else {
        BUG();
    }

    if (tcb->tid != tid) {
        tcb->tid = tid;
        debug_setprefix(tcb);
    }
}
static inline void thread_setwait (struct shim_thread ** queue,
                                   struct shim_thread * thread)
{
    if (!thread)
        thread = get_cur_thread();

    DkEventClear(thread->scheduler_event);
    if (queue) {
        get_thread(thread);
        *queue = thread;
    }
}

static inline int thread_sleep (uint64_t timeout_us)
{
    struct shim_thread * cur_thread = get_cur_thread();

    if (!cur_thread)
        return -EINVAL;

    PAL_HANDLE event = cur_thread->scheduler_event;
    if (!event)
        return -EINVAL;

    if (!DkObjectsWaitAny(1, &event, timeout_us))
        return -PAL_ERRNO;

    return 0;
}
static inline void thread_wakeup (struct shim_thread * thread)
{
    DkEventSet(thread->scheduler_event);
}
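
/* A minimal usage sketch (an assumption, not taken from actual callers) of the
 * scheduler_event helpers above: the sleeping side registers itself with
 * thread_setwait() and blocks in thread_sleep(); the waking side later signals
 * it with thread_wakeup() and drops the reference taken by thread_setwait().
 * `waiter` and the timeout value are illustrative only.
 *
 *     struct shim_thread * waiter = NULL;
 *     thread_setwait(&waiter, NULL);   // register current thread, takes a ref
 *     thread_sleep(NO_TIMEOUT);        // block on scheduler_event
 *
 *     // on the waking side:
 *     if (waiter) {
 *         thread_wakeup(waiter);
 *         put_thread(waiter);          // release the ref from thread_setwait()
 *     }
 */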
extern struct shim_lock thread_list_lock;

/*!
 * \brief Look up the thread for a given id.
 *
 * \param tid Thread id to look for.
 *
 * Searches the global thread list for a thread with id equal to \p tid.
 * If no thread is found, returns NULL.
 * Increases the refcount of the returned thread.
 */
struct shim_thread* lookup_thread(IDTYPE tid);
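
/* Usage sketch (inferred from the comment above): the caller owns the
 * reference returned by lookup_thread() and is expected to release it with
 * put_thread() when done:
 *
 *     struct shim_thread * thread = lookup_thread(tid);
 *     if (thread) {
 *         // ... use thread ...
 *         put_thread(thread);
 *     }
 */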
struct shim_simple_thread * __lookup_simple_thread (IDTYPE tid);
struct shim_simple_thread * lookup_simple_thread (IDTYPE tid);

void set_as_child (struct shim_thread * parent, struct shim_thread * child);

/* creating and revoking thread objects */
struct shim_thread * get_new_thread (IDTYPE new_tid);
struct shim_thread * get_new_internal_thread (void);
struct shim_simple_thread * get_new_simple_thread (void);

/* thread list utilities */
void add_thread (struct shim_thread * thread);
void del_thread (struct shim_thread * thread);
void add_simple_thread (struct shim_simple_thread * thread);
void del_simple_thread (struct shim_simple_thread * thread);

int check_last_thread (struct shim_thread * self);

#ifndef ALIAS_VFORK_AS_FORK
void switch_dummy_thread (struct shim_thread * thread);
#endif

int walk_thread_list (int (*callback) (struct shim_thread *, void *, bool *),
                      void * arg);
int walk_simple_thread_list (int (*callback) (struct shim_simple_thread *,
                                              void *, bool *),
                             void * arg);
/* reference counting of handle maps */
void get_handle_map (struct shim_handle_map * map);
void put_handle_map (struct shim_handle_map * map);

/* retrieving handle maps */
static inline __attribute__((always_inline))
struct shim_handle_map * get_cur_handle_map (struct shim_thread * thread)
{
    if (!thread)
        thread = get_cur_thread();

    return thread ? thread->handle_map : NULL;
}

static inline __attribute__((always_inline))
void set_handle_map (struct shim_thread * thread,
                     struct shim_handle_map * map)
{
    get_handle_map(map);

    if (!thread)
        thread = get_cur_thread();

    if (thread->handle_map)
        put_handle_map(thread->handle_map);

    thread->handle_map = map;
}
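
/* Sketch of the expected reference-counting discipline (inferred from the
 * inline code above, not documented elsewhere): get_cur_handle_map() returns
 * a borrowed pointer without taking a reference, while set_handle_map() takes
 * its own reference on `map` and drops the thread's previous one, so the
 * caller remains responsible for any reference it already holds:
 *
 *     struct shim_handle_map * map = get_cur_handle_map(NULL); // borrowed, no ref taken
 *     set_handle_map(child, map);                              // child now holds its own ref
 */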
/* shim exit callback */
int thread_exit (struct shim_thread * self, bool send_ipc);

/* If the process was killed by a signal, pass it in the second
 * argument, else pass zero. */
int try_process_exit (int error_code, int term_signal);

/* thread cloning helpers */
struct clone_args {
    PAL_HANDLE create_event;
    PAL_HANDLE initialize_event;
    struct shim_thread * parent, * thread;
    void * stack;
};

void * allocate_stack (size_t size, size_t protect_size, bool user);

static inline __attribute__((always_inline))
bool check_stack_size (struct shim_thread * cur_thread, int size)
{
    if (!cur_thread)
        cur_thread = get_cur_thread();

    void * rsp;
    __asm__ volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");

    if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack)
        return size < rsp - cur_thread->stack;

    return false;
}

static inline __attribute__((always_inline))
bool check_on_stack (struct shim_thread * cur_thread, void * mem)
{
    if (!cur_thread)
        cur_thread = get_cur_thread();

    return (mem <= cur_thread->stack_top && mem > cur_thread->stack);
}

int init_stack (const char ** argv, const char ** envp,
                int ** argcpp, const char *** argpp,
                elf_auxv_t ** auxpp);

#endif /* _SHIM_THREAD_H_ */