/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_thread.c
 *
 * This file contains code to maintain the bookkeeping of threads in the
 * library OS.
 */

#include <shim_defs.h>
#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_fs.h>
#include <shim_checkpoint.h>
#include <shim_utils.h>
#include <pal.h>
#include <list.h>
#include <linux/signal.h>

static IDTYPE tid_alloc_idx __attribute_migratable = 0;

static LISTP_TYPE(shim_thread) thread_list = LISTP_INIT;
DEFINE_LISTP(shim_simple_thread);
static LISTP_TYPE(shim_simple_thread) simple_thread_list = LISTP_INIT;

struct shim_lock thread_list_lock;

static IDTYPE internal_tid_alloc_idx = INTERNAL_TID_BASE;

PAL_HANDLE thread_start_event = NULL;

//#define DEBUG_REF
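
/* Uncommenting DEBUG_REF above enables the extra tracing in get_thread() and
 * put_thread() below, which logs every reference-count change per thread. */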

int init_thread (void)
{
    create_lock(&thread_list_lock);

    struct shim_thread * cur_thread = get_cur_thread();
    if (cur_thread)
        return 0;

    if (!(cur_thread = get_new_thread(0)))
        return -ENOMEM;

    cur_thread->in_vm = cur_thread->is_alive = true;
    set_cur_thread(cur_thread);
    add_thread(cur_thread);
    cur_thread->pal_handle = PAL_CB(first_thread);
    return 0;
}

void dump_threads (void)
{
    struct shim_thread * tmp;

    lock(&thread_list_lock);
    LISTP_FOR_EACH_ENTRY(tmp, &thread_list, list) {
        debug("thread %d, vmid = %d, pgid = %d, ppid = %d, tgid = %d, in_vm = %d\n",
              tmp->tid, tmp->vmid, tmp->pgid, tmp->ppid, tmp->tgid, tmp->in_vm);
    }
    unlock(&thread_list_lock);
}

static struct shim_thread* __lookup_thread(IDTYPE tid) {
    assert(locked(&thread_list_lock));

    struct shim_thread* tmp;
    LISTP_FOR_EACH_ENTRY(tmp, &thread_list, list) {
        if (tmp->tid == tid) {
            get_thread(tmp);
            return tmp;
        }
    }
    return NULL;
}

struct shim_thread* lookup_thread(IDTYPE tid) {
    lock(&thread_list_lock);
    struct shim_thread* thread = __lookup_thread(tid);
    unlock(&thread_list_lock);
    return thread;
}
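
/* Allocate a fresh thread/process ID. A sketch of the logic, as the code reads:
 * allocate_pid() hands out an ID from the range of PIDs this process currently
 * owns, starting the search just past the last allocated index (tid_alloc_idx)
 * and wrapping around once. If the owned range is exhausted, ipc_pid_lease_send()
 * presumably requests a lease on a new range of PIDs over IPC, and the outer loop
 * retries. Returns 0 on failure. */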

IDTYPE get_pid (void)
{
    IDTYPE idx;

    while (1) {
        IDTYPE old_idx = tid_alloc_idx;
        IDTYPE max = 0;
        idx = old_idx + 1;

        do {
            if ((idx = allocate_pid(idx, max)))
                break;

            tid_alloc_idx = idx;
            if (!idx) {
                if (max == old_idx)
                    break;

                max = old_idx;
            }
        } while (idx != tid_alloc_idx);

        if (idx != tid_alloc_idx)
            break;

        if (ipc_pid_lease_send(NULL) < 0)
            return 0;
    }

    tid_alloc_idx = idx;
    return idx;
}

static IDTYPE get_internal_pid (void)
{
    lock(&thread_list_lock);
    internal_tid_alloc_idx++;
    IDTYPE idx = internal_tid_alloc_idx;
    unlock(&thread_list_lock);
    assert(is_internal_tid(idx));
    return idx;
}

struct shim_thread * alloc_new_thread (void)
{
    struct shim_thread * thread = calloc(1, sizeof(struct shim_thread));
    if (!thread)
        return NULL;

    REF_SET(thread->ref_count, 1);
    INIT_LISTP(&thread->children);
    INIT_LIST_HEAD(thread, siblings);
    INIT_LISTP(&thread->exited_children);
    INIT_LIST_HEAD(thread, list);
    /* default value, as the sigaltstack isn't specified yet */
    thread->signal_altstack.ss_flags = SS_DISABLE;
    return thread;
}
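
/* Note that alloc_new_thread() returns with ref_count already set to 1: the
 * caller owns that initial reference and is expected to release it with
 * put_thread() when done. */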

struct shim_thread * get_new_thread (IDTYPE new_tid)
{
    if (!new_tid) {
        new_tid = get_pid();
        assert(new_tid);
    }

    struct shim_thread * thread = alloc_new_thread();
    if (!thread)
        return NULL;

    struct shim_thread * cur_thread = get_cur_thread();
    thread->tid = new_tid;

    if (cur_thread) {
        /* The newly created thread will be in the same thread group
           (and process group) as its parent */
        thread->pgid      = cur_thread->pgid;
        thread->ppid      = cur_thread->tgid;
        thread->tgid      = cur_thread->tgid;
        thread->uid       = cur_thread->uid;
        thread->gid       = cur_thread->gid;
        thread->euid      = cur_thread->euid;
        thread->egid      = cur_thread->egid;
        thread->parent    = cur_thread;
        thread->stack     = cur_thread->stack;
        thread->stack_top = cur_thread->stack_top;
        thread->stack_red = cur_thread->stack_red;
        thread->cwd       = cur_thread->cwd;
        thread->root      = cur_thread->root;
        thread->umask     = cur_thread->umask;
        thread->exec      = cur_thread->exec;
        get_handle(cur_thread->exec);

        for (int i = 0 ; i < NUM_SIGS ; i++) {
            if (!cur_thread->signal_handles[i].action)
                continue;

            thread->signal_handles[i].action =
                    malloc_copy(cur_thread->signal_handles[i].action,
                                sizeof(*thread->signal_handles[i].action));
        }

        memcpy(&thread->signal_mask, &cur_thread->signal_mask,
               sizeof(sigset_t));

        get_dentry(cur_thread->cwd);
        get_dentry(cur_thread->root);

        struct shim_handle_map * map = get_cur_handle_map(cur_thread);
        assert(map);
        set_handle_map(thread, map);
    } else {
        /* by default, pid, pgid, and tgid all equal the tid */
        thread->ppid = thread->pgid = thread->tgid = new_tid;

        /* this case should fall back to the global root of the file system */
        path_lookupat(NULL, "/", 0, &thread->root, NULL);

        char dir_cfg[CONFIG_MAX];
        if (root_config &&
            get_config(root_config, "fs.start_dir", dir_cfg, sizeof(dir_cfg)) > 0) {
            path_lookupat(NULL, dir_cfg, 0, &thread->cwd, NULL);
        } else if (thread->root) {
            get_dentry(thread->root);
            thread->cwd = thread->root;
        }
    }

    thread->signal_logs = malloc(sizeof(struct shim_signal_log) * NUM_SIGS);
    thread->vmid = cur_process.vmid;
    create_lock(&thread->lock);
    thread->scheduler_event  = DkNotificationEventCreate(PAL_TRUE);
    thread->exit_event       = DkNotificationEventCreate(PAL_FALSE);
    thread->child_exit_event = DkNotificationEventCreate(PAL_FALSE);
    return thread;
}
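
/* A hypothetical sketch of how a clone()-style caller could combine the helpers
 * in this file (the local names are illustrative, not taken from actual callers):
 *
 *     struct shim_thread* child = get_new_thread(0);  // 0 means: pick a fresh tid
 *     if (!child)
 *         return -ENOMEM;
 *     set_as_child(NULL, child);                      // current thread as parent
 *     add_thread(child);                              // publish on thread_list
 *     ...
 *     put_thread(child);                              // drop our initial reference
 */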

struct shim_thread * get_new_internal_thread (void)
{
    IDTYPE new_tid = get_internal_pid();
    assert(new_tid);

    struct shim_thread * thread = alloc_new_thread();
    if (!thread)
        return NULL;

    thread->vmid = cur_process.vmid;
    thread->tid = new_tid;
    thread->in_vm = thread->is_alive = true;
    create_lock(&thread->lock);
    thread->exit_event = DkNotificationEventCreate(PAL_FALSE);
    return thread;
}

struct shim_simple_thread * __lookup_simple_thread (IDTYPE tid)
{
    assert(locked(&thread_list_lock));

    struct shim_simple_thread * tmp;
    LISTP_FOR_EACH_ENTRY(tmp, &simple_thread_list, list) {
        if (tmp->tid == tid) {
            get_simple_thread(tmp);
            return tmp;
        }
    }
    return NULL;
}

struct shim_simple_thread * lookup_simple_thread (IDTYPE tid)
{
    lock(&thread_list_lock);
    struct shim_simple_thread * thread = __lookup_simple_thread(tid);
    unlock(&thread_list_lock);
    return thread;
}

struct shim_simple_thread * get_new_simple_thread (void)
{
    struct shim_simple_thread * thread =
            malloc(sizeof(struct shim_simple_thread));
    if (!thread)
        return NULL;

    memset(thread, 0, sizeof(struct shim_simple_thread));

    INIT_LIST_HEAD(thread, list);
    create_lock(&thread->lock);
    thread->exit_event = DkNotificationEventCreate(PAL_FALSE);
    return thread;
}

void get_thread (struct shim_thread * thread)
{
#ifdef DEBUG_REF
    int ref_count = REF_INC(thread->ref_count);
    debug("get_thread %p(%d) (ref_count = %d)\n", thread, thread->tid,
          ref_count);
#else
    REF_INC(thread->ref_count);
#endif
}

void put_thread (struct shim_thread * thread)
{
    int ref_count = REF_DEC(thread->ref_count);

#ifdef DEBUG_REF
    debug("put_thread %p(%d) (ref_count = %d)\n", thread, thread->tid,
          ref_count);
#endif

    if (!ref_count) {
        if (thread->exec)
            put_handle(thread->exec);

        if (!is_internal(thread))
            release_pid(thread->tid);

        if (thread->pal_handle &&
            thread->pal_handle != PAL_CB(first_thread))
            DkObjectClose(thread->pal_handle);

        if (thread->scheduler_event)
            DkObjectClose(thread->scheduler_event);
        if (thread->exit_event)
            DkObjectClose(thread->exit_event);
        if (thread->child_exit_event)
            DkObjectClose(thread->child_exit_event);

        destroy_lock(&thread->lock);

        free(thread->signal_logs);
        free(thread);
    }
}

void get_simple_thread (struct shim_simple_thread * thread)
{
    REF_INC(thread->ref_count);
}

void put_simple_thread (struct shim_simple_thread * thread)
{
    assert(locked(&thread_list_lock));

    int ref_count = REF_DEC(thread->ref_count);

    if (!ref_count) {
        /* Simple threads always live on the simple thread list */
        LISTP_DEL(thread, &simple_thread_list, list);
        if (thread->exit_event)
            DkObjectClose(thread->exit_event);
        destroy_lock(&thread->lock);
        free(thread);
    }
}
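
/* set_as_child() links `child` under `parent` (the current thread if parent is
 * NULL), taking a reference on both. Note the nested lock order: child->lock is
 * taken before parent->lock; presumably every path that takes both locks must
 * follow the same order to avoid deadlock. */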

void set_as_child (struct shim_thread * parent,
                   struct shim_thread * child)
{
    if (!parent)
        parent = get_cur_thread();

    get_thread(parent);
    get_thread(child);

    lock(&child->lock);
    child->ppid = parent->tid;
    child->parent = parent;

    lock(&parent->lock);
    LISTP_ADD_TAIL(child, &parent->children, siblings);
    unlock(&parent->lock);

    unlock(&child->lock);
}
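
/* thread_list is kept sorted by tid in ascending order. Since freshly allocated
 * tids are usually the largest so far, the insertion scan below walks the list in
 * reverse, so it typically terminates after one or two steps. */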

void add_thread (struct shim_thread * thread)
{
    if (is_internal(thread) || !LIST_EMPTY(thread, list))
        return;

    struct shim_thread * tmp, * prev = NULL;
    lock(&thread_list_lock);

    /* keep it sorted */
    LISTP_FOR_EACH_ENTRY_REVERSE(tmp, &thread_list, list) {
        if (tmp->tid == thread->tid) {
            unlock(&thread_list_lock);
            return;
        }
        if (tmp->tid < thread->tid) {
            prev = tmp;
            break;
        }
    }

    get_thread(thread);
    LISTP_ADD_AFTER(thread, prev, &thread_list, list);
    unlock(&thread_list_lock);
}

void del_thread (struct shim_thread * thread)
{
    debug("del_thread(%p, %d, %ld)\n", thread, thread ? (int) thread->tid : -1,
          atomic_read(&thread->ref_count));

    if (is_internal(thread) || LIST_EMPTY(thread, list)) {
        debug("del_thread: internal\n");
        return;
    }

    lock(&thread_list_lock);
    /* thread->list goes on the thread_list */
    LISTP_DEL_INIT(thread, &thread_list, list);
    unlock(&thread_list_lock);
    put_thread(thread);
}

void add_simple_thread (struct shim_simple_thread * thread)
{
    if (!LIST_EMPTY(thread, list))
        return;

    struct shim_simple_thread * tmp, * prev = NULL;
    lock(&thread_list_lock);

    /* keep it sorted */
    LISTP_FOR_EACH_ENTRY_REVERSE(tmp, &simple_thread_list, list) {
        if (tmp->tid == thread->tid) {
            unlock(&thread_list_lock);
            return;
        }
        if (tmp->tid < thread->tid) {
            prev = tmp;
            break;
        }
    }

    get_simple_thread(thread);
    LISTP_ADD_AFTER(thread, prev, &simple_thread_list, list);
    unlock(&thread_list_lock);
}

void del_simple_thread (struct shim_simple_thread * thread)
{
    if (LIST_EMPTY(thread, list))
        return;

    lock(&thread_list_lock);
    LISTP_DEL_INIT(thread, &simple_thread_list, list);
    /* put_simple_thread() asserts that thread_list_lock is held, so release the
       reference before unlocking */
    put_simple_thread(thread);
    unlock(&thread_list_lock);
}

static int _check_last_thread(struct shim_thread* self) {
    assert(locked(&thread_list_lock));

    IDTYPE self_tid = self ? self->tid : 0;

    struct shim_thread* thread;
    LISTP_FOR_EACH_ENTRY(thread, &thread_list, list) {
        if (thread->tid && thread->tid != self_tid && thread->in_vm && thread->is_alive) {
            return thread->tid;
        }
    }
    return 0;
}

/* Checks for any alive threads apart from the thread `self`. Returns the tid of the first alive
 * thread found, or 0 if there are none. `self` can be NULL, in which case all threads are
 * checked. */
int check_last_thread(struct shim_thread* self) {
    lock(&thread_list_lock);
    int alive_thread_tid = _check_last_thread(self);
    unlock(&thread_list_lock);
    return alive_thread_tid;
}

/* This function is called by the Async Helper thread to wait for thread->clear_child_tid_pal to
 * be zeroed (the PAL does this when the thread finally exits). Since it is a callback to the
 * Async Helper thread, this function must follow the `void (*callback) (IDTYPE caller, void* arg)`
 * signature. */
void cleanup_thread(IDTYPE caller, void* arg) {
    __UNUSED(caller);

    struct shim_thread* thread = (struct shim_thread*)arg;
    assert(thread);

    int exit_code = thread->term_signal ? : thread->exit_code;

    /* wait on clear_child_tid_pal; this signals that the PAL layer exited the child thread */
    while (__atomic_load_n(&thread->clear_child_tid_pal, __ATOMIC_RELAXED) != 0) {
        __asm__ volatile ("pause");
    }

    /* notify the parent, if any */
    release_clear_child_tid(thread->clear_child_tid);

    /* clean up the thread itself */
    lock(&thread_list_lock);
    thread->is_alive = false;
    LISTP_DEL_INIT(thread, &thread_list, list);
    put_thread(thread);

    if (!_check_last_thread(NULL)) {
        /* corner case when all application threads exited via exit(); only the Async helper
         * and IPC helper threads are left at this point, so simply exit the process (recall
         * that processes typically exit via exit_group()) */
        unlock(&thread_list_lock);
        shim_clean_and_exit(exit_code);
    }
    unlock(&thread_list_lock);
}
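
/* walk_thread_list() invokes `callback` on every thread on thread_list, with
 * thread_list_lock held. The contract, as the code reads: a callback returns a
 * positive value to mark the walk as successful, 0 or -ESRCH to keep going, and
 * any other negative value to abort the walk. If a callback had to drop
 * thread_list_lock, it must set *unlocked; the walker then reacquires the lock
 * and restarts the scan, skipping tids at or below the last visited one
 * (min_tid). A hypothetical callback that counts alive threads might look like:
 *
 *     static int count_alive(struct shim_thread* thread, void* arg, bool* unlocked) {
 *         __UNUSED(unlocked);     // we never drop thread_list_lock
 *         if (thread->is_alive)
 *             (*(int*)arg)++;
 *         return 1;               // mark the walk as successful
 *     }
 *
 *     int count = 0;
 *     int ret = walk_thread_list(&count_alive, &count);
 */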

int walk_thread_list (int (*callback) (struct shim_thread *, void *, bool *),
                      void * arg)
{
    struct shim_thread * tmp, * n;
    bool srched = false;
    int ret;
    IDTYPE min_tid = 0;

relock:
    lock(&thread_list_lock);

    debug("walk_thread_list(callback=%p)\n", callback);

    LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &thread_list, list) {
        if (tmp->tid <= min_tid)
            continue;

        bool unlocked = false;
        ret = (*callback) (tmp, arg, &unlocked);
        if (ret < 0 && ret != -ESRCH) {
            if (unlocked)
                goto out;
            else
                goto out_locked;
        }

        if (ret > 0)
            srched = true;

        if (unlocked) {
            min_tid = tmp->tid;
            goto relock;
        }
    }

    ret = srched ? 0 : -ESRCH;
out_locked:
    unlock(&thread_list_lock);
out:
    return ret;
}

int walk_simple_thread_list (int (*callback) (struct shim_simple_thread *,
                                              void *, bool *),
                             void * arg)
{
    struct shim_simple_thread * tmp, * n;
    bool srched = false;
    int ret;
    IDTYPE min_tid = 0;

relock:
    lock(&thread_list_lock);

    LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &simple_thread_list, list) {
        if (tmp->tid <= min_tid)
            continue;

        bool unlocked = false;
        ret = (*callback) (tmp, arg, &unlocked);
        if (ret < 0 && ret != -ESRCH) {
            if (unlocked)
                goto out;
            else
                goto out_locked;
        }

        if (ret > 0)
            srched = true;

        if (unlocked) {
            min_tid = tmp->tid;
            goto relock;
        }
    }

    ret = srched ? 0 : -ESRCH;
out_locked:
    unlock(&thread_list_lock);
out:
    return ret;
}
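
/* vfork() support (when vfork is not aliased to fork): the child of a vfork runs
 * as a "dummy" thread that borrows the parent's stack. switch_dummy_thread()
 * completes shim_vfork on the parent side, as the debug messages below suggest:
 * it copies the saved parent stack back into place, restores the parent's TCB via
 * DkSegmentRegister(), and then the inline assembly loads the saved frame pointer
 * into %rbp, executes leaveq (rsp = rbp; pop rbp) and retq, so control returns to
 * the parent's saved frame with the child tid in %rax as vfork's return value. */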

#ifndef ALIAS_VFORK_AS_FORK
void switch_dummy_thread (struct shim_thread * thread)
{
    struct shim_thread * real_thread = thread->dummy;
    IDTYPE child = thread->tid;

    assert(thread->frameptr);
    assert(real_thread->stack);
    assert(real_thread->stack_top > real_thread->stack);

    memcpy(thread->frameptr, real_thread->stack,
           real_thread->stack_top - real_thread->stack);

    real_thread->stack     = thread->stack;
    real_thread->stack_top = thread->stack_top;
    real_thread->frameptr  = thread->frameptr;

    DkSegmentRegister(PAL_SEGMENT_FS, real_thread->tcb);
    set_cur_thread(real_thread);
    debug("set tcb to %p\n", real_thread->tcb);
    debug("jump to the stack %p\n", real_thread->frameptr);
    debug("shim_vfork success (returning %d)\n", child);

    /* jump onto the old stack:
       we actually pop rbp as rsp, and later we will execute 'ret' */
    __asm__ volatile("movq %0, %%rbp\r\n"
                     "leaveq\r\n"
                     "retq\r\n" :
                     : "g"(real_thread->frameptr),
                       "a"(child)
                     : "memory");
}
#endif
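
/* What follows are the checkpoint (CP) and restore (RS) handlers for thread
 * objects, built on the macros from shim_checkpoint.h included above. Roughly:
 * BEGIN_CP_FUNC(x) defines how an object of type x is serialized into the
 * checkpoint region based at `base`; GET_FROM_CP_MAP/ADD_TO_CP_MAP deduplicate
 * objects that are checkpointed more than once; DO_CP_MEMBER recursively
 * checkpoints a pointed-to member; and on the receiving side, BEGIN_RS_FUNC(x)
 * fixes up pointers with CP_REBASE and re-creates the resources that cannot be
 * migrated as raw bytes (locks, PAL event handles). */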

BEGIN_CP_FUNC(thread)
{
    __UNUSED(size);
    assert(size == sizeof(struct shim_thread));

    struct shim_thread * thread = (struct shim_thread *) obj;
    struct shim_thread * new_thread = NULL;

    ptr_t off = GET_FROM_CP_MAP(obj);

    if (!off) {
        off = ADD_CP_OFFSET(sizeof(struct shim_thread));
        ADD_TO_CP_MAP(obj, off);

        new_thread = (struct shim_thread *) (base + off);
        memcpy(new_thread, thread, sizeof(struct shim_thread));

        INIT_LISTP(&new_thread->children);
        INIT_LIST_HEAD(new_thread, siblings);
        INIT_LISTP(&new_thread->exited_children);
        INIT_LIST_HEAD(new_thread, list);

        new_thread->in_vm  = false;
        new_thread->parent = NULL;
#ifndef ALIAS_VFORK_AS_FORK
        new_thread->dummy  = NULL;
#endif
        new_thread->handle_map  = NULL;
        new_thread->root        = NULL;
        new_thread->cwd         = NULL;
        new_thread->signal_logs = NULL;
        new_thread->robust_list = NULL;
        REF_SET(new_thread->ref_count, 0);

        for (int i = 0 ; i < NUM_SIGS ; i++)
            if (thread->signal_handles[i].action) {
                ptr_t soff = ADD_CP_OFFSET(sizeof(struct __kernel_sigaction));
                new_thread->signal_handles[i].action
                        = (struct __kernel_sigaction *) (base + soff);
                memcpy(new_thread->signal_handles[i].action,
                       thread->signal_handles[i].action,
                       sizeof(struct __kernel_sigaction));
            }

        DO_CP_MEMBER(handle,     thread, new_thread, exec);
        DO_CP_MEMBER(handle_map, thread, new_thread, handle_map);
        DO_CP_MEMBER(dentry,     thread, new_thread, root);
        DO_CP_MEMBER(dentry,     thread, new_thread, cwd);
        ADD_CP_FUNC_ENTRY(off);
    } else {
        new_thread = (struct shim_thread *) (base + off);
    }

    if (objp)
        *objp = (void *) new_thread;
}
END_CP_FUNC(thread)

BEGIN_RS_FUNC(thread)
{
    struct shim_thread * thread = (void *) (base + GET_CP_FUNC_ENTRY());
    __UNUSED(offset);

    CP_REBASE(thread->children);
    CP_REBASE(thread->siblings);
    CP_REBASE(thread->exited_children);
    CP_REBASE(thread->list);
    CP_REBASE(thread->exec);
    CP_REBASE(thread->handle_map);
    CP_REBASE(thread->root);
    CP_REBASE(thread->cwd);
    CP_REBASE(thread->signal_handles);

    create_lock(&thread->lock);
    thread->scheduler_event  = DkNotificationEventCreate(PAL_TRUE);
    thread->exit_event       = DkNotificationEventCreate(PAL_FALSE);
    thread->child_exit_event = DkNotificationEventCreate(PAL_FALSE);

    add_thread(thread);

    if (thread->exec)
        get_handle(thread->exec);
    if (thread->handle_map)
        get_handle_map(thread->handle_map);
    if (thread->root)
        get_dentry(thread->root);
    if (thread->cwd)
        get_dentry(thread->cwd);

    DEBUG_RS("tid=%d,tgid=%d,parent=%d,stack=%p,frameptr=%p,tcb=%p,shim_tcb=%p",
             thread->tid, thread->tgid,
             thread->parent ? thread->parent->tid : thread->tid,
             thread->stack, thread->frameptr, thread->tcb, thread->shim_tcb);
}
END_RS_FUNC(thread)

BEGIN_CP_FUNC(running_thread)
{
    __UNUSED(size);
    __UNUSED(objp);
    assert(size == sizeof(struct shim_thread));

    struct shim_thread * thread = (struct shim_thread *) obj;
    struct shim_thread * new_thread = NULL;

    DO_CP(thread, thread, &new_thread);
    ADD_CP_FUNC_ENTRY((ptr_t) new_thread - base);

    if (thread->shim_tcb) {
        ptr_t toff = ADD_CP_OFFSET(sizeof(shim_tcb_t));
        new_thread->shim_tcb = (void *)(base + toff);
        struct shim_tcb* new_tcb = new_thread->shim_tcb;
        memcpy(new_tcb, thread->shim_tcb, sizeof(*new_tcb));
        /* don't export stale pointers */
        new_tcb->self         = NULL;
        new_tcb->tp           = NULL;
        new_tcb->context.next = NULL;
        new_tcb->debug_buf    = NULL;
    }
}
END_CP_FUNC(running_thread)
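
/* resume_wrapper() is the entry point of a thread re-created with
 * DkThreadCreate() during restore (see BEGIN_RS_FUNC(running_thread) below): it
 * rebuilds the thread's shim TCB from the checkpointed copy, waits on the global
 * thread_start_event until the restore is complete, and finally jumps back into
 * the application context via restore_context(). */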

static int resume_wrapper (void * param)
{
    struct shim_thread * thread = (struct shim_thread *) param;
    assert(thread);

    /* initialize the current shim_tcb_t (= shim_get_tcb())
       based on the saved thread->shim_tcb */
    shim_tcb_init();

    shim_tcb_t* saved_tcb = thread->shim_tcb;
    assert(saved_tcb->context.regs && saved_tcb->context.regs->rsp);

    set_cur_thread(thread);

    unsigned long fs_base = saved_tcb->context.fs_base;
    assert(fs_base);
    update_fs_base(fs_base);

    thread->in_vm = thread->is_alive = true;

    shim_tcb_t* tcb = shim_get_tcb();
    tcb->context.regs       = saved_tcb->context.regs;
    tcb->context.enter_time = saved_tcb->context.enter_time;
    tcb->context.preempt    = saved_tcb->context.preempt;
    debug_setbuf(tcb, false);
    debug("set fs_base to 0x%lx\n", fs_base);

    object_wait_with_retry(thread_start_event);

    restore_context(&tcb->context);
    return 0;
}

BEGIN_RS_FUNC(running_thread)
{
    __UNUSED(offset);
    struct shim_thread * thread = (void *) (base + GET_CP_FUNC_ENTRY());
    struct shim_thread * cur_thread = get_cur_thread();

    thread->in_vm = true;
    thread->vmid = cur_process.vmid;

    if (thread->shim_tcb)
        CP_REBASE(thread->shim_tcb);

    if (thread->set_child_tid) {
        /* CLONE_CHILD_SETTID */
        *thread->set_child_tid = thread->tid;
        thread->set_child_tid = NULL;
    }

    thread->signal_logs = malloc(sizeof(struct shim_signal_log) * NUM_SIGS);

    if (cur_thread) {
        PAL_HANDLE handle = DkThreadCreate(resume_wrapper, thread);
        if (!handle)
            return -PAL_ERRNO;

        thread->pal_handle = handle;
    } else {
        shim_tcb_t* saved_tcb = thread->shim_tcb;

        if (saved_tcb) {
            /* fork case */
            shim_tcb_t* tcb = shim_get_tcb();
            memcpy(tcb, saved_tcb, sizeof(*tcb));
            __shim_tcb_init(tcb);
            set_cur_thread(thread);

            assert(tcb->context.regs && tcb->context.regs->rsp);
            update_fs_base(tcb->context.fs_base);
            /* Temporarily disable preemption until the thread resumes. */
            __disable_preempt(tcb);
            debug_setbuf(tcb, false);
            debug("after resume, set tcb to 0x%lx\n", tcb->context.fs_base);
        } else {
            /*
             * In the execve case, the following holds:
             *   stack     = NULL
             *   stack_top = NULL
             *   frameptr  = NULL
             *   tcb       = NULL
             *   shim_tcb  = NULL
             *   in_vm     = false
             */
            set_cur_thread(thread);
            debug_setbuf(thread->shim_tcb, false);
        }

        thread->in_vm = thread->is_alive = true;
        thread->pal_handle = PAL_CB(first_thread);
    }

    DEBUG_RS("tid=%d", thread->tid);
}
END_RS_FUNC(running_thread)

BEGIN_CP_FUNC(all_running_threads)
{
    __UNUSED(obj);
    __UNUSED(size);
    __UNUSED(objp);
    struct shim_thread * thread;

    lock(&thread_list_lock);
    LISTP_FOR_EACH_ENTRY(thread, &thread_list, list) {
        if (!thread->in_vm || !thread->is_alive)
            continue;

        DO_CP(running_thread, thread, NULL);
        DO_CP(handle_map, thread->handle_map, NULL);
    }
    unlock(&thread_list_lock);
}
END_CP_FUNC_NO_RS(all_running_threads)