/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */

/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_thread.c
 *
 * This file contains code to maintain the bookkeeping of threads in the
 * library OS.
 */

#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_fs.h>
#include <shim_checkpoint.h>

#include <pal.h>
#include <linux_list.h>

#define THREAD_MGR_ALLOC    4

static LOCKTYPE thread_mgr_lock;

#define system_lock()       lock(thread_mgr_lock)
#define system_unlock()     unlock(thread_mgr_lock)
#define PAGE_SIZE           allocsize
#define OBJ_TYPE struct shim_thread
#include <memmgr.h>

static MEM_MGR thread_mgr = NULL;
static IDTYPE tid_alloc_idx __attribute_migratable = 0;

static LIST_HEAD(thread_list);
static LIST_HEAD(simple_thread_list);
LOCKTYPE thread_list_lock;

static IDTYPE internal_tid_alloc_idx = INTERNAL_TID_BASE;

PAL_HANDLE thread_start_event = NULL;

//#define DEBUG_REF
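
/* init_thread(): run once during library OS initialization. It creates the
 * bookkeeping locks and the thread memory manager, and bootstraps the first
 * thread of the process if no current thread exists yet. */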
int init_thread (void)
{
    create_lock(thread_list_lock);
    create_lock(thread_mgr_lock);

    thread_mgr = create_mem_mgr(init_align_up(THREAD_MGR_ALLOC));
    if (!thread_mgr)
        return -ENOMEM;

    struct shim_thread * cur_thread = get_cur_thread();
    if (cur_thread)
        return 0;

    if (!(cur_thread = get_new_thread(0)))
        return -ENOMEM;

    cur_thread->in_vm = cur_thread->is_alive = true;
    get_thread(cur_thread);
    set_cur_thread(cur_thread);
    add_thread(cur_thread);
    cur_thread->pal_handle = PAL_CB(first_thread);
    return 0;
}

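/* __lookup_thread()/lookup_thread() return the thread with the given tid,
 * or NULL. On success they take a reference on the thread, so the caller
 * owes a matching put_thread(). A minimal sketch of the intended use:
 *
 *     struct shim_thread * t = lookup_thread(tid);
 *     if (t) {
 *         ... inspect or signal t ...
 *         put_thread(t);   // drop the reference taken by the lookup
 *     }
 *
 * __lookup_thread() assumes thread_list_lock is already held. */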
struct shim_thread * __lookup_thread (IDTYPE tid)
{
    struct shim_thread * tmp;

    list_for_each_entry(tmp, &thread_list, list)
        if (tmp->tid == tid) {
            get_thread(tmp);
            return tmp;
        }

    return NULL;
}

struct shim_thread * lookup_thread (IDTYPE tid)
{
    lock(thread_list_lock);
    struct shim_thread * thread = __lookup_thread(tid);
    unlock(thread_list_lock);
    return thread;
}

struct shim_thread * __get_cur_thread (void)
{
    return SHIM_THREAD_SELF();
}

shim_tcb_t * __get_cur_tcb (void)
{
    return SHIM_GET_TLS();
}

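/* get_pid(): allocate a new thread/process ID. IDs come from ranges leased
 * over IPC (see allocate_pid() and ipc_pid_lease_send()); when the local
 * range is exhausted, a fresh lease is requested and the allocation
 * retries. Returns 0 on failure. */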
static IDTYPE get_pid (void)
{
    IDTYPE idx;

    while (1) {
        IDTYPE old_idx = tid_alloc_idx;
        IDTYPE max = 0;
        idx = old_idx + 1;

        do {
            if ((idx = allocate_pid(idx, max)))
                break;

            tid_alloc_idx = idx;
            if (!idx) {
                if (max == old_idx)
                    break;
                max = old_idx;
            }
        } while (idx != tid_alloc_idx);

        if (idx != tid_alloc_idx)
            break;

        if (ipc_pid_lease_send(NULL) < 0)
            return 0;
    }

    tid_alloc_idx = idx;
    return idx;
}

static IDTYPE get_internal_pid (void)
{
    lock(thread_list_lock);
    internal_tid_alloc_idx++;
    IDTYPE idx = internal_tid_alloc_idx;
    unlock(thread_list_lock);
    return idx;
}

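/* init_mem_mgr(): lazily create the thread memory manager. Two threads may
 * race to create it; the loser destroys its redundant manager outside the
 * lock, so exactly one manager survives. */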
static inline int init_mem_mgr (void)
{
    if (thread_mgr)
        return 0;

    MEM_MGR mgr = create_mem_mgr(init_align_up(THREAD_MGR_ALLOC));
    MEM_MGR old_mgr = NULL;

    lock(thread_mgr_lock);
    if (mgr) {
        if (thread_mgr) {
            old_mgr = mgr;
            mgr = thread_mgr;
        } else {
            thread_mgr = mgr;
        }
    }
    unlock(thread_mgr_lock);

    if (old_mgr)
        destroy_mem_mgr(old_mgr);

    return mgr ? 0 : -ENOMEM;
}

struct shim_thread * alloc_new_thread (void)
{
    struct shim_thread * thread =
            get_mem_obj_from_mgr_enlarge(thread_mgr,
                                         size_align_up(THREAD_MGR_ALLOC));
    if (!thread)
        return NULL;

    memset(thread, 0, sizeof(struct shim_thread));
    REF_SET(thread->ref_count, 1);
    INIT_LIST_HEAD(&thread->children);
    INIT_LIST_HEAD(&thread->siblings);
    INIT_LIST_HEAD(&thread->exited_children);
    INIT_LIST_HEAD(&thread->list);
    return thread;
}

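/* get_new_thread(): allocate and initialize a thread descriptor. If a
 * current thread exists (the fork/clone path), the child inherits its
 * thread group, credentials, working directory, signal handlers, and handle
 * map; otherwise (the first thread) the defaults are derived from the tid
 * and the manifest key "fs.start_dir". */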
struct shim_thread * get_new_thread (IDTYPE new_tid)
{
    if (init_mem_mgr() < 0)
        return NULL;

    if (!new_tid) {
        new_tid = get_pid();
        assert(new_tid);
    }

    struct shim_thread * thread = alloc_new_thread();
    if (!thread)
        return NULL;

    struct shim_thread * cur_thread = get_cur_thread();
    thread->tid = new_tid;

    if (cur_thread) {
        /* The newly created thread will be in the same thread group
           (and process group) as its parent */
        thread->pgid      = cur_thread->pgid;
        thread->ppid      = cur_thread->tgid;
        thread->tgid      = cur_thread->tgid;
        thread->uid       = cur_thread->uid;
        thread->gid       = cur_thread->gid;
        thread->euid      = cur_thread->euid;
        thread->egid      = cur_thread->egid;
        thread->parent    = cur_thread;
        thread->stack     = cur_thread->stack;
        thread->stack_top = cur_thread->stack_top;
        thread->cwd       = cur_thread->cwd;
        thread->root      = cur_thread->root;
        thread->umask     = cur_thread->umask;
        thread->exec      = cur_thread->exec;
        get_handle(cur_thread->exec);

        for (int i = 0 ; i < NUM_SIGS ; i++) {
            if (!cur_thread->signal_handles[i].action)
                continue;

            /* duplicate the parent's handler; the field points to a
               struct __kernel_sigaction, so copy that many bytes */
            thread->signal_handles[i].action =
                    remalloc(cur_thread->signal_handles[i].action,
                             sizeof(struct __kernel_sigaction));
        }

        memcpy(&thread->signal_mask, &cur_thread->signal_mask,
               sizeof(sigset_t));

        get_dentry(cur_thread->cwd);
        get_dentry(cur_thread->root);

        struct shim_handle_map * map = get_cur_handle_map(cur_thread);
        assert(map);
        set_handle_map(thread, map);
    } else {
        /* by default, ppid, pgid and tgid all equal the tid */
        thread->ppid = thread->pgid = thread->tgid = new_tid;
        path_lookupat(NULL, "/", 0, &thread->root);

        char dir_cfg[CONFIG_MAX];
        if (root_config &&
            get_config(root_config, "fs.start_dir", dir_cfg, CONFIG_MAX) > 0) {
            path_lookupat(NULL, dir_cfg, 0, &thread->cwd);
        } else if (thread->root) {
            get_dentry(thread->root);
            thread->cwd = thread->root;
        }
    }

    thread->vmid = cur_process.vmid;
    create_lock(thread->lock);
    thread->scheduler_event = DkNotificationEventCreate(1);
    thread->exit_event = DkNotificationEventCreate(0);
    thread->child_exit_event = DkNotificationEventCreate(0);
    return thread;
}

struct shim_thread * get_new_internal_thread (void)
{
    if (init_mem_mgr() < 0)
        return NULL;

    IDTYPE new_tid = get_internal_pid();
    assert(new_tid);

    struct shim_thread * thread = alloc_new_thread();
    if (!thread)
        return NULL;

    thread->vmid = cur_process.vmid;
    thread->tid = new_tid;
    thread->in_vm = thread->is_alive = true;
    create_lock(thread->lock);
    thread->exit_event = DkNotificationEventCreate(0);
    return thread;
}

struct shim_simple_thread * __lookup_simple_thread (IDTYPE tid)
{
    struct shim_simple_thread * tmp;

    list_for_each_entry(tmp, &simple_thread_list, list)
        if (tmp->tid == tid) {
            get_simple_thread(tmp);
            return tmp;
        }

    return NULL;
}

struct shim_simple_thread * lookup_simple_thread (IDTYPE tid)
{
    lock(thread_list_lock);
    struct shim_simple_thread * thread = __lookup_simple_thread(tid);
    unlock(thread_list_lock);
    return thread;
}

struct shim_simple_thread * get_new_simple_thread (void)
{
    struct shim_simple_thread * thread =
            malloc(sizeof(struct shim_simple_thread));
    if (!thread)
        return NULL;

    memset(thread, 0, sizeof(struct shim_simple_thread));
    INIT_LIST_HEAD(&thread->list);
    create_lock(thread->lock);
    thread->exit_event = DkNotificationEventCreate(0);
    return thread;
}

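/* Reference counting: get_thread()/put_thread() pair up around every stored
 * or returned thread pointer. When the count drops to zero, put_thread()
 * releases the executable handle, returns the tid to the allocator (internal
 * threads keep their tids out of the pid namespace), and frees the
 * descriptor back to the thread memory manager. A descriptor that arrived
 * through checkpoint migration is merely cleared, since it does not belong
 * to the local manager. */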
void get_thread (struct shim_thread * thread)
{
#ifdef DEBUG_REF
    int ref_count = REF_INC(thread->ref_count);
    debug("get_thread %p(%d) (ref_count = %d)\n", thread, thread->tid,
          ref_count);
#else
    REF_INC(thread->ref_count);
#endif
}

void put_thread (struct shim_thread * thread)
{
    int ref_count = REF_DEC(thread->ref_count);

#ifdef DEBUG_REF
    debug("put thread %p(%d) (ref_count = %d)\n", thread, thread->tid,
          ref_count);
#endif

    if (!ref_count) {
        if (thread->exec)
            put_handle(thread->exec);

        if (!IS_INTERNAL(thread))
            release_pid(thread->tid);

        if (MEMORY_MIGRATED(thread))
            memset(thread, 0, sizeof(struct shim_thread));
        else
            free_mem_obj_to_mgr(thread_mgr, thread);
    }
}

void get_simple_thread (struct shim_simple_thread * thread)
{
    REF_INC(thread->ref_count);
}

void put_simple_thread (struct shim_simple_thread * thread)
{
    int ref_count = REF_DEC(thread->ref_count);

    if (!ref_count) {
        list_del(&thread->list);
        free(thread);
    }
}

void set_as_child (struct shim_thread * parent,
                   struct shim_thread * child)
{
    if (!parent)
        parent = get_cur_thread();

    get_thread(parent);
    get_thread(child);

    lock(child->lock);
    child->ppid = parent->tid;
    child->parent = parent;

    lock(parent->lock);
    list_add_tail(&child->siblings, &parent->children);
    unlock(parent->lock);

    unlock(child->lock);
}

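/* add_thread()/del_thread(): insert into or remove from the global thread
 * list, which is kept sorted by tid (scanning backwards finds the insertion
 * point quickly in the common case of increasing tids). The list holds its
 * own reference on each thread; internal threads are never listed. */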
void add_thread (struct shim_thread * thread)
{
    if (IS_INTERNAL(thread) || !list_empty(&thread->list))
        return;

    struct shim_thread * tmp, * prev = NULL;
    lock(thread_list_lock);

    /* keep it sorted */
    list_for_each_entry_reverse(tmp, &thread_list, list) {
        if (tmp->tid == thread->tid) {
            unlock(thread_list_lock);
            return;
        }
        if (tmp->tid < thread->tid) {
            prev = tmp;
            break;
        }
    }

    get_thread(thread);
    list_add(&thread->list, prev ? &prev->list : &thread_list);
    unlock(thread_list_lock);
}

void del_thread (struct shim_thread * thread)
{
    if (IS_INTERNAL(thread) || list_empty(&thread->list))
        return;

    lock(thread_list_lock);
    list_del_init(&thread->list);
    unlock(thread_list_lock);
    put_thread(thread);
}

void add_simple_thread (struct shim_simple_thread * thread)
{
    if (!list_empty(&thread->list))
        return;

    struct shim_simple_thread * tmp, * prev = NULL;
    lock(thread_list_lock);

    /* keep it sorted */
    list_for_each_entry_reverse(tmp, &simple_thread_list, list) {
        if (tmp->tid == thread->tid) {
            unlock(thread_list_lock);
            return;
        }
        if (tmp->tid < thread->tid) {
            prev = tmp;
            break;
        }
    }

    get_simple_thread(thread);
    list_add(&thread->list, prev ? &prev->list : &simple_thread_list);
    unlock(thread_list_lock);
}

void del_simple_thread (struct shim_simple_thread * thread)
{
    if (list_empty(&thread->list))
        return;

    lock(thread_list_lock);
    list_del_init(&thread->list);
    unlock(thread_list_lock);
    put_simple_thread(thread);
}

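/* check_last_thread(): report whether any other live thread remains in this
 * process. Returns the tid of another live in-VM thread, or 0 if the caller
 * is the last one. */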
int check_last_thread (struct shim_thread * self)
{
    struct shim_thread * tmp;
    lock(thread_list_lock);

    /* find out whether there is any thread that is
       1) not the current thread, 2) in the current vm, and
       3) still alive */
    list_for_each_entry(tmp, &thread_list, list)
        if (tmp->tid &&
            (!self || tmp->tid != self->tid) && tmp->in_vm && tmp->is_alive) {
            debug("check_last_thread: thread %d is alive\n", tmp->tid);
            unlock(thread_list_lock);
            return tmp->tid;
        }

    debug("this is the only thread (%d)\n", self ? self->tid : 0);
    unlock(thread_list_lock);
    return 0;
}

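/* walk_thread_list(): invoke a callback on every listed thread while holding
 * thread_list_lock. A callback may drop the lock (e.g. to block); it signals
 * this by setting *unlocked, and the walk restarts from the next-higher tid,
 * which the sorted list makes cheap. The walk returns 0 if any callback
 * returned a positive value, -ESRCH if none did; any other negative return
 * from a callback aborts the walk with that error.
 *
 * A minimal callback sketch (hypothetical; counts live threads into arg):
 *
 *     static int count_alive (struct shim_thread * t, void * arg,
 *                             bool * unlocked)
 *     {
 *         if (t->is_alive)
 *             (*(int *) arg)++;
 *         return 1;   // positive return: this entry "matched"
 *     }
 */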
int walk_thread_list (int (*callback) (struct shim_thread *, void *, bool *),
                      void * arg, bool may_write)
{
    struct shim_thread * tmp, * n;
    bool srched = false;
    int ret;
    IDTYPE min_tid = 0;

relock:
    lock(thread_list_lock);

    list_for_each_entry_safe(tmp, n, &thread_list, list) {
        if (tmp->tid <= min_tid)
            continue;

        bool unlocked = false;
        ret = (*callback) (tmp, arg, &unlocked);

        if (ret < 0 && ret != -ESRCH) {
            if (unlocked)
                goto out;
            else
                goto out_locked;
        }

        if (ret > 0)
            srched = true;

        if (unlocked) {
            min_tid = tmp->tid;
            goto relock;
        }
    }

    ret = srched ? 0 : -ESRCH;
out_locked:
    unlock(thread_list_lock);
out:
    return ret;
}

int walk_simple_thread_list (int (*callback) (struct shim_simple_thread *,
                                              void *, bool *),
                             void * arg, bool may_write)
{
    struct shim_simple_thread * tmp, * n;
    bool srched = false;
    int ret;
    IDTYPE min_tid = 0;

relock:
    lock(thread_list_lock);

    list_for_each_entry_safe(tmp, n, &simple_thread_list, list) {
        if (tmp->tid <= min_tid)
            continue;

        bool unlocked = false;
        ret = (*callback) (tmp, arg, &unlocked);

        if (ret < 0 && ret != -ESRCH) {
            if (unlocked)
                goto out;
            else
                goto out_locked;
        }

        if (ret > 0)
            srched = true;

        if (unlocked) {
            min_tid = tmp->tid;
            goto relock;
        }
    }

    ret = srched ? 0 : -ESRCH;
out_locked:
    unlock(thread_list_lock);
out:
    return ret;
}

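/* switch_dummy_thread(): finish a vfork-style emulation. The "dummy" thread
 * has been running on the parent's stack; copy the saved stack contents back
 * for the real thread, make the real thread current again, and jump onto the
 * restored frame. The child's tid is left in %rax as the return value seen
 * by the resumed code. */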
void switch_dummy_thread (struct shim_thread * thread)
{
    struct shim_thread * real_thread = thread->dummy;
    IDTYPE child = thread->tid;

    assert(thread->frameptr);
    assert(real_thread->stack);
    assert(real_thread->stack_top > real_thread->stack);

    memcpy(thread->frameptr, real_thread->stack,
           real_thread->stack_top - real_thread->stack);

    real_thread->stack     = thread->stack;
    real_thread->stack_top = thread->stack_top;
    real_thread->frameptr  = thread->frameptr;

    DkThreadPrivate(real_thread->tcb);
    set_cur_thread(real_thread);

    debug("jump to the stack %p\n", real_thread->frameptr);
    debug("shim_vfork success (returning %d)\n", child);

    /* jump onto the old stack:
       we actually pop rbp as rsp, and later we will call 'ret' */
    asm volatile("movq %0, %%rbp\r\n"
                 "leaveq\r\n"
                 "retq\r\n" :
                 : "g"(real_thread->frameptr),
                   "a"(child)
                 : "memory");
}

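/* Checkpoint/migration support. MIGRATE_FUNC_BODY(thread) serializes a
 * thread descriptor (plus its signal handlers and its root/cwd paths) into
 * the checkpoint area; RESUME_FUNC_BODY(thread) rebuilds it on the receiving
 * side. The DEFINE_MIGRATE_FUNC/ADD_ENTRY/DO_MIGRATE machinery comes from
 * <shim_checkpoint.h>. */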
DEFINE_MIGRATE_FUNC(thread)

MIGRATE_FUNC_BODY(thread)
{
    assert(size == sizeof(struct shim_thread));

    struct shim_thread * thread = (struct shim_thread *) obj;
    struct shim_thread * new_thread = NULL;

    if (recursive) {
        struct shim_vma * vma = NULL;
        lookup_supervma(thread->stack, thread->stack_top - thread->stack,
                        &vma);
        assert(vma);
        DO_MIGRATE(vma, vma, NULL, true);
    }

    unsigned long off = ADD_TO_MIGRATE_MAP(obj, *offset, size);

    if (ENTRY_JUST_CREATED(off)) {
        ADD_OFFSET(sizeof(struct shim_thread));
        ADD_FUNC_ENTRY(*offset);
        ADD_ENTRY(SIZE, sizeof(struct shim_thread));

        if (!dry) {
            new_thread = (struct shim_thread *) (base + *offset);
            memcpy(new_thread, thread, sizeof(struct shim_thread));

            INIT_LIST_HEAD(&new_thread->children);
            INIT_LIST_HEAD(&new_thread->siblings);
            INIT_LIST_HEAD(&new_thread->exited_children);
            INIT_LIST_HEAD(&new_thread->list);

            new_thread->in_vm = false;
            new_thread->parent = NULL;
            new_thread->dummy = NULL;
            new_thread->handle_map = NULL;
            new_thread->root = NULL;
            new_thread->cwd = NULL;
            if (!recursive)
                new_thread->tcb = NULL;

            REF_SET(new_thread->ref_count, 0);
        }

        for (int i = 0 ; i < NUM_SIGS ; i++) {
            if (thread->signal_handles[i].action) {
                ADD_OFFSET(sizeof(struct __kernel_sigaction));
                if (!dry) {
                    new_thread->signal_handles[i].action
                        = (struct __kernel_sigaction *) (base + *offset);
                    memcpy(new_thread->signal_handles[i].action,
                           thread->signal_handles[i].action,
                           sizeof(struct __kernel_sigaction));
                }
            }
        }

        int rlen, clen;
        const char * rpath = dentry_get_path(thread->root, true, &rlen);
        const char * cpath = dentry_get_path(thread->cwd, true, &clen);
        char * new_rpath, * new_cpath;

        ADD_OFFSET(rlen + 1);
        ADD_ENTRY(ADDR, (new_rpath = (void *) (base + *offset)));
        ADD_OFFSET(clen + 1);
        ADD_ENTRY(ADDR, (new_cpath = (void *) (base + *offset)));

        if (!dry) {
            memcpy(new_rpath, rpath, rlen + 1);
            memcpy(new_cpath, cpath, clen + 1);
        }
    } else if (!dry) {
        new_thread = (struct shim_thread *) (base + off);
    }

    if (new_thread && objp)
        *objp = (void *) new_thread;

    DO_MIGRATE_MEMBER(handle, thread, new_thread, exec, 0);
    DO_MIGRATE_MEMBER_IF_RECURSIVE(handle_map, thread, new_thread,
                                   handle_map, 1);
}
END_MIGRATE_FUNC

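/* On resume, RESUME_REBASE() adjusts each checkpointed pointer by the offset
 * between the old and new mappings of the checkpoint area; after that, the
 * locks, events, and dentry references are recreated locally. */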
RESUME_FUNC_BODY(thread)
{
    unsigned long off = GET_FUNC_ENTRY();
    size_t size = GET_ENTRY(SIZE);
    assert(size == sizeof(struct shim_thread));
    struct shim_thread * thread = (struct shim_thread *) (base + off);

    RESUME_REBASE(thread->children);
    RESUME_REBASE(thread->siblings);
    RESUME_REBASE(thread->exited_children);
    RESUME_REBASE(thread->list);
    RESUME_REBASE(thread->exec);
    RESUME_REBASE(thread->handle_map);
    RESUME_REBASE(thread->signal_handles);

    const char * rpath = (const char *) GET_ENTRY(ADDR);
    const char * cpath = (const char *) GET_ENTRY(ADDR);
    RESUME_REBASE(rpath);
    RESUME_REBASE(cpath);
    path_lookupat(NULL, rpath, LOOKUP_OPEN, &thread->root);
    path_lookupat(NULL, cpath, LOOKUP_OPEN, &thread->cwd);

    create_lock(thread->lock);
    thread->scheduler_event = DkNotificationEventCreate(1);
    thread->exit_event = DkNotificationEventCreate(0);
    thread->child_exit_event = DkNotificationEventCreate(0);
    add_thread(thread);

    if (thread->exec)
        get_handle(thread->exec);
    if (thread->handle_map)
        get_handle_map(thread->handle_map);

#ifdef DEBUG_RESUME
    debug("thread: "
          "tid=%d,tgid=%d,parent=%d,stack=%p,frameptr=%p,tcb=%p\n",
          thread->tid, thread->tgid,
          thread->parent ? thread->parent->tid : thread->tid,
          thread->stack, thread->frameptr, thread->tcb);
#endif
}
END_RESUME_FUNC

DEFINE_MIGRATE_FUNC(running_thread)

MIGRATE_FUNC_BODY(running_thread)
{
    assert(size == sizeof(struct shim_thread));

    struct shim_thread * thread = (struct shim_thread *) obj;
    struct shim_thread * new_thread = NULL;
    struct shim_thread ** thread_obj = &new_thread;

    DO_MIGRATE(thread, thread, thread_obj, recursive);
    ADD_FUNC_ENTRY(new_thread);

    __libc_tcb_t * tcb = thread->tcb;
    if (tcb && lookup_supervma(tcb, sizeof(__libc_tcb_t), NULL) < 0) {
        ADD_OFFSET(sizeof(__libc_tcb_t));
        ADD_ENTRY(ADDR, base + *offset);
        if (!dry) {
            __libc_tcb_t * new_tcb = (void *) (base + *offset);
            memcpy(new_tcb, tcb, sizeof(__libc_tcb_t));
        }
    } else {
        ADD_ENTRY(ADDR, NULL);
    }
}
END_MIGRATE_FUNC

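/* resume_wrapper(): entry point for a PAL thread created to carry a migrated
 * thread. It reinstalls the thread's TLS, blocks until thread_start_event is
 * signaled (once resume setup is complete), then restores the saved register
 * context to continue where the thread was checkpointed. */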
int resume_wrapper (void * param)
{
    struct shim_thread * thread = (struct shim_thread *) param;
    assert(thread);

    __libc_tcb_t * libc_tcb = (__libc_tcb_t *) thread->tcb;
    assert(libc_tcb);
    shim_tcb_t * tcb = &libc_tcb->shim_tcb;
    assert(tcb->context.sp);

    thread->in_vm = thread->is_alive = true;
    allocate_tls(libc_tcb, thread);
    debug_setbuf(tcb, true);

    DkObjectsWaitAny(1, &thread_start_event, NO_TIMEOUT);

    restore_context(&tcb->context);
    return 0;
}

RESUME_FUNC_BODY(running_thread)
{
    struct shim_thread * thread = (void *) GET_FUNC_ENTRY();
    RESUME_REBASE(thread);
    struct shim_thread * cur_thread = get_cur_thread();
    thread->in_vm = true;
    get_thread(thread);

    void * new_tcb = (void *) GET_ENTRY(ADDR);
    if (new_tcb) {
        RESUME_REBASE(new_tcb);
        thread->tcb = new_tcb;
    }

    if (cur_thread) {
        PAL_HANDLE handle = DkThreadCreate(resume_wrapper, thread, 0);
        if (!handle)
            return -PAL_ERRNO;

        thread->pal_handle = handle;
    } else {
        __libc_tcb_t * libc_tcb = (__libc_tcb_t *) thread->tcb;

        if (libc_tcb) {
            shim_tcb_t * tcb = &libc_tcb->shim_tcb;
            assert(tcb->context.sp);
            tcb->debug_buf = SHIM_GET_TLS()->debug_buf;
            allocate_tls(libc_tcb, thread);
            debug_setprefix(tcb);
        } else {
            set_cur_thread(thread);
        }

        thread->in_vm = thread->is_alive = true;
        thread->pal_handle = PAL_CB(first_thread);
    }

#ifdef DEBUG_RESUME
    debug("thread %d is attached to the current process\n", thread->tid);
#endif
}
END_RESUME_FUNC

DEFINE_MIGRATE_FUNC(all_running_threads)

MIGRATE_FUNC_BODY(all_running_threads)
{
    struct shim_thread * thread;
    lock(thread_list_lock);

    list_for_each_entry(thread, &thread_list, list) {
        if (!thread->in_vm || !thread->is_alive)
            continue;

        DO_MIGRATE(running_thread, thread, NULL, recursive);
        DO_MIGRATE(handle_map, thread->handle_map, NULL, recursive);
    }

    unlock(thread_list_lock);
}
END_MIGRATE_FUNC

RESUME_FUNC_BODY(all_running_threads)
{
    /* nothing to do: each thread is resumed by its own
       running_thread entry */
}
END_RESUME_FUNC