/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_thread.c
 *
 * This file contains code to maintain the bookkeeping of threads in the
 * library OS.
 */

#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_vma.h>
#include <shim_fs.h>
#include <shim_checkpoint.h>

#include <pal.h>
#include <linux_list.h>

#define THREAD_MGR_ALLOC    4

static LOCKTYPE thread_mgr_lock;

#define system_lock()       lock(thread_mgr_lock)
#define system_unlock()     unlock(thread_mgr_lock)
#define PAGE_SIZE           allocsize
#define OBJ_TYPE struct shim_thread
#include <memmgr.h>

static MEM_MGR thread_mgr = NULL;

static IDTYPE tid_alloc_idx __attribute_migratable = 0;

static LIST_HEAD(thread_list);
static LIST_HEAD(simple_thread_list);
LOCKTYPE thread_list_lock;

static IDTYPE internal_tid_alloc_idx = INTERNAL_TID_BASE;

PAL_HANDLE thread_start_event = NULL;

//#define DEBUG_REF

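/* Set up the global thread bookkeeping: the locks, the slab-like memory
 * manager for struct shim_thread, and (if no current thread exists yet) an
 * initial thread object bound to the PAL's first thread. */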
int init_thread (void)
{
    create_lock(thread_list_lock);
    create_lock(thread_mgr_lock);

    thread_mgr = create_mem_mgr(init_align_up(THREAD_MGR_ALLOC));
    if (!thread_mgr)
        return -ENOMEM;

    struct shim_thread * cur_thread = get_cur_thread();
    if (cur_thread)
        return 0;

    if (!(cur_thread = get_new_thread(0)))
        return -ENOMEM;

    cur_thread->in_vm = cur_thread->is_alive = true;
    get_thread(cur_thread);
    set_cur_thread(cur_thread);
    add_thread(cur_thread);
    cur_thread->pal_handle = PAL_CB(first_thread);
    return 0;
}

struct shim_thread * __lookup_thread (IDTYPE tid)
{
    struct shim_thread * tmp;

    list_for_each_entry(tmp, &thread_list, list)
        if (tmp->tid == tid) {
            get_thread(tmp);
            return tmp;
        }

    return NULL;
}

struct shim_thread * lookup_thread (IDTYPE tid)
{
    lock(thread_list_lock);
    struct shim_thread * thread = __lookup_thread(tid);
    unlock(thread_list_lock);
    return thread;
}

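/* Illustrative sketch (not in the original source): lookup_thread() returns
 * the thread with an extra reference taken, so a caller is expected to drop
 * it with put_thread() when done, e.g.:
 *
 *     struct shim_thread * t = lookup_thread(tid);
 *     if (t) {
 *         ... use t ...
 *         put_thread(t);
 *     }
 */
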
struct shim_thread * __get_cur_thread (void)
{
    return SHIM_THREAD_SELF();
}

shim_tcb_t * __get_cur_tcb (void)
{
    return SHIM_GET_TLS();
}

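/* Allocate a new thread ID. allocate_pid() hands out IDs from the range this
 * process currently owns; when the range is exhausted, ipc_pid_lease_send()
 * presumably leases a fresh range from the IPC namespace leader, and the
 * outer loop retries. Returns 0 on failure. */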
static IDTYPE get_pid (void)
{
    IDTYPE idx;

    while (1) {
        IDTYPE old_idx = tid_alloc_idx;
        IDTYPE max = 0;
        idx = old_idx + 1;

        do {
            if ((idx = allocate_pid(idx, max)))
                break;

            tid_alloc_idx = idx;
            if (!idx) {
                if (max == old_idx)
                    break;

                max = old_idx;
            }
        } while (idx != tid_alloc_idx);

        if (idx != tid_alloc_idx)
            break;

        if (ipc_pid_lease_send(NULL) < 0)
            return 0;
    }

    tid_alloc_idx = idx;
    return idx;
}

static IDTYPE get_internal_pid (void)
{
    lock(thread_list_lock);
    internal_tid_alloc_idx++;
    IDTYPE idx = internal_tid_alloc_idx;
    unlock(thread_list_lock);
    return idx;
}

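/* Lazily (re)create the thread memory manager. The new manager is allocated
 * outside the lock; if another thread won the race and installed its own,
 * ours is discarded. */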
static inline int init_mem_mgr (void)
{
    if (thread_mgr)
        return 0;

    MEM_MGR mgr = create_mem_mgr(init_align_up(THREAD_MGR_ALLOC));
    MEM_MGR old_mgr = NULL;

    lock(thread_mgr_lock);

    if (mgr) {
        if (thread_mgr) {
            old_mgr = mgr;
            mgr = thread_mgr;
        } else {
            thread_mgr = mgr;
        }
    }

    unlock(thread_mgr_lock);

    if (old_mgr)
        destroy_mem_mgr(old_mgr);

    return mgr ? 0 : -ENOMEM;
}

struct shim_thread * alloc_new_thread (void)
{
    struct shim_thread * thread =
            get_mem_obj_from_mgr_enlarge(thread_mgr,
                                         size_align_up(THREAD_MGR_ALLOC));
    if (!thread)
        return NULL;

    memset(thread, 0, sizeof(struct shim_thread));
    REF_SET(thread->ref_count, 1);
    INIT_LIST_HEAD(&thread->children);
    INIT_LIST_HEAD(&thread->siblings);
    INIT_LIST_HEAD(&thread->exited_children);
    INIT_LIST_HEAD(&thread->list);
    return thread;
}

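/* Create a thread object with the given TID (or a freshly allocated one if
 * new_tid is 0). If a current thread exists, the new thread inherits its
 * credentials, stack, working directory, signal disposition and handle map,
 * the way a forked or cloned task would; otherwise it becomes the root of a
 * new thread group. */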
struct shim_thread * get_new_thread (IDTYPE new_tid)
{
    if (init_mem_mgr() < 0)
        return NULL;

    if (!new_tid) {
        new_tid = get_pid();
        assert(new_tid);
    }

    struct shim_thread * thread = alloc_new_thread();
    if (!thread)
        return NULL;

    struct shim_thread * cur_thread = get_cur_thread();
    thread->tid = new_tid;

    if (cur_thread) {
        /* The newly created thread will be in the same thread group
           (and process group) as its parent */
        thread->pgid      = cur_thread->pgid;
        thread->ppid      = cur_thread->tgid;
        thread->tgid      = cur_thread->tgid;
        thread->uid       = cur_thread->uid;
        thread->gid       = cur_thread->gid;
        thread->euid      = cur_thread->euid;
        thread->egid      = cur_thread->egid;
        thread->parent    = cur_thread;
        thread->stack     = cur_thread->stack;
        thread->stack_top = cur_thread->stack_top;
        thread->stack_red = cur_thread->stack_red;
        thread->cwd       = cur_thread->cwd;
        thread->root      = cur_thread->root;
        thread->umask     = cur_thread->umask;
        thread->exec      = cur_thread->exec;
        get_handle(cur_thread->exec);

        for (int i = 0 ; i < NUM_SIGS ; i++) {
            if (!cur_thread->signal_handles[i].action)
                continue;

            /* duplicate the parent's sigaction (sized as the sigaction
               itself, matching the migrate path below, not as the handle
               wrapper) */
            thread->signal_handles[i].action =
                    remalloc(cur_thread->signal_handles[i].action,
                             sizeof(struct __kernel_sigaction));
        }

        memcpy(&thread->signal_mask, &cur_thread->signal_mask,
               sizeof(sigset_t));

        get_dentry(cur_thread->cwd);
        get_dentry(cur_thread->root);

        struct shim_handle_map * map = get_cur_handle_map(cur_thread);
        assert(map);
        set_handle_map(thread, map);
    } else {
        /* by default, pid, pgid and tgid all equal the tid */
        thread->ppid = thread->pgid = thread->tgid = new_tid;

        path_lookupat(NULL, "/", 0, &thread->root);

        char dir_cfg[CONFIG_MAX];
        if (root_config &&
            get_config(root_config, "fs.start_dir", dir_cfg, CONFIG_MAX) > 0) {
            path_lookupat(NULL, dir_cfg, 0, &thread->cwd);
        } else if (thread->root) {
            get_dentry(thread->root);
            thread->cwd = thread->root;
        }
    }

    thread->vmid = cur_process.vmid;
    create_lock(thread->lock);
    thread->scheduler_event = DkNotificationEventCreate(1);
    thread->exit_event = DkNotificationEventCreate(0);
    thread->child_exit_event = DkNotificationEventCreate(0);
    return thread;
}

struct shim_thread * get_new_internal_thread (void)
{
    if (init_mem_mgr() < 0)
        return NULL;

    IDTYPE new_tid = get_internal_pid();
    assert(new_tid);

    struct shim_thread * thread = alloc_new_thread();
    if (!thread)
        return NULL;

    thread->vmid = cur_process.vmid;
    thread->tid = new_tid;
    thread->in_vm = thread->is_alive = true;
    create_lock(thread->lock);
    thread->exit_event = DkNotificationEventCreate(0);
    return thread;
}

struct shim_simple_thread * __lookup_simple_thread (IDTYPE tid)
{
    struct shim_simple_thread * tmp;

    list_for_each_entry(tmp, &simple_thread_list, list)
        if (tmp->tid == tid) {
            get_simple_thread(tmp);
            return tmp;
        }

    return NULL;
}

struct shim_simple_thread * lookup_simple_thread (IDTYPE tid)
{
    lock(thread_list_lock);
    struct shim_simple_thread * thread = __lookup_simple_thread(tid);
    unlock(thread_list_lock);
    return thread;
}

struct shim_simple_thread * get_new_simple_thread (void)
{
    struct shim_simple_thread * thread =
            malloc(sizeof(struct shim_simple_thread));
    if (!thread)
        return NULL;

    memset(thread, 0, sizeof(struct shim_simple_thread));
    INIT_LIST_HEAD(&thread->list);
    create_lock(thread->lock);
    thread->exit_event = DkNotificationEventCreate(0);
    return thread;
}

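/* Reference counting: a thread object is freed once the last reference is
 * dropped; if its memory was migrated in from a checkpoint (and thus does
 * not belong to thread_mgr), it is merely wiped. */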
void get_thread (struct shim_thread * thread)
{
#ifdef DEBUG_REF
    int ref_count = REF_INC(thread->ref_count);

    debug("get_thread %p(%d) (ref_count = %d)\n", thread, thread->tid,
          ref_count);
#else
    REF_INC(thread->ref_count);
#endif
}

void put_thread (struct shim_thread * thread)
{
    int ref_count = REF_DEC(thread->ref_count);

#ifdef DEBUG_REF
    debug("put thread %p(%d) (ref_count = %d)\n", thread, thread->tid,
          ref_count);
#endif

    if (!ref_count) {
        if (thread->exec)
            put_handle(thread->exec);

        if (!IS_INTERNAL(thread))
            release_pid(thread->tid);

        if (MEMORY_MIGRATED(thread))
            memset(thread, 0, sizeof(struct shim_thread));
        else
            free_mem_obj_to_mgr(thread_mgr, thread);
    }
}

void get_simple_thread (struct shim_simple_thread * thread)
{
    REF_INC(thread->ref_count);
}

void put_simple_thread (struct shim_simple_thread * thread)
{
    int ref_count = REF_DEC(thread->ref_count);

    if (!ref_count) {
        list_del(&thread->list);
        free(thread);
    }
}

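/* Link 'child' under 'parent' (defaulting to the current thread). Both
 * threads gain a reference, and the child is appended to the parent's
 * children list. */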
void set_as_child (struct shim_thread * parent,
                   struct shim_thread * child)
{
    if (!parent)
        parent = get_cur_thread();

    get_thread(parent);
    get_thread(child);

    lock(child->lock);
    child->ppid = parent->tid;
    child->parent = parent;

    lock(parent->lock);
    list_add_tail(&child->siblings, &parent->children);
    unlock(parent->lock);

    unlock(child->lock);
}

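/* Insert a thread into the global list, which is kept sorted by TID so the
 * list walkers below can resume a scan from the last TID they visited.
 * Internal threads and threads already on the list are ignored. */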
void add_thread (struct shim_thread * thread)
{
    if (IS_INTERNAL(thread) || !list_empty(&thread->list))
        return;

    struct shim_thread * tmp, * prev = NULL;
    lock(thread_list_lock);

    /* keep it sorted */
    list_for_each_entry_reverse(tmp, &thread_list, list) {
        if (tmp->tid == thread->tid) {
            unlock(thread_list_lock);
            return;
        }
        if (tmp->tid < thread->tid) {
            prev = tmp;
            break;
        }
    }

    get_thread(thread);
    list_add(&thread->list, prev ? &prev->list : &thread_list);
    unlock(thread_list_lock);
}

void del_thread (struct shim_thread * thread)
{
    if (IS_INTERNAL(thread) || list_empty(&thread->list))
        return;

    lock(thread_list_lock);
    list_del_init(&thread->list);
    unlock(thread_list_lock);
    put_thread(thread);
}

void add_simple_thread (struct shim_simple_thread * thread)
{
    if (!list_empty(&thread->list))
        return;

    struct shim_simple_thread * tmp, * prev = NULL;
    lock(thread_list_lock);

    /* keep it sorted */
    list_for_each_entry_reverse(tmp, &simple_thread_list, list) {
        if (tmp->tid == thread->tid) {
            unlock(thread_list_lock);
            return;
        }
        if (tmp->tid < thread->tid) {
            prev = tmp;
            break;
        }
    }

    get_simple_thread(thread);
    list_add(&thread->list, prev ? &prev->list : &simple_thread_list);
    unlock(thread_list_lock);
}

void del_simple_thread (struct shim_simple_thread * thread)
{
    if (list_empty(&thread->list))
        return;

    lock(thread_list_lock);
    list_del_init(&thread->list);
    unlock(thread_list_lock);
    put_simple_thread(thread);
}

int check_last_thread (struct shim_thread * self)
{
    struct shim_thread * tmp;

    lock(thread_list_lock);

    /* find out whether there is any thread that is
       1) not the current thread, 2) in the current vm,
       and 3) still alive */
    list_for_each_entry(tmp, &thread_list, list)
        if (tmp->tid &&
            (!self || tmp->tid != self->tid) && tmp->in_vm && tmp->is_alive) {
            debug("check_last_thread: thread %d is alive\n", tmp->tid);
            unlock(thread_list_lock);
            return tmp->tid;
        }

    debug("this is the only thread %d\n", self->tid);
    unlock(thread_list_lock);
    return 0;
}

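/* Iterate over every thread in thread_list, invoking 'callback' on each.
 * A callback that returns a negative value other than -ESRCH aborts the
 * walk; a positive return marks the walk as successful. If the callback
 * drops thread_list_lock it must set *unlocked, and the walk reacquires the
 * lock and resumes after the last TID it visited (which is why the list is
 * kept sorted). Returns -ESRCH if no callback reported success. */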
int walk_thread_list (int (*callback) (struct shim_thread *, void *, bool *),
                      void * arg, bool may_write)
{
    struct shim_thread * tmp, * n;
    bool srched = false;
    int ret;
    IDTYPE min_tid = 0;

relock:
    lock(thread_list_lock);

    list_for_each_entry_safe(tmp, n, &thread_list, list) {
        if (tmp->tid <= min_tid)
            continue;

        bool unlocked = false;
        ret = (*callback) (tmp, arg, &unlocked);
        if (ret < 0 && ret != -ESRCH) {
            if (unlocked)
                goto out;
            else
                goto out_locked;
        }

        if (ret > 0)
            srched = true;

        if (unlocked) {
            min_tid = tmp->tid;
            goto relock;
        }
    }

    ret = srched ? 0 : -ESRCH;
out_locked:
    unlock(thread_list_lock);
out:
    return ret;
}

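/* Illustrative sketch (not in the original source): a walk_thread_list()
 * callback that counts live in-process threads via an int passed through
 * 'arg'. It never drops thread_list_lock, so it leaves *unlocked untouched,
 * and returns a positive value so the walk does not end with -ESRCH:
 *
 *     static int count_alive (struct shim_thread * thread, void * arg,
 *                             bool * unlocked)
 *     {
 *         if (thread->in_vm && thread->is_alive)
 *             (*(int *) arg)++;
 *         return 1;
 *     }
 *
 *     int nalive = 0;
 *     walk_thread_list(&count_alive, &nalive, false);
 */
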
int walk_simple_thread_list (int (*callback) (struct shim_simple_thread *,
                                              void *, bool *),
                             void * arg, bool may_write)
{
    struct shim_simple_thread * tmp, * n;
    bool srched = false;
    int ret;
    IDTYPE min_tid = 0;

relock:
    lock(thread_list_lock);

    list_for_each_entry_safe(tmp, n, &simple_thread_list, list) {
        if (tmp->tid <= min_tid)
            continue;

        bool unlocked = false;
        ret = (*callback) (tmp, arg, &unlocked);
        if (ret < 0 && ret != -ESRCH) {
            if (unlocked)
                goto out;
            else
                goto out_locked;
        }

        if (ret > 0)
            srched = true;

        if (unlocked) {
            min_tid = tmp->tid;
            goto relock;
        }
    }

    ret = srched ? 0 : -ESRCH;
out_locked:
    unlock(thread_list_lock);
out:
    return ret;
}

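/* Finish a vfork-style handoff: 'thread' is the dummy placeholder for the
 * child, and thread->dummy points to the real (parent) thread whose stack
 * was borrowed. The saved stack contents are copied back into place, the
 * parent's TLS is restored via DkThreadPrivate(), and control jumps onto the
 * saved frame with the child's TID in %rax as the return value. */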
void switch_dummy_thread (struct shim_thread * thread)
{
    struct shim_thread * real_thread = thread->dummy;
    IDTYPE child = thread->tid;

    assert(thread->frameptr);
    assert(real_thread->stack);
    assert(real_thread->stack_top > real_thread->stack);

    memcpy(thread->frameptr, real_thread->stack,
           real_thread->stack_top - real_thread->stack);

    real_thread->stack     = thread->stack;
    real_thread->stack_top = thread->stack_top;
    real_thread->frameptr  = thread->frameptr;

    DkThreadPrivate(real_thread->tcb);
    set_cur_thread(real_thread);

    debug("jump to the stack %p\n", real_thread->frameptr);
    debug("shim_vfork success (returning %d)\n", child);

    /* jump onto the old stack:
       we actually pop rbp as rsp, and later we will call 'ret' */
    asm volatile("movq %0, %%rbp\r\n"
                 "leaveq\r\n"
                 "retq\r\n" :
                 : "g"(real_thread->frameptr),
                   "a"(child)
                 : "memory");
}

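/* Checkpoint support: serialize a struct shim_thread into a migration
 * buffer. Per-process state (parent pointer, handle map, list linkage) is
 * reset in the copy; deep state such as the signal handlers and the
 * root/cwd paths is written out alongside and re-resolved on resume. */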
DEFINE_MIGRATE_FUNC(thread)

MIGRATE_FUNC_BODY(thread)
{
    assert(size == sizeof(struct shim_thread));

    struct shim_thread * thread = (struct shim_thread *) obj;
    struct shim_thread * new_thread = NULL;

    if (recursive) {
        struct shim_vma * vma = NULL;
        lookup_supervma(thread->stack, thread->stack_top - thread->stack,
                        &vma);
        assert(vma);
        DO_MIGRATE(vma, vma, NULL, true);
    }

    unsigned long off = ADD_TO_MIGRATE_MAP(obj, *offset, size);

    if (ENTRY_JUST_CREATED(off)) {
        ADD_OFFSET(sizeof(struct shim_thread));
        ADD_FUNC_ENTRY(*offset);
        ADD_ENTRY(SIZE, sizeof(struct shim_thread));

        if (!dry) {
            new_thread = (struct shim_thread *) (base + *offset);
            memcpy(new_thread, thread, sizeof(struct shim_thread));

            INIT_LIST_HEAD(&new_thread->children);
            INIT_LIST_HEAD(&new_thread->siblings);
            INIT_LIST_HEAD(&new_thread->exited_children);
            INIT_LIST_HEAD(&new_thread->list);

            new_thread->in_vm       = false;
            new_thread->parent      = NULL;
            new_thread->dummy       = NULL;
            new_thread->handle_map  = NULL;
            new_thread->root        = NULL;
            new_thread->cwd         = NULL;
            new_thread->robust_list = NULL;

            if (!recursive)
                new_thread->tcb = NULL;

            REF_SET(new_thread->ref_count, 0);
        }

        for (int i = 0 ; i < NUM_SIGS ; i++) {
            if (thread->signal_handles[i].action) {
                ADD_OFFSET(sizeof(struct __kernel_sigaction));

                if (!dry) {
                    new_thread->signal_handles[i].action
                            = (struct __kernel_sigaction *) (base + *offset);

                    memcpy(new_thread->signal_handles[i].action,
                           thread->signal_handles[i].action,
                           sizeof(struct __kernel_sigaction));
                }
            }
        }

        int rlen, clen;
        const char * rpath = dentry_get_path(thread->root, true, &rlen);
        const char * cpath = dentry_get_path(thread->cwd, true, &clen);
        char * new_rpath, * new_cpath;

        ADD_OFFSET(rlen + 1);
        ADD_ENTRY(ADDR, (new_rpath = (void *) (base + *offset)));
        ADD_OFFSET(clen + 1);
        ADD_ENTRY(ADDR, (new_cpath = (void *) (base + *offset)));

        if (!dry) {
            memcpy(new_rpath, rpath, rlen + 1);
            memcpy(new_cpath, cpath, clen + 1);
        }
    } else if (!dry) {
        new_thread = (struct shim_thread *) (base + off);
    }

    if (new_thread && objp)
        *objp = (void *) new_thread;

    DO_MIGRATE_MEMBER(handle, thread, new_thread, exec, 0);
    DO_MIGRATE_MEMBER_IF_RECURSIVE(handle_map, thread, new_thread,
                                   handle_map, 1);
}
END_MIGRATE_FUNC

RESUME_FUNC_BODY(thread)
{
    unsigned long off = GET_FUNC_ENTRY();
    size_t size = GET_ENTRY(SIZE);
    assert(size == sizeof(struct shim_thread));
    struct shim_thread * thread = (struct shim_thread *) (base + off);

    RESUME_REBASE(thread->children);
    RESUME_REBASE(thread->siblings);
    RESUME_REBASE(thread->exited_children);
    RESUME_REBASE(thread->list);
    RESUME_REBASE(thread->exec);
    RESUME_REBASE(thread->handle_map);
    RESUME_REBASE(thread->signal_handles);

    const char * rpath = (const char *) GET_ENTRY(ADDR);
    const char * cpath = (const char *) GET_ENTRY(ADDR);
    RESUME_REBASE(rpath);
    RESUME_REBASE(cpath);
    path_lookupat(NULL, rpath, LOOKUP_OPEN, &thread->root);
    path_lookupat(NULL, cpath, LOOKUP_OPEN, &thread->cwd);

    create_lock(thread->lock);
    thread->scheduler_event = DkNotificationEventCreate(1);
    thread->exit_event = DkNotificationEventCreate(0);
    thread->child_exit_event = DkNotificationEventCreate(0);

    add_thread(thread);

    if (thread->exec)
        get_handle(thread->exec);

    if (thread->handle_map)
        get_handle_map(thread->handle_map);

#ifdef DEBUG_RESUME
    debug("thread: "
          "tid=%d,tgid=%d,parent=%d,stack=%p,frameptr=%p,tcb=%p\n",
          thread->tid, thread->tgid,
          thread->parent ? thread->parent->tid : thread->tid,
          thread->stack, thread->frameptr, thread->tcb);
#endif
}
END_RESUME_FUNC

DEFINE_MIGRATE_FUNC(running_thread)

MIGRATE_FUNC_BODY(running_thread)
{
    assert(size == sizeof(struct shim_thread));

    struct shim_thread * thread = (struct shim_thread *) obj;
    struct shim_thread * new_thread = NULL;
    struct shim_thread ** thread_obj = &new_thread;

    DO_MIGRATE(thread, thread, thread_obj, recursive);
    ADD_FUNC_ENTRY(new_thread);

    __libc_tcb_t * tcb = thread->tcb;
    if (tcb && lookup_supervma(tcb, sizeof(__libc_tcb_t), NULL) < 0) {
        ADD_OFFSET(sizeof(__libc_tcb_t));
        ADD_ENTRY(ADDR, base + *offset);

        if (!dry) {
            __libc_tcb_t * new_tcb = (void *) (base + *offset);
            memcpy(new_tcb, tcb, sizeof(__libc_tcb_t));
        }
    } else {
        ADD_ENTRY(ADDR, NULL);
    }
}
END_MIGRATE_FUNC

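/* Entry point for a thread recreated from a checkpoint: set up its TLS,
 * wait until thread_start_event is signaled, then restore the saved
 * register context. */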
int resume_wrapper (void * param)
{
    struct shim_thread * thread = (struct shim_thread *) param;
    assert(thread);

    __libc_tcb_t * libc_tcb = (__libc_tcb_t *) thread->tcb;
    assert(libc_tcb);
    shim_tcb_t * tcb = &libc_tcb->shim_tcb;
    assert(tcb->context.sp);

    thread->in_vm = thread->is_alive = true;
    allocate_tls(libc_tcb, thread);
    debug_setbuf(tcb, true);

    DkObjectsWaitAny(1, &thread_start_event, NO_TIMEOUT);

    restore_context(&tcb->context);
    return 0;
}

RESUME_FUNC_BODY(running_thread)
{
    struct shim_thread * thread = (void *) GET_FUNC_ENTRY();
    RESUME_REBASE(thread);
    struct shim_thread * cur_thread = get_cur_thread();
    thread->in_vm = true;
    get_thread(thread);

    void * new_tcb = (void *) GET_ENTRY(ADDR);
    if (new_tcb) {
        RESUME_REBASE(new_tcb);
        thread->tcb = new_tcb;
    }

    if (cur_thread) {
        PAL_HANDLE handle = DkThreadCreate(resume_wrapper, thread, 0);
        if (!handle)
            return -PAL_ERRNO;

        thread->pal_handle = handle;
    } else {
        __libc_tcb_t * libc_tcb = (__libc_tcb_t *) thread->tcb;

        if (libc_tcb) {
            shim_tcb_t * tcb = &libc_tcb->shim_tcb;
            assert(tcb->context.sp);
            tcb->debug_buf = SHIM_GET_TLS()->debug_buf;
            allocate_tls(libc_tcb, thread);
            debug_setprefix(tcb);
        } else {
            set_cur_thread(thread);
        }

        thread->in_vm = thread->is_alive = true;
        thread->pal_handle = PAL_CB(first_thread);
    }

#ifdef DEBUG_RESUME
    debug("thread %d is attached to the current process\n", thread->tid);
#endif
}
END_RESUME_FUNC

DEFINE_MIGRATE_FUNC(all_running_threads)

MIGRATE_FUNC_BODY(all_running_threads)
{
    struct shim_thread * thread;

    lock(thread_list_lock);

    list_for_each_entry(thread, &thread_list, list) {
        if (!thread->in_vm || !thread->is_alive)
            continue;

        DO_MIGRATE(running_thread, thread, NULL, recursive);
        DO_MIGRATE(handle_map, thread->handle_map, NULL, recursive);
    }

    unlock(thread_list_lock);
}
END_MIGRATE_FUNC

RESUME_FUNC_BODY(all_running_threads)
{
    /* nothing to do: each running_thread entry is resumed individually */
}
END_RESUME_FUNC