/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/*
 * shim_async.c
 *
 * This file contains functions to add asynchronous events triggered by timers.
 */
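/*
 * Usage sketch (hypothetical caller, not part of this file): a subsystem
 * implementing something like alarm(2) could register a one-shot timer
 * callback roughly as follows, where `signal_alarm` and `delay` are the
 * caller's own names and `delay` is in the same units as DkSystemTimeQuery()
 * returns (microseconds, as this sketch assumes):
 *
 *     static void signal_alarm (IDTYPE caller, void * arg)
 *     {
 *         // deliver the pending signal to the thread identified by `caller`
 *     }
 *
 *     install_async_event(NULL, delay, &signal_alarm, NULL);
 *
 * Passing a NULL object with a nonzero time installs a pure timer; passing
 * a PAL handle with time == 0 installs an event that triggers when the
 * handle becomes signaled.
 */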
#include <shim_internal.h>
#include <shim_utils.h>
#include <shim_thread.h>

#include <pal.h>
#include <linux_list.h>
/* A pending asynchronous event: a timer, a pollable PAL handle, or both,
 * plus the callback to invoke when it triggers. */
struct async_event {
    IDTYPE              caller;        /* thread that installed the event */
    struct list_head    list;          /* link in async_list, sorted by expiry */
    void                (*callback) (IDTYPE caller, void * arg);
    void *              arg;
    PAL_HANDLE          object;        /* optional handle to wait on */
    unsigned long       install_time;  /* 0 if the event has no timeout */
    unsigned long       expire_time;   /* absolute expiry time, 0 if none */
};
/* List of pending events, kept sorted by expire_time */
static LIST_HEAD(async_list);

/* State of the helper thread that services async_list */
enum { HELPER_NOTALIVE, HELPER_ALIVE };

static struct shim_atomic   async_helper_state;
static struct shim_thread * async_helper_thread;
static AEVENTTYPE           async_helper_event;

static LOCKTYPE async_helper_lock;
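/*
 * Helper lifecycle (summary of the code below): init_async() sets up the
 * lock and the wakeup event. The first install_async_event() finds
 * async_helper_state == HELPER_NOTALIVE and calls create_async_helper(),
 * which spawns shim_async_helper() as an internal thread. The helper resets
 * the state to HELPER_NOTALIVE and exits either when terminate_async_helper()
 * is called or after MAX_IDLE_CYCLES idle polling rounds.
 */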
int install_async_event (PAL_HANDLE object, unsigned long time,
                         void (*callback) (IDTYPE caller, void * arg),
                         void * arg)
{
    struct async_event * event = malloc(sizeof(struct async_event));
    if (!event)
        return -ENOMEM;

    unsigned long install_time = DkSystemTimeQuery();

    debug("install async event at %llu\n", install_time);

    event->callback     = callback;
    event->arg          = arg;
    event->caller       = get_cur_tid();
    event->object       = object;
    event->install_time = time ? install_time : 0;
    event->expire_time  = time ? install_time + time : 0;

    lock(async_helper_lock);

    /* insert the event so that async_list stays sorted by expire_time */
    struct async_event * tmp;
    struct list_head * prev = &async_list;

    list_for_each_entry(tmp, &async_list, list) {
        if (event->expire_time && tmp->expire_time > event->expire_time)
            break;
        prev = &tmp->list;
    }

    INIT_LIST_HEAD(&event->list);
    list_add(&event->list, prev);

    unlock(async_helper_lock);

    /* make sure a helper thread exists, then wake it so it picks up
       the new event */
    if (atomic_read(&async_helper_state) == HELPER_NOTALIVE)
        create_async_helper();

    set_event(&async_helper_event, 1);
    return 0;
}
int init_async (void)
{
    atomic_set(&async_helper_state, HELPER_NOTALIVE);
    create_lock(async_helper_lock);
    create_event(&async_helper_event);
    return 0;
}
#define IDLE_SLEEP_TIME     1000
#define MAX_IDLE_CYCLES     100
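/*
 * Each round of the helper loop below waits on async_helper_event plus every
 * registered PAL handle via DkObjectsWaitAny(). The timeout is the delay
 * until the next timer expires, NO_TIMEOUT if only handles are being watched,
 * or IDLE_SLEEP_TIME when nothing is pending; after MAX_IDLE_CYCLES idle
 * rounds the helper gives up and exits.
 */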
static void shim_async_helper (void * arg)
{
    struct shim_thread * self = (struct shim_thread *) arg;
    if (!arg)
        return;

    __libc_tcb_t tcb;
    allocate_tls(&tcb, false, self);
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

    lock(async_helper_lock);
    bool notme = (self != async_helper_thread);
    unlock(async_helper_lock);

    if (notme) {
        put_thread(self);
        DkThreadExit();
        return;
    }

    debug("async helper thread started\n");

    /* TSAI: we assume async helper thread will not drain the
       stack that PAL provides, so for efficiency, we don't
       swap any stack */
    unsigned long idle_cycles = 0;
    unsigned long latest_time;
    struct async_event * next_event = NULL;

    PAL_HANDLE async_event_handle = event_handle(&async_helper_event);

    int object_list_size = 32, object_num;
    PAL_HANDLE polled;
    PAL_HANDLE * local_objects =
            malloc(sizeof(PAL_HANDLE) * (1 + object_list_size));
    if (!local_objects) {
        /* without the poll array the helper cannot run; shut down cleanly */
        atomic_set(&async_helper_state, HELPER_NOTALIVE);
        lock(async_helper_lock);
        async_helper_thread = NULL;
        unlock(async_helper_lock);
        put_thread(self);
        DkThreadExit();
        return;
    }
    local_objects[0] = async_event_handle;

    goto update_status;

    while (atomic_read(&async_helper_state) == HELPER_ALIVE) {
        unsigned long sleep_time;

        if (next_event) {
            sleep_time = next_event->expire_time - latest_time;
            idle_cycles = 0;
        } else if (object_num) {
            sleep_time = NO_TIMEOUT;
            idle_cycles = 0;
        } else {
            sleep_time = IDLE_SLEEP_TIME;
            idle_cycles++;
        }

        polled = DkObjectsWaitAny(object_num + 1, local_objects, sleep_time);

        if (!polled) {
            /* timed out: the next timer event (if any) has expired */
            if (next_event) {
                debug("async event trigger at %llu\n",
                      next_event->expire_time);

                next_event->callback(next_event->caller, next_event->arg);

                lock(async_helper_lock);
                /* release the expired event and rescan the list */
                list_del(&next_event->list);
                free(next_event);
                goto update_list;
            }
            continue;
        }

        if (polled == async_event_handle) {
            /* woken by install_async_event() or terminate_async_helper() */
            clear_event(&async_helper_event);
update_status:
            latest_time = DkSystemTimeQuery();
            if (atomic_read(&async_helper_state) == HELPER_NOTALIVE) {
                break;
            } else {
                lock(async_helper_lock);
                goto update_list;
            }
        }

        /* one of the registered PAL handles was signaled */
        struct async_event * tmp, * n;

        lock(async_helper_lock);

        list_for_each_entry_safe(tmp, n, &async_list, list) {
            if (tmp->object == polled) {
                debug("async event trigger at %llu\n",
                      latest_time);
                unlock(async_helper_lock);
                tmp->callback(tmp->caller, tmp->arg);
                lock(async_helper_lock);
                break;
            }
        }

update_list:
        /* rebuild the poll list and find the next timer to expire
           (async_helper_lock is held here) */
        next_event = NULL;
        object_num = 0;

        if (!list_empty(&async_list)) {
            struct async_event * tmp, * n;

            list_for_each_entry_safe(tmp, n, &async_list, list) {
                /* local_objects holds at most object_list_size handles;
                   further handle-based events are not polled this round */
                if (tmp->object && object_num < object_list_size) {
                    local_objects[object_num + 1] = tmp->object;
                    object_num++;
                }

                if (!tmp->install_time)
                    continue;

                if (tmp->expire_time > latest_time) {
                    next_event = tmp;
                    break;
                }

                /* already expired: fire the callback now */
                debug("async event trigger at %llu (expire at %llu)\n",
                      latest_time, tmp->expire_time);
                list_del(&tmp->list);
                unlock(async_helper_lock);
                tmp->callback(tmp->caller, tmp->arg);
                free(tmp);
                lock(async_helper_lock);
            }

            idle_cycles = 0;
        }

        unlock(async_helper_lock);

        if (idle_cycles++ == MAX_IDLE_CYCLES) {
            debug("async helper thread reached its idle cycle limit\n");
            /* walking away; if someone is issuing an event,
               they have to create another thread */
            break;
        }
    }

    atomic_set(&async_helper_state, HELPER_NOTALIVE);
    lock(async_helper_lock);
    async_helper_thread = NULL;
    unlock(async_helper_lock);
    put_thread(self);
    debug("async helper thread terminated\n");

    DkThreadExit();
}
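/*
 * If the helper has exited after being idle, the next install_async_event()
 * observes HELPER_NOTALIVE and calls create_async_helper() below to spawn a
 * fresh helper thread.
 */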
int create_async_helper (void)
{
    int ret = 0;

    if (atomic_read(&async_helper_state) == HELPER_ALIVE)
        return 0;

    enable_locking();

    struct shim_thread * new = get_new_internal_thread();
    if (!new)
        return -ENOMEM;

    lock(async_helper_lock);

    /* recheck under the lock in case another thread created the helper
       between the fast-path check above and here */
    if (atomic_read(&async_helper_state) == HELPER_ALIVE) {
        unlock(async_helper_lock);
        put_thread(new);
        return 0;
    }

    async_helper_thread = new;
    atomic_xchg(&async_helper_state, HELPER_ALIVE);
    unlock(async_helper_lock);

    PAL_HANDLE handle = thread_create(shim_async_helper, new, 0);

    if (!handle) {
        ret = -PAL_ERRNO;
        /* roll back: no helper thread is running after all */
        lock(async_helper_lock);
        async_helper_thread = NULL;
        atomic_xchg(&async_helper_state, HELPER_NOTALIVE);
        unlock(async_helper_lock);
        put_thread(new);
        return ret;
    }

    new->pal_handle = handle;
    return 0;
}
int terminate_async_helper (void)
{
    if (atomic_read(&async_helper_state) != HELPER_ALIVE)
        return 0;

    lock(async_helper_lock);
    atomic_xchg(&async_helper_state, HELPER_NOTALIVE);
    unlock(async_helper_lock);

    /* wake the helper thread so it notices the state change and exits */
    set_event(&async_helper_event, 1);
    return 0;
}