/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_async.c
 *
 * This file contains functions to add asynchronous events triggered by timer.
 */
  18. #include <shim_internal.h>
  19. #include <shim_utils.h>
  20. #include <shim_thread.h>
  21. #include <pal.h>
  22. #include <list.h>
DEFINE_LIST(async_event);
/* One pending asynchronous event: a timer (expire_time != 0), a PAL handle
 * to wait on (object != NULL), or both. */
struct async_event {
    IDTYPE caller;                  /* tid of the installing thread, passed to callback */
    LIST_TYPE(async_event) list;    /* linkage on async_list */
    void (*callback) (IDTYPE caller, void * arg);
    void * arg;                     /* opaque argument forwarded to callback */
    PAL_HANDLE object;              /* optional PAL handle to poll; NULL for pure timers */
    unsigned long install_time;     /* installation timestamp; 0 for non-timer events */
    unsigned long expire_time;      /* absolute expiry time; 0 for non-timer events */
};
DEFINE_LISTP(async_event);

/* All pending events, protected by async_helper_lock. */
static LISTP_TYPE(async_event) async_list;

/* This variable can be read without the async_helper_lock held, but is always
 * modified with it held. */
static enum { HELPER_NOTALIVE, HELPER_ALIVE } async_helper_state;
static struct shim_thread * async_helper_thread;    /* the helper thread, if alive */
static AEVENTTYPE async_helper_event;               /* wakes the helper to rescan the list */
static struct shim_lock async_helper_lock;
  41. /* Returns remaining usecs */
  42. int64_t install_async_event (PAL_HANDLE object, unsigned long time,
  43. void (*callback) (IDTYPE caller, void * arg),
  44. void * arg)
  45. {
  46. struct async_event * event =
  47. malloc(sizeof(struct async_event));
  48. unsigned long install_time = DkSystemTimeQuery();
  49. int64_t rv = 0;
  50. debug("install async event at %lu\n", install_time);
  51. event->callback = callback;
  52. event->arg = arg;
  53. event->caller = get_cur_tid();
  54. event->object = object;
  55. event->install_time = time ? install_time : 0;
  56. event->expire_time = time ? install_time + time : 0;
  57. lock(&async_helper_lock);
  58. struct async_event * tmp;
  59. LISTP_FOR_EACH_ENTRY(tmp, &async_list, list) {
  60. if (event->expire_time && tmp->expire_time > event->expire_time)
  61. break;
  62. }
  63. /*
  64. * man page of alarm system call :
  65. * DESCRIPTION
  66. * alarm() arranges for a SIGALRM signal to be delivered to the
  67. * calling process in seconds seconds.
  68. * If seconds is zero, any pending alarm is canceled.
  69. * In any event any previously set alarm() is canceled.
  70. */
  71. if (!LISTP_EMPTY(&async_list)) {
  72. tmp = LISTP_FIRST_ENTRY(&async_list, struct async_event, list);
  73. tmp = tmp->list.prev;
  74. rv = tmp->expire_time - install_time;
  75. /*
  76. * any previously set alarm() is canceled.
  77. * There should be exactly only one timer pending
  78. */
  79. LISTP_DEL(tmp, &async_list, list);
  80. free(tmp);
  81. } else {
  82. tmp = NULL;
  83. }
  84. INIT_LIST_HEAD(event, list);
  85. if (!time) // If seconds is zero, any pending alarm is canceled.
  86. free(event);
  87. else
  88. LISTP_ADD_TAIL(event, &async_list, list);
  89. if (async_helper_state == HELPER_NOTALIVE)
  90. create_async_helper();
  91. unlock(&async_helper_lock);
  92. set_event(&async_helper_event, 1);
  93. return rv;
  94. }
  95. int init_async (void)
  96. {
  97. /* This is early enough in init that we can write this variable without
  98. * the lock. */
  99. async_helper_state = HELPER_NOTALIVE;
  100. create_lock(&async_helper_lock);
  101. create_event(&async_helper_event);
  102. return 0;
  103. }
/* Idle polling parameters: with no timer and no PAL handles to wait on, the
 * helper sleeps IDLE_SLEEP_TIME usecs per cycle and exits after
 * MAX_IDLE_CYCLES consecutive idle cycles. */
#define IDLE_SLEEP_TIME 1000
#define MAX_IDLE_CYCLES 100

/*
 * Body of the async helper thread. Waits on the wakeup event plus every
 * registered PAL handle, fires timer callbacks when they expire, and
 * rebuilds its wait list whenever the event list changes. Exits when
 * async_helper_state becomes HELPER_NOTALIVE or after being idle too long.
 */
static void shim_async_helper (void * arg)
{
    struct shim_thread * self = (struct shim_thread *) arg;
    if (!arg)
        return;

    /* Set up TLS and debug buffering for this internal thread. */
    __libc_tcb_t tcb;
    allocate_tls(&tcb, false, self);
    debug_setbuf(&tcb.shim_tcb, true);
    debug("set tcb to %p\n", &tcb);

    lock(&async_helper_lock);
    bool notme = (self != async_helper_thread);
    unlock(&async_helper_lock);

    /* Another helper replaced us before we got going; bow out quietly. */
    if (notme) {
        put_thread(self);
        DkThreadExit();
        return;
    }

    debug("async helper thread started\n");

    /* TSAI: we assume async helper thread will not drain the
       stack that PAL provides, so for efficiency, we don't
       swap any stack */
    unsigned long idle_cycles = 0;
    unsigned long latest_time;
    struct async_event * next_event = NULL;    /* earliest unexpired timer, if any */
    PAL_HANDLE async_event_handle = event_handle(&async_helper_event);

    /* local_objects[0] is always the wakeup event; [1..object_num] are the
     * PAL handles of registered events.
     * NOTE(review): object_num is never compared against object_list_size
     * below — more than 32 handle-bearing events would write past the end
     * of local_objects. TODO confirm upstream. */
    int object_list_size = 32, object_num;
    PAL_HANDLE polled;
    PAL_HANDLE * local_objects =
        malloc(sizeof(PAL_HANDLE) * (1 + object_list_size));
    local_objects[0] = async_event_handle;

    /* Jump into the loop to take an initial timestamp and build the list. */
    goto update_status;

    /* This loop should be careful to use a barrier after sleeping
     * to ensure that the while breaks once async_helper_state changes.
     */
    while (async_helper_state == HELPER_ALIVE) {
        unsigned long sleep_time;
        if (next_event) {
            /* Sleep exactly until the earliest pending timer expires. */
            sleep_time = next_event->expire_time - latest_time;
            idle_cycles = 0;
        } else if (object_num) {
            /* No timer but handles to watch: block indefinitely. */
            sleep_time = NO_TIMEOUT;
            idle_cycles = 0;
        } else {
            /* Nothing to do: short sleep that counts toward idle exit. */
            sleep_time = IDLE_SLEEP_TIME;
            idle_cycles++;
        }

        polled = DkObjectsWaitAny(object_num + 1, local_objects, sleep_time);
        COMPILER_BARRIER();

        if (!polled) {
            /* Wait timed out: the tracked timer (if any) has expired. */
            if (next_event) {
                debug("async event trigger at %lu\n",
                      next_event->expire_time);
                /* Callback runs without the lock held. */
                next_event->callback(next_event->caller, next_event->arg);
                lock(&async_helper_lock);
                /* DEP: Events can only be on the async list */
                LISTP_DEL(next_event, &async_list, list);
                free(next_event);
                goto update_list;
            }
            continue;
        }

        if (polled == async_event_handle) {
            /* Woken explicitly (install_async_event / terminate): rescan. */
            clear_event(&async_helper_event);
update_status:
            latest_time = DkSystemTimeQuery();
            if (async_helper_state == HELPER_NOTALIVE) {
                break;
            } else {
                lock(&async_helper_lock);
                goto update_list;
            }
        }

        /* A registered PAL handle signaled: find its event and fire the
         * callback, dropping the lock around the call. */
        struct async_event * tmp, * n;
        lock(&async_helper_lock);
        LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
            if (tmp->object == polled) {
                debug("async event trigger at %lu\n",
                      latest_time);
                unlock(&async_helper_lock);
                tmp->callback(tmp->caller, tmp->arg);
                lock(&async_helper_lock);
                break;
            }
        }

update_list:
        /* Reached with async_helper_lock held. Rebuild the wait list:
         * collect PAL handles, fire any timers that already expired, and
         * remember the earliest still-future timer in next_event. */
        next_event = NULL;
        object_num = 0;

        if (!LISTP_EMPTY(&async_list)) {
            struct async_event * tmp, * n;
            LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
                if (tmp->object) {
                    local_objects[object_num + 1] = tmp->object;
                    object_num++;
                }
                if (!tmp->install_time)
                    continue;   /* not a timer */
                if (tmp->expire_time > latest_time) {
                    /* Timers are expiry-ordered: first future one is next. */
                    next_event = tmp;
                    break;
                }
                /* Timer already expired: fire and remove it now. */
                debug("async event trigger at %lu (expire at %lu)\n",
                      latest_time, tmp->expire_time);
                LISTP_DEL(tmp, &async_list, list);
                unlock(&async_helper_lock);
                tmp->callback(tmp->caller, tmp->arg);
                free(tmp);
                lock(&async_helper_lock);
            }
            idle_cycles = 0;
        }

        unlock(&async_helper_lock);

        if (idle_cycles++ == MAX_IDLE_CYCLES) {
            debug("async helper thread reach helper cycle\n");
            /* walking away, if someone is issueing an event,
               they have to create another thread */
            break;
        }
    }

    /* Mark the helper dead and release our resources. */
    lock(&async_helper_lock);
    async_helper_state = HELPER_NOTALIVE;
    async_helper_thread = NULL;
    unlock(&async_helper_lock);
    put_thread(self);
    debug("async helper thread terminated\n");

    free(local_objects);
    DkThreadExit();
}
  233. /* This should be called with the async_helper_lock held */
  234. int create_async_helper (void)
  235. {
  236. int ret = 0;
  237. if (async_helper_state == HELPER_ALIVE)
  238. return 0;
  239. enable_locking();
  240. struct shim_thread * new = get_new_internal_thread();
  241. if (!new)
  242. return -ENOMEM;
  243. PAL_HANDLE handle = thread_create(shim_async_helper, new, 0);
  244. if (!handle) {
  245. ret = -PAL_ERRNO;
  246. async_helper_thread = NULL;
  247. async_helper_state = HELPER_NOTALIVE;
  248. put_thread(new);
  249. return ret;
  250. }
  251. new->pal_handle = handle;
  252. /* Publish new and update the state once fully initialized */
  253. async_helper_thread = new;
  254. async_helper_state = HELPER_ALIVE;
  255. return 0;
  256. }
  257. /*
  258. * On success, the reference to the thread of async helper is returned with
  259. * reference count incremented.
  260. * It's caller the responsibility to wait for its exit and release the
  261. * final reference to free related resources.
  262. * It's problematic for the thread itself to release its resources which it's
  263. * using. For example stack.
  264. * So defer releasing it after its exit and make the releasing the caller
  265. * responsibility.
  266. */
  267. struct shim_thread * terminate_async_helper (void)
  268. {
  269. if (async_helper_state != HELPER_ALIVE)
  270. return NULL;
  271. lock(&async_helper_lock);
  272. struct shim_thread * ret = async_helper_thread;
  273. if (ret)
  274. get_thread(ret);
  275. async_helper_state = HELPER_NOTALIVE;
  276. unlock(&async_helper_lock);
  277. set_event(&async_helper_event, 1);
  278. return ret;
  279. }