/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_async.c
 *
 * This file contains functions to add asynchronous events triggered by timers
 * or async IO.
 */
#include <list.h>
#include <pal.h>
#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_utils.h>

#define IDLE_SLEEP_TIME 1000
#define MAX_IDLE_CYCLES 100

DEFINE_LIST(async_event);
struct async_event {
    IDTYPE caller; /* thread installing this event */
    LIST_TYPE(async_event) list;
    void (*callback)(IDTYPE caller, void* arg);
    void* arg;
    PAL_HANDLE object;    /* handle (async IO) to wait on */
    uint64_t expire_time; /* alarm/timer to wait on */
};
DEFINE_LISTP(async_event);
static LISTP_TYPE(async_event) async_list;

/* can be read without async_helper_lock but always written with lock held */
static enum { HELPER_NOTALIVE, HELPER_ALIVE } async_helper_state;
static struct shim_thread* async_helper_thread;

static struct shim_lock async_helper_lock;

static AEVENTTYPE install_new_event;

static int create_async_helper(void);
/* Threads register async events like alarm(), setitimer(), ioctl(FIOASYNC)
 * using this function. These events are enqueued in async_list and delivered
 * to the Async Helper thread by triggering install_new_event. When an event
 * is triggered in the Async Helper thread, the corresponding event's callback
 * is invoked with argument `arg`. This callback typically sends a signal to
 * the thread that registered the event (saved in `event->caller`).
 *
 * We distinguish between alarm/timer events and async IO events:
 * - alarm/timer events set object = NULL and time = microseconds
 *   (time = 0 cancels all pending alarms/timers).
 * - async IO events set object = handle and time = 0.
 *
 * The function returns the remaining usecs for alarm/timer events (analogous
 * to alarm(), but in microseconds) or 0 for async IO events. On error, it
 * returns a negated error code.
 */
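
/* As an illustration only, an alarm(seconds) emulation might call this
 * function roughly as follows. This is a sketch: `signal_alarm` is a
 * hypothetical callback (the real callback lives elsewhere in the LibOS), and
 * the seconds-to-microseconds conversion assumes `time` uses the same
 * microsecond unit as DkSystemTimeQuery():
 *
 *     static void signal_alarm(IDTYPE caller, void* arg) {
 *         // hypothetical: deliver SIGALRM to the `caller` thread
 *     }
 *
 *     int64_t ret = install_async_event(NULL, seconds * 1000000ULL,
 *                                       &signal_alarm, NULL);
 *     // ret >= 0: microseconds that were left on the previous pending alarm
 *     // ret <  0: negated error code
 */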
int64_t install_async_event(PAL_HANDLE object, uint64_t time,
                            void (*callback)(IDTYPE caller, void* arg), void* arg) {
    /* if event happens on object, time must be zero */
    assert(!object || (object && !time));

    uint64_t now = DkSystemTimeQuery();
    if ((int64_t)now < 0) {
        return (int64_t)now;
    }

    uint64_t max_prev_expire_time = now;

    struct async_event* event = malloc(sizeof(struct async_event));
    if (!event) {
        return -ENOMEM;
    }

    event->callback = callback;
    event->arg = arg;
    event->caller = get_cur_tid();
    event->object = object;
    event->expire_time = time ? now + time : 0;

    lock(&async_helper_lock);

    if (callback != &cleanup_thread && !object) {
        /* This is alarm() or setitimer() emulation, treat both according to
         * alarm() syscall semantics: cancel any pending alarm/timer. */
        struct async_event* tmp;
        struct async_event* n;
        LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
            if (tmp->expire_time) {
                /* this is a pending alarm/timer, cancel it and save its expiration time */
                if (max_prev_expire_time < tmp->expire_time)
                    max_prev_expire_time = tmp->expire_time;

                LISTP_DEL(tmp, &async_list, list);
                free(tmp);
            }
        }

        if (!time) {
            /* This is alarm(0), we cancelled all pending alarms/timers
             * and user doesn't want to set a new alarm: we are done. */
            free(event);
            unlock(&async_helper_lock);
            return max_prev_expire_time - now;
        }
    }

    INIT_LIST_HEAD(event, list);
    LISTP_ADD_TAIL(event, &async_list, list);

    if (async_helper_state == HELPER_NOTALIVE) {
        int ret = create_async_helper();
        if (ret < 0) {
            unlock(&async_helper_lock);
            return ret;
        }
    }

    unlock(&async_helper_lock);

    debug("Installed async event at %lu\n", now);
    set_event(&install_new_event, 1);
    return max_prev_expire_time - now;
}
int init_async(void) {
    /* early enough in init, can write global vars without the lock */
    async_helper_state = HELPER_NOTALIVE;
    create_lock(&async_helper_lock);
    create_event(&install_new_event);

    /* enable locking mechanisms since we are going in multi-threaded mode */
    enable_locking();

    return 0;
}
static void shim_async_helper(void* arg) {
    struct shim_thread* self = (struct shim_thread*)arg;
    if (!arg)
        return;

    shim_tcb_init();
    set_cur_thread(self);
    update_fs_base(0);
    debug_setbuf(shim_get_tcb(), true);

    lock(&async_helper_lock);
    bool notme = (self != async_helper_thread);
    unlock(&async_helper_lock);

    if (notme) {
        put_thread(self);
        DkThreadExit(/*clear_child_tid=*/NULL);
        return;
    }

    /* Assume async helper thread will not drain the stack that PAL provides,
     * so for efficiency we don't swap the stack. */
    debug("Async helper thread started\n");
    /* Simple heuristic to avoid burning cycles when no async events are
     * installed: the async helper thread sleeps for IDLE_SLEEP_TIME per cycle
     * and, after MAX_IDLE_CYCLES cycles with nothing to do, exits. It is
     * re-spawned when some thread installs a new event. */
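    /* With the constants defined above (IDLE_SLEEP_TIME = 1000 and
     * MAX_IDLE_CYCLES = 100), and assuming the DkStreamsWaitEvents() timeout
     * below is interpreted in microseconds (the same unit as
     * DkSystemTimeQuery()), the helper tolerates roughly
     * 1000 * 100 = 100,000 us = 0.1 s of idleness before exiting. */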
    uint64_t idle_cycles = 0;

    /* init `pals` so that it always contains at least install_new_event:
     * slot 0 permanently holds install_new_event, and IO objects fill
     * slots 1..pals_cnt */
    size_t pals_max_cnt = 32;
    PAL_HANDLE* pals = malloc(sizeof(*pals) * (1 + pals_max_cnt));
    if (!pals) {
        debug("Allocation of pals failed\n");
        goto out_err;
    }

    /* allocate one memory region to hold two PAL_FLG arrays: events and revents */
    PAL_FLG* pal_events = malloc(sizeof(*pal_events) * (1 + pals_max_cnt) * 2);
    if (!pal_events) {
        debug("Allocation of pal_events failed\n");
        goto out_err;
    }
    PAL_FLG* ret_events = pal_events + 1 + pals_max_cnt;

    PAL_HANDLE install_new_event_pal = event_handle(&install_new_event);
    pals[0] = install_new_event_pal;
    pal_events[0] = PAL_WAIT_READ;
    ret_events[0] = 0;
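
    /* Each iteration of the main loop below has four phases: (1) under the
     * lock, repopulate `pals` from async_list and compute the next alarm/timer
     * expiration; (2) wait on all handles with the computed timeout; (3) under
     * the lock, move every triggered event onto a local `triggered` list;
     * (4) with the lock released, invoke the callbacks, so a callback may
     * safely re-enter install_async_event (which takes the same lock). */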
    while (true) {
        uint64_t now = DkSystemTimeQuery();
        if ((int64_t)now < 0) {
            debug("DkSystemTimeQuery failed with: %ld\n", (int64_t)now);
            goto out_err;
        }

        lock(&async_helper_lock);
        if (async_helper_state != HELPER_ALIVE) {
            async_helper_thread = NULL;
            unlock(&async_helper_lock);
            break;
        }

        uint64_t next_expire_time = 0;
        size_t pals_cnt = 0;

        struct async_event* tmp;
        struct async_event* n;
        LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
            /* repopulate `pals` with IO events and find the next expiring alarm/timer */
            if (tmp->object) {
                if (pals_cnt == pals_max_cnt) {
                    /* grow `pals` to accommodate more objects */
                    PAL_HANDLE* tmp_pals = malloc(sizeof(*tmp_pals) * (1 + pals_max_cnt * 2));
                    if (!tmp_pals) {
                        debug("tmp_pals allocation failed\n");
                        goto out_err_unlock;
                    }
                    PAL_FLG* tmp_pal_events =
                        malloc(sizeof(*tmp_pal_events) * (2 + pals_max_cnt * 4));
                    if (!tmp_pal_events) {
                        debug("tmp_pal_events allocation failed\n");
                        goto out_err_unlock;
                    }
                    PAL_FLG* tmp_ret_events = tmp_pal_events + 1 + pals_max_cnt * 2;

                    memcpy(tmp_pals, pals, sizeof(*tmp_pals) * (1 + pals_max_cnt));
                    memcpy(tmp_pal_events, pal_events, sizeof(*tmp_pal_events) * (1 + pals_max_cnt));
                    memcpy(tmp_ret_events, ret_events, sizeof(*tmp_ret_events) * (1 + pals_max_cnt));

                    pals_max_cnt *= 2;

                    free(pals);
                    free(pal_events);

                    pals = tmp_pals;
                    pal_events = tmp_pal_events;
                    ret_events = tmp_ret_events;
                }
                pals[pals_cnt + 1] = tmp->object;
                pal_events[pals_cnt + 1] = PAL_WAIT_READ;
                ret_events[pals_cnt + 1] = 0;
                pals_cnt++;
            } else if (tmp->expire_time && tmp->expire_time > now) {
                if (!next_expire_time || next_expire_time > tmp->expire_time) {
                    /* use time of the next expiring alarm/timer */
                    next_expire_time = tmp->expire_time;
                }
            }
        }

        uint64_t sleep_time;
        if (next_expire_time) {
            sleep_time = next_expire_time - now;
            idle_cycles = 0;
        } else if (pals_cnt) {
            sleep_time = NO_TIMEOUT;
            idle_cycles = 0;
        } else {
            /* no async IO events and no timers/alarms: thread is idling */
            sleep_time = IDLE_SLEEP_TIME;
            idle_cycles++;
        }

        if (idle_cycles == MAX_IDLE_CYCLES) {
            async_helper_state = HELPER_NOTALIVE;
            async_helper_thread = NULL;
            unlock(&async_helper_lock);
            debug("Async helper thread has been idle for some time; stopping it\n");
            break;
        }

        unlock(&async_helper_lock);

        /* wait on async IO events + install_new_event + next expiring alarm/timer */
        PAL_BOL polled = DkStreamsWaitEvents(pals_cnt + 1, pals, pal_events, ret_events, sleep_time);

        now = DkSystemTimeQuery();
        if ((int64_t)now < 0) {
            debug("DkSystemTimeQuery failed with: %ld\n", (int64_t)now);
            goto out_err;
        }

        LISTP_TYPE(async_event) triggered;
        INIT_LISTP(&triggered);

        /* acquire lock because we read/modify async_list below */
        lock(&async_helper_lock);
        for (size_t i = 0; polled && i < pals_cnt + 1; i++) {
            if (ret_events[i]) {
                if (pals[i] == install_new_event_pal) {
                    /* some thread wants to install new event; this event is found in async_list,
                     * so just re-init install_new_event */
                    clear_event(&install_new_event);
                    continue;
                }

                /* check if this event is an IO event found in async_list */
                LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
                    if (tmp->object == pals[i]) {
                        debug("Async IO event triggered at %lu\n", now);
                        /* unlink the event from async_list before moving it to the
                         * local `triggered` list; each event has a single embedded
                         * link field, so it cannot be on both lists at once */
                        LISTP_DEL(tmp, &async_list, list);
                        LISTP_ADD_TAIL(tmp, &triggered, list);
                        break;
                    }
                }
            }
        }
        /* check if exit-child or alarm/timer events were triggered */
        LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
            if (tmp->callback == &cleanup_thread) {
                debug("Thread exited, cleaning up\n");
                LISTP_DEL(tmp, &async_list, list);
                LISTP_ADD_TAIL(tmp, &triggered, list);
            } else if (tmp->expire_time && tmp->expire_time <= now) {
                debug("Alarm/timer triggered at %lu (expired at %lu)\n", now, tmp->expire_time);
                LISTP_DEL(tmp, &async_list, list);
                LISTP_ADD_TAIL(tmp, &triggered, list);
            }
        }

        unlock(&async_helper_lock);

        /* call callbacks for all triggered events */
        if (!LISTP_EMPTY(&triggered)) {
            LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &triggered, list) {
                LISTP_DEL(tmp, &triggered, list);
                tmp->callback(tmp->caller, tmp->arg);
                if (!tmp->object) {
                    /* this is a one-off exit-child or alarm/timer event */
                    free(tmp);
                }
            }
        }
    }
    __disable_preempt(self->shim_tcb);
    put_thread(self);
    debug("Async helper thread terminated\n");

    free(pals);
    free(pal_events);

    DkThreadExit(/*clear_child_tid=*/NULL);
    return;

out_err_unlock:
    unlock(&async_helper_lock);
out_err:
    debug("Terminating the process due to a fatal error in async helper\n");
    put_thread(self);
    DkProcessExit(1);
}
/* this should be called with the async_helper_lock held */
static int create_async_helper(void) {
    assert(locked(&async_helper_lock));

    if (async_helper_state == HELPER_ALIVE)
        return 0;

    struct shim_thread* new = get_new_internal_thread();
    if (!new)
        return -ENOMEM;

    async_helper_thread = new;
    async_helper_state = HELPER_ALIVE;

    PAL_HANDLE handle = thread_create(shim_async_helper, new);
    if (!handle) {
        async_helper_thread = NULL;
        async_helper_state = HELPER_NOTALIVE;
        put_thread(new);
        return -PAL_ERRNO;
    }

    new->pal_handle = handle;
    return 0;
}
/* On success, a reference to the async helper thread is returned with its
 * refcount incremented. It is the caller's responsibility to wait for the
 * async helper's exit and then release the final reference to free related
 * resources (it is problematic for the thread itself to release its own
 * resources, e.g., its stack). */
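
/* A caller might use it roughly like this (a sketch: the exact way to wait
 * for the helper's exit depends on the PAL primitives available at the call
 * site, elided here):
 *
 *     struct shim_thread* helper = terminate_async_helper();
 *     if (helper) {
 *         ... wait until helper's PAL thread handle signals exit ...
 *         put_thread(helper);  // release the final reference
 *     }
 */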
struct shim_thread* terminate_async_helper(void) {
    lock(&async_helper_lock);

    if (async_helper_state != HELPER_ALIVE) {
        unlock(&async_helper_lock);
        return NULL;
    }

    struct shim_thread* ret = async_helper_thread;
    if (ret)
        get_thread(ret);

    async_helper_state = HELPER_NOTALIVE;
    unlock(&async_helper_lock);

    /* force wake up of async helper thread so that it exits */
    set_event(&install_new_event, 1);
    return ret;
}