/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_async.c
 *
 * This file contains functions to add asynchronous events triggered by timer.
 */
#include <list.h>
#include <pal.h>
#include <shim_internal.h>
#include <shim_thread.h>
#include <shim_utils.h>

/* When no events are installed, the helper thread sleeps in chunks of
 * IDLE_SLEEP_TIME (passed directly as a DkStreamsWaitEvents timeout) and
 * terminates itself after MAX_IDLE_CYCLES consecutive idle chunks. */
#define IDLE_SLEEP_TIME 1000
#define MAX_IDLE_CYCLES 100

DEFINE_LIST(async_event);
/* One pending asynchronous event. The two flavors are distinguished by
 * which field is set:
 *   - alarm/timer event:  object == NULL, expire_time != 0
 *   - async IO event:     object != NULL, expire_time == 0 */
struct async_event {
    IDTYPE caller; /* thread installing this event */
    LIST_TYPE(async_event) list;
    void (*callback)(IDTYPE caller, void* arg); /* invoked by helper on trigger */
    void* arg;                                  /* opaque argument passed to callback */
    PAL_HANDLE object;    /* handle (async IO) to wait on */
    uint64_t expire_time; /* alarm/timer to wait on (absolute time) */
};
DEFINE_LISTP(async_event);

/* Queue of pending events; accessed only with async_helper_lock held. */
static LISTP_TYPE(async_event) async_list;

/* Should be accessed with async_helper_lock held. */
static enum { HELPER_NOTALIVE, HELPER_ALIVE } async_helper_state;
static struct shim_thread* async_helper_thread;
static struct shim_lock async_helper_lock;

/* Event used to wake the helper thread whenever a new async event is
 * installed, so it can rebuild its wait set and recompute its timeout. */
static AEVENTTYPE install_new_event;

static int create_async_helper(void);
  42. /* Threads register async events like alarm(), setitimer(), ioctl(FIOASYNC)
  43. * using this function. These events are enqueued in async_list and delivered
  44. * to Async Helper thread by triggering install_new_event. When event is
  45. * triggered in Async Helper thread, the corresponding event's callback with
  46. * arguments `arg` is called. This callback typically sends a signal to the
  47. * thread who registered the event (saved in `event->caller`).
  48. *
  49. * We distinguish between alarm/timer events and async IO events:
  50. * - alarm/timer events set object = NULL and time = seconds
  51. * (time = 0 cancels all pending alarms/timers).
  52. * - async IO events set object = handle and time = 0.
  53. *
  54. * Function returns remaining usecs for alarm/timer events (same as alarm())
  55. * or 0 for async IO events. On error, it returns a negated error code.
  56. */
  57. int64_t install_async_event(PAL_HANDLE object, uint64_t time,
  58. void (*callback)(IDTYPE caller, void* arg), void* arg) {
  59. /* if event happens on object, time must be zero */
  60. assert(!object || (object && !time));
  61. uint64_t now = DkSystemTimeQuery();
  62. if ((int64_t)now < 0) {
  63. return (int64_t)now;
  64. }
  65. uint64_t max_prev_expire_time = now;
  66. struct async_event* event = malloc(sizeof(struct async_event));
  67. if (!event) {
  68. return -ENOMEM;
  69. }
  70. event->callback = callback;
  71. event->arg = arg;
  72. event->caller = get_cur_tid();
  73. event->object = object;
  74. event->expire_time = time ? now + time : 0;
  75. lock(&async_helper_lock);
  76. if (callback != &cleanup_thread && !object) {
  77. /* This is alarm() or setitimer() emulation, treat both according to
  78. * alarm() syscall semantics: cancel any pending alarm/timer. */
  79. struct async_event* tmp;
  80. struct async_event* n;
  81. LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
  82. if (tmp->expire_time) {
  83. /* this is a pending alarm/timer, cancel it and save its expiration time */
  84. if (max_prev_expire_time < tmp->expire_time)
  85. max_prev_expire_time = tmp->expire_time;
  86. LISTP_DEL(tmp, &async_list, list);
  87. free(tmp);
  88. }
  89. }
  90. if (!time) {
  91. /* This is alarm(0), we cancelled all pending alarms/timers
  92. * and user doesn't want to set a new alarm: we are done. */
  93. free(event);
  94. unlock(&async_helper_lock);
  95. return max_prev_expire_time - now;
  96. }
  97. }
  98. INIT_LIST_HEAD(event, list);
  99. LISTP_ADD_TAIL(event, &async_list, list);
  100. if (async_helper_state == HELPER_NOTALIVE) {
  101. int ret = create_async_helper();
  102. if (ret < 0) {
  103. unlock(&async_helper_lock);
  104. return ret;
  105. }
  106. }
  107. unlock(&async_helper_lock);
  108. debug("Installed async event at %lu\n", now);
  109. set_event(&install_new_event, 1);
  110. return max_prev_expire_time - now;
  111. }
  112. int init_async(void) {
  113. /* early enough in init, can write global vars without the lock */
  114. async_helper_state = HELPER_NOTALIVE;
  115. if (!create_lock(&async_helper_lock)) {
  116. return -ENOMEM;
  117. }
  118. create_event(&install_new_event);
  119. /* enable locking mechanisms since we are going in multi-threaded mode */
  120. enable_locking();
  121. return 0;
  122. }
/* Body of the Async Helper thread: repeatedly rebuilds a wait set from
 * async_list (IO handles + the install_new_event wakeup handle), waits on it
 * with a timeout derived from the next expiring alarm/timer, and dispatches
 * callbacks for whatever triggered. Exits when told to (HELPER_NOTALIVE) or
 * after being idle for MAX_IDLE_CYCLES. On fatal errors it terminates the
 * whole process. */
static void shim_async_helper(void* arg) {
    struct shim_thread* self = (struct shim_thread*)arg;
    if (!arg)
        return;

    /* set up TCB/thread state so shim internals work on this new thread */
    shim_tcb_init();
    set_cur_thread(self);
    update_fs_base(0);
    debug_setbuf(shim_get_tcb(), true);

    lock(&async_helper_lock);
    /* another helper may have been installed while we were starting up */
    bool notme = (self != async_helper_thread);
    unlock(&async_helper_lock);

    if (notme) {
        put_thread(self);
        DkThreadExit(/*clear_child_tid=*/NULL);
        return;
    }

    /* Assume async helper thread will not drain the stack that PAL provides,
     * so for efficiency we don't swap the stack. */
    debug("Async helper thread started\n");

    /* Simple heuristic to not burn cycles when no async events are installed:
     * async helper thread sleeps IDLE_SLEEP_TIME for MAX_IDLE_CYCLES and
     * if nothing happens, dies. It will be re-spawned if some thread wants
     * to install a new event. */
    uint64_t idle_cycles = 0;

    /* init `pals` so that it always contains at least install_new_event;
     * slot 0 is reserved for install_new_event, IO handles start at slot 1 */
    size_t pals_max_cnt = 32;
    PAL_HANDLE* pals = malloc(sizeof(*pals) * (1 + pals_max_cnt));
    if (!pals) {
        debug("Allocation of pals failed\n");
        goto out_err;
    }

    /* allocate one memory region to hold two PAL_FLG arrays: events and revents */
    PAL_FLG* pal_events = malloc(sizeof(*pal_events) * (1 + pals_max_cnt) * 2);
    if (!pal_events) {
        debug("Allocation of pal_events failed\n");
        goto out_err;
    }
    /* second half of the same region holds the returned events */
    PAL_FLG* ret_events = pal_events + 1 + pals_max_cnt;

    PAL_HANDLE install_new_event_pal = event_handle(&install_new_event);
    pals[0] = install_new_event_pal;
    pal_events[0] = PAL_WAIT_READ;
    ret_events[0] = 0;

    while (true) {
        uint64_t now = DkSystemTimeQuery();
        if ((int64_t)now < 0) {
            debug("DkSystemTimeQuery failed with: %ld\n", (int64_t)now);
            goto out_err;
        }

        lock(&async_helper_lock);
        if (async_helper_state != HELPER_ALIVE) {
            /* terminate_async_helper() asked us to stop */
            async_helper_thread = NULL;
            unlock(&async_helper_lock);
            break;
        }

        uint64_t next_expire_time = 0;
        size_t pals_cnt = 0;

        struct async_event* tmp;
        struct async_event* n;
        LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
            /* repopulate `pals` with IO events and find the next expiring alarm/timer */
            if (tmp->object) {
                if (pals_cnt == pals_max_cnt) {
                    /* grow `pals` to accommodate more objects */
                    PAL_HANDLE* tmp_pals = malloc(sizeof(*tmp_pals) * (1 + pals_max_cnt * 2));
                    if (!tmp_pals) {
                        debug("tmp_pals allocation failed\n");
                        goto out_err_unlock;
                    }
                    /* NOTE(review): if this second allocation fails, tmp_pals
                     * is not freed; harmless only because out_err terminates
                     * the whole process via DkProcessExit. */
                    PAL_FLG* tmp_pal_events = malloc(sizeof(*tmp_pal_events) * (2 + pals_max_cnt * 4));
                    if (!tmp_pal_events) {
                        debug("tmp_pal_events allocation failed\n");
                        goto out_err_unlock;
                    }
                    PAL_FLG* tmp_ret_events = tmp_pal_events + 1 + pals_max_cnt * 2;

                    memcpy(tmp_pals, pals, sizeof(*tmp_pals) * (1 + pals_max_cnt));
                    memcpy(tmp_pal_events, pal_events, sizeof(*tmp_pal_events) * (1 + pals_max_cnt));
                    memcpy(tmp_ret_events, ret_events, sizeof(*tmp_ret_events) * (1 + pals_max_cnt));

                    pals_max_cnt *= 2;

                    free(pals);
                    free(pal_events);
                    pals = tmp_pals;
                    pal_events = tmp_pal_events;
                    ret_events = tmp_ret_events;
                }
                /* +1 skips the reserved install_new_event slot */
                pals[pals_cnt + 1] = tmp->object;
                pal_events[pals_cnt + 1] = PAL_WAIT_READ;
                ret_events[pals_cnt + 1] = 0;
                pals_cnt++;
            } else if (tmp->expire_time && tmp->expire_time > now) {
                if (!next_expire_time || next_expire_time > tmp->expire_time) {
                    /* use time of the next expiring alarm/timer */
                    next_expire_time = tmp->expire_time;
                }
            }
        }

        uint64_t sleep_time;
        if (next_expire_time) {
            sleep_time = next_expire_time - now;
            idle_cycles = 0;
        } else if (pals_cnt) {
            /* only IO events: block until one of them fires */
            sleep_time = NO_TIMEOUT;
            idle_cycles = 0;
        } else {
            /* no async IO events and no timers/alarms: thread is idling */
            sleep_time = IDLE_SLEEP_TIME;
            idle_cycles++;
        }

        if (idle_cycles == MAX_IDLE_CYCLES) {
            async_helper_state = HELPER_NOTALIVE;
            async_helper_thread = NULL;
            unlock(&async_helper_lock);
            debug("Async helper thread has been idle for some time; stopping it\n");
            break;
        }
        unlock(&async_helper_lock);

        /* wait on async IO events + install_new_event + next expiring alarm/timer */
        PAL_BOL polled = DkStreamsWaitEvents(pals_cnt + 1, pals, pal_events, ret_events, sleep_time);

        now = DkSystemTimeQuery();
        if ((int64_t)now < 0) {
            debug("DkSystemTimeQuery failed with: %ld\n", (int64_t)now);
            goto out_err;
        }

        /* collect every event that fired so callbacks can run after unlock */
        LISTP_TYPE(async_event) triggered;
        INIT_LISTP(&triggered);

        /* acquire lock because we read/modify async_list below */
        lock(&async_helper_lock);
        for (size_t i = 0; polled && i < pals_cnt + 1; i++) {
            if (ret_events[i]) {
                if (pals[i] == install_new_event_pal) {
                    /* some thread wants to install new event; this event is found in async_list,
                     * so just re-init install_new_event */
                    clear_event(&install_new_event);
                    continue;
                }

                /* check if this event is an IO event found in async_list */
                LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
                    if (tmp->object == pals[i]) {
                        debug("Async IO event triggered at %lu\n", now);
                        /* NOTE(review): node is added to `triggered` without a
                         * prior LISTP_DEL from async_list — assumes the LISTP
                         * macros relink the shared `list` field so this acts
                         * as a move; verify against list.h semantics. */
                        LISTP_ADD_TAIL(tmp, &triggered, list);
                        break;
                    }
                }
            }
        }

        /* check if exit-child or alarm/timer events were triggered */
        LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &async_list, list) {
            if (tmp->callback == &cleanup_thread) {
                debug("Thread exited, cleaning up\n");
                LISTP_DEL(tmp, &async_list, list);
                LISTP_ADD_TAIL(tmp, &triggered, list);
            } else if (tmp->expire_time && tmp->expire_time <= now) {
                debug("Alarm/timer triggered at %lu (expired at %lu)\n", now, tmp->expire_time);
                LISTP_DEL(tmp, &async_list, list);
                LISTP_ADD_TAIL(tmp, &triggered, list);
            }
        }
        unlock(&async_helper_lock);

        /* call callbacks for all triggered events (without holding the lock,
         * since callbacks may take other locks / send signals) */
        if (!LISTP_EMPTY(&triggered)) {
            LISTP_FOR_EACH_ENTRY_SAFE(tmp, n, &triggered, list) {
                LISTP_DEL(tmp, &triggered, list);
                tmp->callback(tmp->caller, tmp->arg);
                if (!tmp->object) {
                    /* this is a one-off exit-child or alarm/timer event */
                    free(tmp);
                }
            }
        }
    }

    __disable_preempt(self->shim_tcb);
    put_thread(self);
    debug("Async helper thread terminated\n");

    free(pals);
    free(pal_events);

    DkThreadExit(/*clear_child_tid=*/NULL);
    return;

out_err_unlock:
    unlock(&async_helper_lock);
out_err:
    debug("Terminating the process due to a fatal error in async helper\n");
    put_thread(self);
    DkProcessExit(1);
}
  306. /* this should be called with the async_helper_lock held */
  307. static int create_async_helper(void) {
  308. assert(locked(&async_helper_lock));
  309. if (async_helper_state == HELPER_ALIVE)
  310. return 0;
  311. struct shim_thread* new = get_new_internal_thread();
  312. if (!new)
  313. return -ENOMEM;
  314. async_helper_thread = new;
  315. async_helper_state = HELPER_ALIVE;
  316. PAL_HANDLE handle = thread_create(shim_async_helper, new);
  317. if (!handle) {
  318. async_helper_thread = NULL;
  319. async_helper_state = HELPER_NOTALIVE;
  320. put_thread(new);
  321. return -PAL_ERRNO;
  322. }
  323. new->pal_handle = handle;
  324. return 0;
  325. }
  326. /* On success, the reference to async helper thread is returned with refcount
  327. * incremented. It is the responsibility of caller to wait for async helper's
  328. * exit and then release the final reference to free related resources (it is
  329. * problematic for the thread itself to release its own resources e.g. stack).
  330. */
  331. struct shim_thread* terminate_async_helper(void) {
  332. lock(&async_helper_lock);
  333. if (async_helper_state != HELPER_ALIVE) {
  334. unlock(&async_helper_lock);
  335. return NULL;
  336. }
  337. struct shim_thread* ret = async_helper_thread;
  338. if (ret)
  339. get_thread(ret);
  340. async_helper_state = HELPER_NOTALIVE;
  341. unlock(&async_helper_lock);
  342. /* force wake up of async helper thread so that it exits */
  343. set_event(&install_new_event, 1);
  344. return ret;
  345. }