workqueue.c

/* Copyright (c) 2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "orconfig.h"
#include "compat.h"
#include "compat_threads.h"
#include "util.h"
#include "workqueue.h"
#include "tor_queue.h"
#include "torlog.h"

/*
  Design:

  Each thread has its own queue; try to keep min..max cycles' worth of work
  on each queue.

  Keep an array of threads; round-robin work between them.

  When a thread runs out of work, it should work-steal (not implemented yet;
  see the TODO in worker_thread_main).

  Alert worker threads with condition variables; alert the main thread with
  an fd, since it runs libevent.
 */

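/*
  A minimal usage sketch, not part of the original file: the function names
  (example_work_fn, example_reply_fn, example_state_new, example_state_free,
  example_setup) and the thread count are invented for illustration, and the
  WQ_RPL_REPLY status code is assumed to come from workqueue.h. It shows the
  intended flow: make a reply queue, build a pool on top of it, queue work,
  and let replyqueue_process() run the replies on the main thread.
 */
static void *
example_state_new(void *arg)
{
  (void) arg;
  return NULL; /* No per-thread state needed for this sketch. */
}

static void
example_state_free(void *state)
{
  (void) state;
}

static int
example_work_fn(void *state, void *arg)
{
  (void) state; /* Per-thread state from example_state_new(). */
  (void) arg;   /* Per-job argument from threadpool_queue_work(). */
  /* ... the expensive computation happens here, off the main thread ... */
  return WQ_RPL_REPLY; /* Anything >= WQ_RPL_ERROR shuts this thread down. */
}

static void
example_reply_fn(void *arg)
{
  (void) arg; /* Runs on the main thread once the reply queue is processed. */
}

static void
example_setup(void)
{
  replyqueue_t *rq = replyqueue_new();
  threadpool_t *pool = threadpool_new(4, rq, example_state_new,
                                      example_state_free, NULL);
  threadpool_queue_work(pool, example_work_fn, example_reply_fn, NULL);
  /* The event loop should watch replyqueue_get_socket(rq) for readability
   * and call replyqueue_process(rq) when it fires. */
}
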
/* A work item: the function to run on a worker thread, the reply function
 * to run afterward on the main thread, and their shared argument. */
struct workqueue_entry_s {
  TOR_TAILQ_ENTRY(workqueue_entry_s) next_work;
  struct workerthread_s *on_thread; /* Thread whose queue holds this entry. */
  uint8_t pending; /* True iff the entry is still waiting in a work queue. */
  int (*fn)(void *state, void *arg);
  void (*reply_fn)(void *arg);
  void *arg;
};

/* Queue of completed work, drained by the main thread. */
struct replyqueue_s {
  tor_mutex_t lock;
  TOR_TAILQ_HEAD(, workqueue_entry_s) answers;
  alert_sockets_t alert; /* Wakes the main thread; lock not held on this. */
};

/* One worker thread: its pending work, its per-thread state, and the reply
 * queue it reports to. */
typedef struct workerthread_s {
  tor_mutex_t lock;
  tor_cond_t condition;
  TOR_TAILQ_HEAD(, workqueue_entry_s) work;
  unsigned is_running;
  unsigned is_shut_down;
  unsigned waiting; /* True iff the thread is blocked on its condition. */
  void *state;
  replyqueue_t *reply_queue;
} workerthread_t;

/* A pool of worker threads, assigned work round-robin. */
struct threadpool_s {
  workerthread_t **threads; /* Array of n_threads worker threads. */
  int next_for_work; /* Index of the next thread to receive work. */
  tor_mutex_t lock;
  int n_threads;
  replyqueue_t *reply_queue;
  void *(*new_thread_state_fn)(void*); /* Constructs per-thread state. */
  void (*free_thread_state_fn)(void*);
  void *new_thread_state_arg;
};

static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);

/* Allocate and return a new workqueue_entry_t, set up to run fn in a worker
 * thread and reply_fn on the main thread, both with argument arg. */
static workqueue_entry_t *
workqueue_entry_new(int (*fn)(void*, void*),
                    void (*reply_fn)(void*),
                    void *arg)
{
  workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
  ent->fn = fn;
  ent->reply_fn = reply_fn;
  ent->arg = arg;
  return ent;
}

/* Release all storage held in ent. Does not free ent->arg. */
static void
workqueue_entry_free(workqueue_entry_t *ent)
{
  if (!ent)
    return;
  tor_free(ent);
}

/* Cancel a workqueue_entry_t returned from threadpool_queue_work. Returns 1
 * (and frees the entry) if the job was still pending and has been removed
 * from its thread's queue; returns 0 if a worker has already started (or
 * finished) it, in which case reply_fn will still run and the entry will be
 * freed through the reply queue. */
int
workqueue_entry_cancel(workqueue_entry_t *ent)
{
  int cancelled = 0;
  tor_mutex_acquire(&ent->on_thread->lock);
  if (ent->pending) {
    TOR_TAILQ_REMOVE(&ent->on_thread->work, ent, next_work);
    cancelled = 1;
  }
  tor_mutex_release(&ent->on_thread->lock);

  if (cancelled) {
    tor_free(ent);
  }
  return cancelled;
}

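/*
  A hedged illustration of the cancellation contract above, reusing the
  hypothetical example_work_fn/example_reply_fn from the earlier sketch and a
  made-up job allocation: on success the entry is already freed and reply_fn
  will never run, so the caller reclaims its argument; on failure a worker
  owns the job and the reply path will still see it.
 */
static void
example_cancel(threadpool_t *pool)
{
  int *job = tor_malloc_zero(sizeof(int));
  workqueue_entry_t *ent =
    threadpool_queue_work(pool, example_work_fn, example_reply_fn, job);
  if (ent && workqueue_entry_cancel(ent)) {
    /* The job was still pending: it will never run and reply_fn will not be
     * called, so we must free the argument ourselves. */
    tor_free(job);
  }
  /* Otherwise a worker already picked it up; example_reply_fn gets `job`. */
}
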
/* Main loop for a worker thread: run jobs from the thread's own queue,
 * pushing each result onto the reply queue, and sleep on the condition
 * variable when the queue is empty. */
static void
worker_thread_main(void *thread_)
{
  workerthread_t *thread = thread_;
  workqueue_entry_t *work;
  int result;

  tor_mutex_acquire(&thread->lock);
  thread->is_running = 1;
  while (1) {
    /* lock held. */
    while (!TOR_TAILQ_EMPTY(&thread->work)) {
      /* lock held. */
      work = TOR_TAILQ_FIRST(&thread->work);
      TOR_TAILQ_REMOVE(&thread->work, work, next_work);
      work->pending = 0;
      tor_mutex_release(&thread->lock);

      /* Run the work function without holding the lock, so more work can be
       * queued while we compute. */
      result = work->fn(thread->state, work->arg);
      queue_reply(thread->reply_queue, work);

      tor_mutex_acquire(&thread->lock);
      if (result >= WQ_RPL_ERROR) {
        thread->is_running = 0;
        thread->is_shut_down = 1;
        tor_mutex_release(&thread->lock);
        return;
      }
    }
    /* Lock held; no work in this thread's queue. */
    /* TODO: Try work-stealing. */
    /* TODO: support an idle-function */
    thread->waiting = 1;
    if (tor_cond_wait(&thread->condition, &thread->lock, NULL) < 0) {
      /* XXXX complain; for now we just go around the loop again. */
    }
    thread->waiting = 0;
  }
}

/* Put a completed entry on the reply queue, and alert the main thread (via
 * the alert socket) if the queue was previously empty. */
static void
queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
{
  int was_empty;
  tor_mutex_acquire(&queue->lock);
  was_empty = TOR_TAILQ_EMPTY(&queue->answers);
  TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
  tor_mutex_release(&queue->lock);

  if (was_empty) {
    if (queue->alert.alert_fn(queue->alert.write_fd) < 0) {
      /* XXXX complain! */
    }
  }
}

/* Allocate and start a new worker thread that uses state as its state
 * argument and sends its results to replyqueue. Returns NULL on failure. */
static workerthread_t *
workerthread_new(void *state, replyqueue_t *replyqueue)
{
  workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  tor_mutex_init_for_cond(&thr->lock);
  tor_cond_init(&thr->condition);
  TOR_TAILQ_INIT(&thr->work);
  thr->state = state;
  thr->reply_queue = replyqueue;

  if (spawn_func(worker_thread_main, thr) < 0) {
    log_err(LD_GENERAL, "Can't launch worker thread.");
    tor_free(thr);
    return NULL;
  }
  return thr;
}

/* Queue work on one of pool's threads (round-robin): fn(state, arg) will run
 * on a worker thread, and reply_fn(arg) will later run on the main thread
 * when the reply queue is processed. Returns a handle that can be passed to
 * workqueue_entry_cancel(), or NULL if the pool has no threads. */
workqueue_entry_t *
threadpool_queue_work(threadpool_t *pool,
                      int (*fn)(void *, void *),
                      void (*reply_fn)(void *),
                      void *arg)
{
  workqueue_entry_t *ent;
  workerthread_t *worker;

  tor_mutex_acquire(&pool->lock);
  if (!pool->n_threads) {
    /* Don't index into an empty (possibly NULL) thread array. */
    tor_mutex_release(&pool->lock);
    return NULL;
  }
  worker = pool->threads[pool->next_for_work++];
  if (!worker) {
    tor_mutex_release(&pool->lock);
    return NULL;
  }
  if (pool->next_for_work >= pool->n_threads)
    pool->next_for_work = 0;
  tor_mutex_release(&pool->lock);

  ent = workqueue_entry_new(fn, reply_fn, arg);

  tor_mutex_acquire(&worker->lock);
  ent->on_thread = worker;
  ent->pending = 1;
  TOR_TAILQ_INSERT_TAIL(&worker->work, ent, next_work);

  if (worker->waiting) /* XXXX inside or outside of lock?? */
    tor_cond_signal_one(&worker->condition);

  tor_mutex_release(&worker->lock);
  return ent;
}

/* Launch threads until pool has n of them, constructing per-thread state
 * with the pool's new_thread_state_fn. Returns 0 on success, -1 on failure. */
int
threadpool_start_threads(threadpool_t *pool, int n)
{
  tor_mutex_acquire(&pool->lock);

  if (pool->n_threads < n)
    pool->threads = tor_realloc(pool->threads, sizeof(workerthread_t*)*n);

  while (pool->n_threads < n) {
    void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
    workerthread_t *thr = workerthread_new(state, pool->reply_queue);

    if (!thr) {
      tor_mutex_release(&pool->lock);
      return -1;
    }
    pool->threads[pool->n_threads++] = thr;
  }

  tor_mutex_release(&pool->lock);
  return 0;
}

/* Construct a new thread pool with n_threads worker threads, sending their
 * output to replyqueue. Each thread's state is created with
 * new_thread_state_fn(arg); free_thread_state_fn is remembered for eventual
 * cleanup. Returns NULL on failure. */
threadpool_t *
threadpool_new(int n_threads,
               replyqueue_t *replyqueue,
               void *(*new_thread_state_fn)(void*),
               void (*free_thread_state_fn)(void*),
               void *arg)
{
  threadpool_t *pool;
  pool = tor_malloc_zero(sizeof(threadpool_t));
  tor_mutex_init(&pool->lock);
  pool->new_thread_state_fn = new_thread_state_fn;
  pool->new_thread_state_arg = arg;
  pool->free_thread_state_fn = free_thread_state_fn;
  pool->reply_queue = replyqueue;

  if (threadpool_start_threads(pool, n_threads) < 0) {
    tor_mutex_uninit(&pool->lock);
    tor_free(pool->threads);
    tor_free(pool);
    return NULL;
  }

  return pool;
}

/* Return the reply queue that tp uses. */
replyqueue_t *
threadpool_get_replyqueue(threadpool_t *tp)
{
  return tp->reply_queue;
}

/* Allocate a new reply queue, including the alert sockets used to wake the
 * main thread. Returns NULL on failure. */
replyqueue_t *
replyqueue_new(void)
{
  replyqueue_t *rq;

  rq = tor_malloc_zero(sizeof(replyqueue_t));
  if (alert_sockets_create(&rq->alert) < 0) {
    tor_free(rq);
    return NULL;
  }

  tor_mutex_init(&rq->lock);
  TOR_TAILQ_INIT(&rq->answers);

  return rq;
}

/* Return the "read socket" for rq; the main thread should watch this socket
 * for readability and call replyqueue_process() when it fires. */
tor_socket_t
replyqueue_get_socket(replyqueue_t *rq)
{
  return rq->alert.read_fd;
}

/* Run from the main thread when the reply queue's read socket becomes
 * readable: drain the alert socket, then run and free every queued reply. */
void
replyqueue_process(replyqueue_t *queue)
{
  if (queue->alert.drain_fn(queue->alert.read_fd) < 0) {
    /* XXXX complain! */
  }

  tor_mutex_acquire(&queue->lock);
  while (!TOR_TAILQ_EMPTY(&queue->answers)) {
    /* lock held. */
    workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
    TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
    tor_mutex_release(&queue->lock);

    /* Run the reply function without holding the lock, since it may queue
     * more work. */
    work->reply_fn(work->arg);
    workqueue_entry_free(work);

    tor_mutex_acquire(&queue->lock);
  }
  tor_mutex_release(&queue->lock);
}
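
/*
  A sketch of the main-thread hookup hinted at in the design note ("alert
  main thread with fd, since it's libevent"); not part of the original file.
  The event_base, reply_event_cb, and attach_replyqueue names are invented,
  but the calls (event_new, event_add, event_free) are the standard libevent
  2 API: watch the reply queue's read socket and drain replies when it
  becomes readable.
 */
#include <event2/event.h>

static void
reply_event_cb(evutil_socket_t sock, short events, void *arg)
{
  (void) sock;
  (void) events;
  replyqueue_process((replyqueue_t *) arg); /* Runs every queued reply_fn. */
}

static struct event *
attach_replyqueue(struct event_base *base, replyqueue_t *rq)
{
  struct event *ev = event_new(base, replyqueue_get_socket(rq),
                               EV_READ|EV_PERSIST, reply_event_cb, rq);
  if (!ev)
    return NULL;
  if (event_add(ev, NULL) < 0) {
    event_free(ev);
    return NULL;
  }
  return ev;
}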