/* workqueue.c */
  1. /* Copyright (c) 2013, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. #include "orconfig.h"
  4. #include "compat.h"
  5. #include "compat_threads.h"
  6. #include "util.h"
  7. #include "workqueue.h"
  8. #include "tor_queue.h"
  9. #include "torlog.h"
  10. #ifdef HAVE_UNISTD_H
  11. // XXXX move wherever we move the write/send stuff
  12. #include <unistd.h>
  13. #endif
/*
  Design:

  Each worker thread has its own queue; try to keep at least min..max
  cycles' worth of work elements on each queue.

  Keep an array of threads; round-robin new work between them.
  When a thread runs out of work, it should work-steal.

  Worker threads are alerted with condition variables; the main thread is
  alerted through an fd, since it runs libevent.
*/
/** An element of a worker thread's queue of pending work (and, later, of
 * the reply queue).  Created by threadpool_queue_work(); freed after its
 * reply_fn runs, or on successful cancellation. */
struct workqueue_entry_s {
  /** Next entry on the same worker-thread queue or reply queue. */
  TOR_TAILQ_ENTRY(workqueue_entry_s) next_work;
  /** The worker thread this entry was assigned to. */
  struct workerthread_s *on_thread;
  /** True iff the entry is still on a worker's queue (not yet started);
   * cleared under on_thread->lock before fn runs. */
  uint8_t pending;
  /** Function to run in the worker thread; returns a WQ_RPL_* code. */
  int (*fn)(void *state, void *arg);
  /** Function to run in the main thread while processing replies. */
  void (*reply_fn)(void *arg);
  /** Argument passed to both fn and reply_fn. */
  void *arg;
};
/** Queue of completed work entries, drained by the main thread via
 * replyqueue_process(). */
struct replyqueue_s {
  /** Protects the answers list (but NOT the fields below it). */
  tor_mutex_t lock;
  /** Completed entries whose reply_fn has not yet run. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) answers;
  void (*alert_fn)(struct replyqueue_s *); // lock not held on this, next 2.
  /** Write side of the alert fd pair (worker threads write here). */
  tor_socket_t write_sock;
  /** Read side of the alert fd pair (main thread/libevent reads here). */
  tor_socket_t read_sock;
};
/** State of a single worker thread. */
typedef struct workerthread_s {
  /** Protects everything below, including the work queue. */
  tor_mutex_t lock;
  /** Signalled when new work is queued while the thread is waiting. */
  tor_cond_t condition;
  /** Pending work assigned to this thread. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) work;
  /** True while the thread's main loop is running. */
  unsigned is_running;
  /** True once the thread has exited (a work fn returned >= WQ_RPL_ERROR). */
  unsigned is_shut_down;
  /** True while the thread is blocked in tor_cond_wait; producers only
   * signal the condition when this is set. */
  unsigned waiting;
  /** Per-thread state from the pool's new_thread_state_fn; passed as the
   * first argument to every work fn. */
  void *state;
  /** Where finished work entries are delivered. */
  replyqueue_t *reply_queue;
} workerthread_t;
/** A pool of worker threads sharing one reply queue. */
struct threadpool_s {
  /** Array of n_threads worker threads. */
  workerthread_t **threads;
  /** Round-robin index of the next thread to receive work. */
  int next_for_work;
  /** Protects threads, next_for_work, and n_threads. */
  tor_mutex_t lock;
  /** Number of launched threads in the threads array. */
  int n_threads;
  /** Reply queue shared by all threads in this pool. */
  replyqueue_t *reply_queue;
  /** Constructor/destructor for per-thread state, with constructor arg. */
  void *(*new_thread_state_fn)(void*);
  void (*free_thread_state_fn)(void*);
  void *new_thread_state_arg;
};
  58. static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);
  59. static workqueue_entry_t *
  60. workqueue_entry_new(int (*fn)(void*, void*),
  61. void (*reply_fn)(void*),
  62. void *arg)
  63. {
  64. workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
  65. ent->fn = fn;
  66. ent->reply_fn = reply_fn;
  67. ent->arg = arg;
  68. return ent;
  69. }
  70. static void
  71. workqueue_entry_free(workqueue_entry_t *ent)
  72. {
  73. if (!ent)
  74. return;
  75. tor_free(ent);
  76. }
  77. int
  78. workqueue_entry_cancel(workqueue_entry_t *ent)
  79. {
  80. int cancelled = 0;
  81. tor_mutex_acquire(&ent->on_thread->lock);
  82. if (ent->pending) {
  83. TOR_TAILQ_REMOVE(&ent->on_thread->work, ent, next_work);
  84. cancelled = 1;
  85. }
  86. tor_mutex_release(&ent->on_thread->lock);
  87. if (cancelled) {
  88. tor_free(ent);
  89. }
  90. return cancelled;
  91. }
  92. static void
  93. worker_thread_main(void *thread_)
  94. {
  95. workerthread_t *thread = thread_;
  96. workqueue_entry_t *work;
  97. int result;
  98. tor_mutex_acquire(&thread->lock);
  99. thread->is_running = 1;
  100. while (1) {
  101. /* lock held. */
  102. while (!TOR_TAILQ_EMPTY(&thread->work)) {
  103. /* lock held. */
  104. work = TOR_TAILQ_FIRST(&thread->work);
  105. TOR_TAILQ_REMOVE(&thread->work, work, next_work);
  106. work->pending = 0;
  107. tor_mutex_release(&thread->lock);
  108. result = work->fn(thread->state, work->arg);
  109. queue_reply(thread->reply_queue, work);
  110. tor_mutex_acquire(&thread->lock);
  111. if (result >= WQ_RPL_ERROR) {
  112. thread->is_running = 0;
  113. thread->is_shut_down = 1;
  114. tor_mutex_release(&thread->lock);
  115. return;
  116. }
  117. }
  118. /* Lock held; no work in this thread's queue. */
  119. /* TODO: Try work-stealing. */
  120. /* TODO: support an idle-function */
  121. thread->waiting = 1;
  122. if (tor_cond_wait(&thread->condition, &thread->lock, NULL) < 0)
  123. /* ERR */
  124. thread->waiting = 0;
  125. }
  126. }
  127. static void
  128. queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
  129. {
  130. int was_empty;
  131. tor_mutex_acquire(&queue->lock);
  132. was_empty = TOR_TAILQ_EMPTY(&queue->answers);
  133. TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
  134. tor_mutex_release(&queue->lock);
  135. if (was_empty) {
  136. queue->alert_fn(queue);
  137. }
  138. }
  139. static void
  140. alert_by_fd(replyqueue_t *queue)
  141. {
  142. /* XXX extract this into new function */
  143. #ifndef _WIN32
  144. (void) send(queue->write_sock, "x", 1, 0);
  145. #else
  146. (void) write(queue->write_sock, "x", 1);
  147. #endif
  148. }
  149. static workerthread_t *
  150. workerthread_new(void *state, replyqueue_t *replyqueue)
  151. {
  152. workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  153. tor_mutex_init_for_cond(&thr->lock);
  154. tor_cond_init(&thr->condition);
  155. TOR_TAILQ_INIT(&thr->work);
  156. thr->state = state;
  157. thr->reply_queue = replyqueue;
  158. if (spawn_func(worker_thread_main, thr) < 0) {
  159. log_err(LD_GENERAL, "Can't launch worker thread.");
  160. return NULL;
  161. }
  162. return thr;
  163. }
  164. workqueue_entry_t *
  165. threadpool_queue_work(threadpool_t *pool,
  166. int (*fn)(void *, void *),
  167. void (*reply_fn)(void *),
  168. void *arg)
  169. {
  170. workqueue_entry_t *ent;
  171. workerthread_t *worker;
  172. tor_mutex_acquire(&pool->lock);
  173. worker = pool->threads[pool->next_for_work++];
  174. if (!worker) {
  175. tor_mutex_release(&pool->lock);
  176. return NULL;
  177. }
  178. if (pool->next_for_work >= pool->n_threads)
  179. pool->next_for_work = 0;
  180. tor_mutex_release(&pool->lock);
  181. ent = workqueue_entry_new(fn, reply_fn, arg);
  182. tor_mutex_acquire(&worker->lock);
  183. ent->on_thread = worker;
  184. ent->pending = 1;
  185. TOR_TAILQ_INSERT_TAIL(&worker->work, ent, next_work);
  186. if (worker->waiting) /* XXXX inside or outside of lock?? */
  187. tor_cond_signal_one(&worker->condition);
  188. tor_mutex_release(&worker->lock);
  189. return ent;
  190. }
  191. int
  192. threadpool_start_threads(threadpool_t *pool, int n)
  193. {
  194. tor_mutex_acquire(&pool->lock);
  195. if (pool->n_threads < n)
  196. pool->threads = tor_realloc(pool->threads, sizeof(workerthread_t*)*n);
  197. while (pool->n_threads < n) {
  198. void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
  199. workerthread_t *thr = workerthread_new(state, pool->reply_queue);
  200. if (!thr) {
  201. tor_mutex_release(&pool->lock);
  202. return -1;
  203. }
  204. pool->threads[pool->n_threads++] = thr;
  205. }
  206. tor_mutex_release(&pool->lock);
  207. return 0;
  208. }
  209. threadpool_t *
  210. threadpool_new(int n_threads,
  211. replyqueue_t *replyqueue,
  212. void *(*new_thread_state_fn)(void*),
  213. void (*free_thread_state_fn)(void*),
  214. void *arg)
  215. {
  216. threadpool_t *pool;
  217. pool = tor_malloc_zero(sizeof(threadpool_t));
  218. tor_mutex_init(&pool->lock);
  219. pool->new_thread_state_fn = new_thread_state_fn;
  220. pool->new_thread_state_arg = arg;
  221. pool->free_thread_state_fn = free_thread_state_fn;
  222. pool->reply_queue = replyqueue;
  223. if (threadpool_start_threads(pool, n_threads) < 0) {
  224. tor_mutex_uninit(&pool->lock);
  225. tor_free(pool);
  226. return NULL;
  227. }
  228. return pool;
  229. }
/** Return the reply queue that <b>tp</b> delivers completed work to. */
replyqueue_t *
threadpool_get_replyqueue(threadpool_t *tp)
{
  return tp->reply_queue;
}
/** Allocate a new reply queue, including the fd pair used to alert the
 * main thread: a socketpair on Windows (pipes aren't pollable there), a
 * pipe elsewhere.  Returns NULL if the fd pair can't be created. */
replyqueue_t *
replyqueue_new(void)
{
  tor_socket_t pair[2];
  replyqueue_t *rq;
  int r;
  /* XXX extract this into new function */
#ifdef _WIN32
  r = tor_socketpair(AF_UNIX, SOCK_STREAM, 0, pair);
#else
  r = pipe(pair);
#endif
  if (r < 0)
    return NULL;

  set_socket_nonblocking(pair[0]); /* the read-side should be nonblocking. */
#if defined(FD_CLOEXEC)
  /* Keep the alert fds from leaking into child processes.  (Only defined
   * on POSIX, so this is skipped on Windows.) */
  fcntl(pair[0], F_SETFD, FD_CLOEXEC);
  fcntl(pair[1], F_SETFD, FD_CLOEXEC);
#endif

  rq = tor_malloc_zero(sizeof(replyqueue_t));
  tor_mutex_init(&rq->lock);
  TOR_TAILQ_INIT(&rq->answers);
  rq->read_sock = pair[0];
  rq->write_sock = pair[1];
  rq->alert_fn = alert_by_fd;
  return rq;
}
/** Return the read side of <b>rq</b>'s alert fd pair; readable exactly
 * when there are replies to process (suitable for a libevent read event). */
tor_socket_t
replyqueue_get_socket(replyqueue_t *rq)
{
  return rq->read_sock;
}
  267. void
  268. replyqueue_process(replyqueue_t *queue)
  269. {
  270. ssize_t r;
  271. /* XXX extract this into new function */
  272. do {
  273. char buf[64];
  274. #ifdef _WIN32
  275. r = recv(queue->read_sock, buf, sizeof(buf), 0);
  276. #else
  277. r = read(queue->read_sock, buf, sizeof(buf));
  278. #endif
  279. } while (r > 0);
  280. /* XXXX freak out on r == 0, or r == "error, not retryable". */
  281. tor_mutex_acquire(&queue->lock);
  282. while (!TOR_TAILQ_EMPTY(&queue->answers)) {
  283. /* lock held. */
  284. workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
  285. TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
  286. tor_mutex_release(&queue->lock);
  287. work->reply_fn(work->arg);
  288. workqueue_entry_free(work);
  289. tor_mutex_acquire(&queue->lock);
  290. }
  291. tor_mutex_release(&queue->lock);
  292. }