workqueue.c

/* Copyright (c) 2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "orconfig.h"
#include "compat.h"
#include "compat_threads.h"
#include "util.h"
#include "workqueue.h"
#include "tor_queue.h"
#include "torlog.h"
struct threadpool_s {
  /** An array of pointers to workerthread_t: one for each running worker
   * thread. */
  struct workerthread_s **threads;
  /** Index of the next thread that we'll give work to. */
  int next_for_work;
  /** Number of elements in threads. */
  int n_threads;
  /** Mutex to protect all the above fields. */
  tor_mutex_t lock;
  /** A reply queue to use when constructing new threads. */
  replyqueue_t *reply_queue;
  /** Functions used to allocate and free thread state. */
  void *(*new_thread_state_fn)(void*);
  void (*free_thread_state_fn)(void*);
  void *new_thread_state_arg;
};
struct workqueue_entry_s {
  /** The next workqueue_entry_t that's pending on the same thread or
   * reply queue. */
  TOR_TAILQ_ENTRY(workqueue_entry_s) next_work;
  /** The thread to which this workqueue_entry_t was assigned. This field
   * is set when the workqueue_entry_t is created, and won't be cleared until
   * after it's handled in the main thread. */
  struct workerthread_s *on_thread;
  /** True iff this entry is waiting for a worker to start processing it. */
  uint8_t pending;
  /** Function to run in the worker thread. */
  int (*fn)(void *state, void *arg);
  /** Function to run while processing the reply queue. */
  void (*reply_fn)(void *arg);
  /** Argument for the above functions. */
  void *arg;
};
struct replyqueue_s {
  /** Mutex to protect the answers field. */
  tor_mutex_t lock;
  /** Doubly-linked list of answers that the reply queue needs to handle. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) answers;
  /** Mechanism to wake up the main thread when it is receiving answers. */
  alert_sockets_t alert;
};
/** A worker thread represents a single thread in a thread pool. To avoid
 * contention, each gets its own queue. This breaks the guarantee that
 * queued work will get executed strictly in order. */
typedef struct workerthread_s {
  /** Lock to protect all fields of this thread and its queue. */
  tor_mutex_t lock;
  /** Condition variable that we wait on when we have no work, and which
   * gets signaled when our queue becomes nonempty. */
  tor_cond_t condition;
  /** Queue of pending work that we have to do. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) work;
  /** True iff this thread is currently in its loop. */
  unsigned is_running;
  /** True iff this thread has crashed or is shut down for some reason. */
  unsigned is_shut_down;
  /** True if we're waiting for more elements to get added to the queue. */
  unsigned waiting;
  /** User-supplied state field that we pass to the worker functions of each
   * work item. */
  void *state;
  /** Reply queue to which we pass our results. */
  replyqueue_t *reply_queue;
} workerthread_t;
static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);

/** Allocate and return a new workqueue_entry_t, set up to run the function
 * <b>fn</b> in the worker thread, and <b>reply_fn</b> in the main
 * thread. See threadpool_queue_work() for full documentation. */
static workqueue_entry_t *
workqueue_entry_new(int (*fn)(void*, void*),
                    void (*reply_fn)(void*),
                    void *arg)
{
  workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
  ent->fn = fn;
  ent->reply_fn = reply_fn;
  ent->arg = arg;
  return ent;
}
/**
 * Release all storage held in <b>ent</b>. Call only when <b>ent</b> is not on
 * any queue.
 */
static void
workqueue_entry_free(workqueue_entry_t *ent)
{
  if (!ent)
    return;
  tor_free(ent);
}
/**
 * Cancel a workqueue_entry_t that has been returned from
 * threadpool_queue_work().
 *
 * You must not call this function on any work whose reply function has been
 * executed in the main thread; that will cause undefined behavior (probably,
 * a crash).
 *
 * If the work is cancelled, this function returns 1. It is the caller's
 * responsibility to free any storage in the work function's arguments.
 *
 * This function will have no effect if the worker thread has already executed
 * or begun to execute the work item. In that case, it will return 0.
 */
int
workqueue_entry_cancel(workqueue_entry_t *ent)
{
  int cancelled = 0;
  tor_mutex_acquire(&ent->on_thread->lock);
  if (ent->pending) {
    TOR_TAILQ_REMOVE(&ent->on_thread->work, ent, next_work);
    cancelled = 1;
  }
  tor_mutex_release(&ent->on_thread->lock);

  if (cancelled) {
    tor_free(ent);
  }
  return cancelled;
}
/**
 * Main function for the worker thread.
 */
static void
worker_thread_main(void *thread_)
{
  workerthread_t *thread = thread_;
  workqueue_entry_t *work;
  int result;

  tor_mutex_acquire(&thread->lock);
  thread->is_running = 1;
  while (1) {
    /* lock must be held at this point. */
    while (!TOR_TAILQ_EMPTY(&thread->work)) {
      /* lock must be held at this point. */
      work = TOR_TAILQ_FIRST(&thread->work);
      TOR_TAILQ_REMOVE(&thread->work, work, next_work);
      work->pending = 0;
      tor_mutex_release(&thread->lock);

      /* We run the work function without holding the thread lock. This
       * is the main thread's first opportunity to give us more work. */
      result = work->fn(thread->state, work->arg);

      /* Queue the reply for the main thread. */
      queue_reply(thread->reply_queue, work);

      tor_mutex_acquire(&thread->lock);
      /* We may need to exit the thread. */
      if (result >= WQ_RPL_ERROR) {
        thread->is_running = 0;
        thread->is_shut_down = 1;
        tor_mutex_release(&thread->lock);
        return;
      }
    }
    /* At this point the lock is held, and there is no work in this thread's
     * queue. */

    /* TODO: Try work-stealing. */
    /* TODO: support an idle-function */

    /* Okay. Now, wait till somebody has work for us. */
    thread->waiting = 1;
    if (tor_cond_wait(&thread->condition, &thread->lock, NULL) < 0) {
      /* XXXX ERROR */
    }
    thread->waiting = 0;
  }
}
/** Put a reply on the reply queue. The reply must not currently be on
 * any thread's work queue. */
static void
queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
{
  int was_empty;
  tor_mutex_acquire(&queue->lock);
  was_empty = TOR_TAILQ_EMPTY(&queue->answers);
  TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
  tor_mutex_release(&queue->lock);

  if (was_empty) {
    if (queue->alert.alert_fn(queue->alert.write_fd) < 0) {
      /* XXXX complain! */
    }
  }
}
/** Allocate and start a new worker thread to use state object <b>state</b>,
 * and send responses to <b>replyqueue</b>. */
static workerthread_t *
workerthread_new(void *state, replyqueue_t *replyqueue)
{
  workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  tor_mutex_init_for_cond(&thr->lock);
  tor_cond_init(&thr->condition);
  TOR_TAILQ_INIT(&thr->work);
  thr->state = state;
  thr->reply_queue = replyqueue;

  if (spawn_func(worker_thread_main, thr) < 0) {
    log_err(LD_GENERAL, "Can't launch worker thread.");
    return NULL;
  }

  return thr;
}
/**
 * Add an item of work to a single worker thread. See threadpool_queue_work()
 * for arguments.
 */
static workqueue_entry_t *
workerthread_queue_work(workerthread_t *worker,
                        int (*fn)(void *, void *),
                        void (*reply_fn)(void *),
                        void *arg)
{
  workqueue_entry_t *ent = workqueue_entry_new(fn, reply_fn, arg);

  tor_mutex_acquire(&worker->lock);
  ent->on_thread = worker;
  ent->pending = 1;
  TOR_TAILQ_INSERT_TAIL(&worker->work, ent, next_work);

  if (worker->waiting) /* XXXX inside or outside of lock?? */
    tor_cond_signal_one(&worker->condition);

  tor_mutex_release(&worker->lock);
  return ent;
}
/**
 * Queue an item of work for a thread in a thread pool. The function
 * <b>fn</b> will be run in a worker thread, and will receive as arguments the
 * thread's state object, and the provided object <b>arg</b>. It must return
 * one of WQ_RPL_REPLY, WQ_RPL_ERROR, or WQ_RPL_SHUTDOWN.
 *
 * Regardless of its return value, the function <b>reply_fn</b> will later be
 * run in the main thread when it invokes replyqueue_process(), and will
 * receive as its argument the same <b>arg</b> object. It's the reply
 * function's responsibility to free the work object.
 *
 * On success, return a workqueue_entry_t object that can be passed to
 * workqueue_entry_cancel(). On failure, return NULL.
 *
 * Note that because each thread has its own work queue, work items may not
 * be executed strictly in order.
 */
workqueue_entry_t *
threadpool_queue_work(threadpool_t *pool,
                      int (*fn)(void *, void *),
                      void (*reply_fn)(void *),
                      void *arg)
{
  workerthread_t *worker;

  tor_mutex_acquire(&pool->lock);
  /* Pick the next thread in round-robin order. */
  worker = pool->threads[pool->next_for_work++];
  if (!worker) {
    tor_mutex_release(&pool->lock);
    return NULL;
  }
  if (pool->next_for_work >= pool->n_threads)
    pool->next_for_work = 0;
  tor_mutex_release(&pool->lock);

  return workerthread_queue_work(worker, fn, reply_fn, arg);
}
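
/* Illustrative usage sketch (editorial addition, not part of the original
 * file): how a caller might queue one job with threadpool_queue_work(). The
 * square_job_t type, the example_* function names, and the "pool" argument
 * are hypothetical; the fn/reply_fn contract and return values follow the
 * documentation above. */
#if 0
typedef struct square_job_t {
  int input;
  int output;
} square_job_t;

/* Runs in a worker thread; receives the per-thread state and our argument. */
static int
example_work_fn(void *state, void *arg)
{
  square_job_t *job = arg;
  (void) state;
  job->output = job->input * job->input;
  return WQ_RPL_REPLY;
}

/* Runs later in the main thread from replyqueue_process(); it owns the
 * argument and must free it. */
static void
example_reply_fn(void *arg)
{
  square_job_t *job = arg;
  log_info(LD_GENERAL, "square is %d", job->output);
  tor_free(job);
}

static void
example_queue_one(threadpool_t *pool)
{
  square_job_t *job = tor_malloc_zero(sizeof(*job));
  job->input = 7;
  if (!threadpool_queue_work(pool, example_work_fn, example_reply_fn, job))
    tor_free(job); /* Queueing failed; we still own the argument. */
}
#endif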
/**
 * Queue a copy of a work item for every thread in a pool. This can be used,
 * for example, to tell the threads to update some parameter in their states.
 *
 * Arguments are as for <b>threadpool_queue_work</b>, except that the
 * <b>arg</b> value is passed to <b>dup_fn</b> once per thread to make a
 * copy of it.
 *
 * Return 0 on success, -1 on failure.
 */
int
threadpool_queue_for_all(threadpool_t *pool,
                         void *(*dup_fn)(const void *),
                         int (*fn)(void *, void *),
                         void (*reply_fn)(void *),
                         void *arg)
{
  int i = 0;
  workerthread_t *worker;
  void *arg_copy;

  while (1) {
    tor_mutex_acquire(&pool->lock);
    if (i >= pool->n_threads) {
      tor_mutex_release(&pool->lock);
      return 0;
    }
    worker = pool->threads[i++];
    tor_mutex_release(&pool->lock);

    arg_copy = dup_fn ? dup_fn(arg) : arg;
    /* CHECK*/ workerthread_queue_work(worker, fn, reply_fn, arg_copy);
  }
}
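
/* Illustrative sketch (editorial addition, not part of the original file):
 * broadcasting a parameter update to every worker's state via
 * threadpool_queue_for_all(). The dup_fn gives each thread a private copy of
 * the argument, which that thread's reply_fn later frees. The example_state_t
 * type and example_* names are hypothetical. */
#if 0
static void *
example_dup_param(const void *arg)
{
  return tor_memdup(arg, sizeof(int));
}

static int
example_update_fn(void *state, void *arg)
{
  example_state_t *st = state;      /* hypothetical per-thread state */
  st->param = *(int *)arg;
  return WQ_RPL_REPLY;
}

static void
example_update_reply_fn(void *arg)
{
  tor_free(arg); /* Free this thread's private copy of the parameter. */
}

static void
example_broadcast_param(threadpool_t *pool, int value)
{
  /* dup_fn is called inside threadpool_queue_for_all(), so passing a
   * stack address here is safe. */
  threadpool_queue_for_all(pool, example_dup_param,
                           example_update_fn, example_update_reply_fn,
                           &value);
}
#endif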
/** Launch threads until we have <b>n</b>. */
static int
threadpool_start_threads(threadpool_t *pool, int n)
{
  tor_mutex_acquire(&pool->lock);
  if (pool->n_threads < n)
    pool->threads = tor_realloc(pool->threads, sizeof(workerthread_t*)*n);

  while (pool->n_threads < n) {
    void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
    workerthread_t *thr = workerthread_new(state, pool->reply_queue);
    if (!thr) {
      tor_mutex_release(&pool->lock);
      return -1;
    }
    pool->threads[pool->n_threads++] = thr;
  }
  tor_mutex_release(&pool->lock);
  return 0;
}
/**
 * Construct a new thread pool with <b>n_threads</b> worker threads,
 * configured to send their output to <b>replyqueue</b>. The threads' states
 * will be constructed with the <b>new_thread_state_fn</b> call, receiving
 * <b>arg</b> as its argument. When the threads close, they will call
 * <b>free_thread_state_fn</b> on their states.
 */
threadpool_t *
threadpool_new(int n_threads,
               replyqueue_t *replyqueue,
               void *(*new_thread_state_fn)(void*),
               void (*free_thread_state_fn)(void*),
               void *arg)
{
  threadpool_t *pool;
  pool = tor_malloc_zero(sizeof(threadpool_t));
  tor_mutex_init(&pool->lock);
  pool->new_thread_state_fn = new_thread_state_fn;
  pool->new_thread_state_arg = arg;
  pool->free_thread_state_fn = free_thread_state_fn;
  pool->reply_queue = replyqueue;

  if (threadpool_start_threads(pool, n_threads) < 0) {
    tor_mutex_uninit(&pool->lock);
    tor_free(pool);
    return NULL;
  }

  return pool;
}
/** Return the reply queue associated with a given thread pool. */
replyqueue_t *
threadpool_get_replyqueue(threadpool_t *tp)
{
  return tp->reply_queue;
}
/** Allocate a new reply queue. Reply queues are used to pass results from
 * worker threads to the main thread. Since the main thread is running an
 * IO-centric event loop, it needs to be woken up by means other than a
 * condition variable. */
replyqueue_t *
replyqueue_new(void)
{
  replyqueue_t *rq;

  rq = tor_malloc_zero(sizeof(replyqueue_t));
  if (alert_sockets_create(&rq->alert) < 0) {
    tor_free(rq);
    return NULL;
  }

  tor_mutex_init(&rq->lock);
  TOR_TAILQ_INIT(&rq->answers);

  return rq;
}
/**
 * Return the "read socket" for a given reply queue. The main thread should
 * listen for read events on this socket, and call replyqueue_process() every
 * time it triggers.
 */
tor_socket_t
replyqueue_get_socket(replyqueue_t *rq)
{
  return rq->alert.read_fd;
}

/**
 * Process all pending replies on a reply queue. The main thread should call
 * this function every time the socket returned by replyqueue_get_socket() is
 * readable.
 */
void
replyqueue_process(replyqueue_t *queue)
{
  if (queue->alert.drain_fn(queue->alert.read_fd) < 0) {
    /* XXXX complain! */
  }

  tor_mutex_acquire(&queue->lock);
  while (!TOR_TAILQ_EMPTY(&queue->answers)) {
    /* lock must be held at this point. */
    workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
    TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
    tor_mutex_release(&queue->lock);

    work->reply_fn(work->arg);
    workqueue_entry_free(work);

    tor_mutex_acquire(&queue->lock);
  }
  tor_mutex_release(&queue->lock);
}
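
/* Illustrative end-to-end sketch (editorial addition, not part of the
 * original file): creating a reply queue and pool, and wiring the reply
 * queue into an event loop. The per-thread state constructor/destructor and
 * the event-loop registration are hypothetical; only replyqueue_new(),
 * threadpool_new(), replyqueue_get_socket(), and replyqueue_process() come
 * from this file. */
#if 0
static void *
example_new_state(void *arg)
{
  (void) arg;
  return tor_malloc_zero(sizeof(example_state_t)); /* hypothetical type */
}

static void
example_free_state(void *state)
{
  tor_free(state);
}

static void
example_setup(void)
{
  replyqueue_t *rq = replyqueue_new();
  threadpool_t *pool;
  if (!rq)
    return;
  pool = threadpool_new(4, rq, example_new_state, example_free_state, NULL);
  if (!pool)
    return;

  /* Register the reply queue's socket with the event loop (libevent or
   * similar), and call replyqueue_process(rq) from its read callback:
   *
   *   tor_socket_t sock = replyqueue_get_socket(rq);
   *   ... watch sock for readability; on read, call replyqueue_process(rq) ...
   */
}
#endif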