/* Copyright (c) 2013-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file workqueue.c
 *
 * \brief Implements worker threads, queues of work for them, and mechanisms
 * for them to send answers back to the main thread.
 *
 * The main structure here is a threadpool_t : it manages a set of worker
 * threads, a queue of pending work, and a reply queue. Every piece of work
 * is a workqueue_entry_t, containing data to process and a function to
 * process it with.
 *
 * The main thread informs the worker threads of pending work by using a
 * condition variable. The workers inform the main process of completed work
 * by using an alert_sockets_t object, as implemented in compat_threads.c.
 *
 * The main thread can also queue an "update" that will be handled by all the
 * workers. This is useful for updating state that all the workers share.
 *
 * In Tor today, there is only one thread pool, used in cpuworker.c.
 */
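
/* A minimal sketch of how these pieces fit together, using the public
 * functions below. The helper names (my_new_state, my_free_state,
 * my_work_fn, my_reply_fn, job) and the 0 alertsocks flags value are
 * hypothetical; cpuworker.c has the real usage.
 *
 *    replyqueue_t *rq = replyqueue_new(0);
 *    threadpool_t *tp = threadpool_new(4, rq,
 *                                      my_new_state, my_free_state, NULL);
 *    threadpool_register_reply_event(tp, NULL);
 *
 *    // From the main thread, hand a job to a worker; my_reply_fn will
 *    // later run on the main thread and must free `job`.
 *    threadpool_queue_work(tp, my_work_fn, my_reply_fn, job);
 */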

#include "orconfig.h"
#include "common/compat.h"
#include "common/compat_libevent.h"
#include "lib/thread/threads.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "common/util.h"
#include "common/workqueue.h"
#include "tor_queue.h"
#include "lib/net/alertsock.h"
#include "lib/log/torlog.h"

#include <event2/event.h>

#define WORKQUEUE_PRIORITY_FIRST WQ_PRI_HIGH
#define WORKQUEUE_PRIORITY_LAST WQ_PRI_LOW
#define WORKQUEUE_N_PRIORITIES (((int) WORKQUEUE_PRIORITY_LAST)+1)

TOR_TAILQ_HEAD(work_tailq_t, workqueue_entry_s);
typedef struct work_tailq_t work_tailq_t;

struct threadpool_s {
  /** An array of pointers to workerthread_t: one for each running worker
   * thread. */
  struct workerthread_s **threads;

  /** Condition variable that we wait on when we have no work, and which
   * gets signaled when our queue becomes nonempty. */
  tor_cond_t condition;

  /** Queues of pending work that we have to do. The queue with priority
   * <b>p</b> is work[p]. */
  work_tailq_t work[WORKQUEUE_N_PRIORITIES];

  /** Weak RNG, used to decide when to ignore priority. */
  tor_weak_rng_t weak_rng;

  /** The current 'update generation' of the threadpool. Any thread that is
   * at an earlier generation needs to run the update function. */
  unsigned generation;

  /** Function that should be run for updates on each thread. */
  workqueue_reply_t (*update_fn)(void *, void *);
  /** Function to free update arguments if they can't be run. */
  void (*free_update_arg_fn)(void *);
  /** Array of n_threads update arguments. */
  void **update_args;

  /** Event to notice when another thread has sent a reply. */
  struct event *reply_event;
  void (*reply_cb)(threadpool_t *);

  /** Number of elements in threads. */
  int n_threads;
  /** Mutex to protect all the above fields. */
  tor_mutex_t lock;

  /** A reply queue to use when constructing new threads. */
  replyqueue_t *reply_queue;

  /** Functions used to allocate and free thread state. */
  void *(*new_thread_state_fn)(void*);
  void (*free_thread_state_fn)(void*);
  void *new_thread_state_arg;
};

/** Used to put a workqueue_priority_t value into a bitfield. */
#define workqueue_priority_bitfield_t ENUM_BF(workqueue_priority_t)
/** Number of bits needed to hold all legal values of workqueue_priority_t */
#define WORKQUEUE_PRIORITY_BITS 2

struct workqueue_entry_s {
  /** The next workqueue_entry_t that's pending on the same thread or
   * reply queue. */
  TOR_TAILQ_ENTRY(workqueue_entry_s) next_work;
  /** The threadpool to which this workqueue_entry_t was assigned. This field
   * is set when the workqueue_entry_t is created, and won't be cleared until
   * after it's handled in the main thread. */
  struct threadpool_s *on_pool;
  /** True iff this entry is waiting for a worker to start processing it. */
  uint8_t pending;
  /** Priority of this entry. */
  workqueue_priority_bitfield_t priority : WORKQUEUE_PRIORITY_BITS;
  /** Function to run in the worker thread. */
  workqueue_reply_t (*fn)(void *state, void *arg);
  /** Function to run while processing the reply queue. */
  void (*reply_fn)(void *arg);
  /** Argument for the above functions. */
  void *arg;
};

struct replyqueue_s {
  /** Mutex to protect the answers field */
  tor_mutex_t lock;
  /** Doubly-linked list of answers that the reply queue needs to handle. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) answers;

  /** Mechanism to wake up the main thread when it is receiving answers. */
  alert_sockets_t alert;
};

/** A worker thread represents a single thread in a thread pool. */
typedef struct workerthread_s {
  /** Which thread is this? In range 0..in_pool->n_threads-1 */
  int index;
  /** The pool this thread is a part of. */
  struct threadpool_s *in_pool;
  /** User-supplied state field that we pass to the worker functions of each
   * work item. */
  void *state;
  /** Reply queue to which we pass our results. */
  replyqueue_t *reply_queue;
  /** The current update generation of this thread */
  unsigned generation;
  /** One over the probability of taking work from a lower-priority queue. */
  int32_t lower_priority_chance;
} workerthread_t;

static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);

/** Allocate and return a new workqueue_entry_t, set up to run the function
 * <b>fn</b> in the worker thread, and <b>reply_fn</b> in the main
 * thread. See threadpool_queue_work() for full documentation. */
static workqueue_entry_t *
workqueue_entry_new(workqueue_reply_t (*fn)(void*, void*),
                    void (*reply_fn)(void*),
                    void *arg)
{
  workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
  ent->fn = fn;
  ent->reply_fn = reply_fn;
  ent->arg = arg;
  ent->priority = WQ_PRI_HIGH;
  return ent;
}

#define workqueue_entry_free(ent) \
  FREE_AND_NULL(workqueue_entry_t, workqueue_entry_free_, (ent))

/**
 * Release all storage held in <b>ent</b>. Call only when <b>ent</b> is not on
 * any queue.
 */
static void
workqueue_entry_free_(workqueue_entry_t *ent)
{
  if (!ent)
    return;
  memset(ent, 0xf0, sizeof(*ent));
  tor_free(ent);
}

/**
 * Cancel a workqueue_entry_t that has been returned from
 * threadpool_queue_work.
 *
 * You must not call this function on any work whose reply function has been
 * executed in the main thread; that will cause undefined behavior (probably,
 * a crash).
 *
 * If the work is cancelled, this function returns the argument passed to the
 * work function. It is the caller's responsibility to free this storage.
 *
 * This function will have no effect if the worker thread has already executed
 * or begun to execute the work item. In that case, it will return NULL.
 */
void *
workqueue_entry_cancel(workqueue_entry_t *ent)
{
  int cancelled = 0;
  void *result = NULL;
  tor_mutex_acquire(&ent->on_pool->lock);
  workqueue_priority_t prio = ent->priority;
  if (ent->pending) {
    TOR_TAILQ_REMOVE(&ent->on_pool->work[prio], ent, next_work);
    cancelled = 1;
    result = ent->arg;
  }
  tor_mutex_release(&ent->on_pool->lock);

  if (cancelled) {
    workqueue_entry_free(ent);
  }
  return result;
}
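
/* Cancellation races with the workers, so a caller must handle both
 * outcomes. A sketch, with `ent` and `job_free` hypothetical:
 *
 *    void *arg = workqueue_entry_cancel(ent);
 *    if (arg) {
 *      // Still pending: ownership of the argument returns to us.
 *      job_free(arg);
 *    } else {
 *      // A worker already has (or had) it; the reply function will still
 *      // run on the main thread and should free the argument as usual.
 *    }
 */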

/** Return true iff there is work available for <b>thread</b>: either a
 * pending entry in one of its pool's work queues, or an update that this
 * thread has not yet run.
 *
 * The caller must hold the lock on the thread's pool. */
static int
worker_thread_has_work(workerthread_t *thread)
{
  unsigned i;
  for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
    if (!TOR_TAILQ_EMPTY(&thread->in_pool->work[i]))
      return 1;
  }
  return thread->generation != thread->in_pool->generation;
}

/** Extract the next workqueue_entry_t from the thread's pool, removing
 * it from the relevant queues and marking it as non-pending.
 *
 * The caller must hold the lock. */
static workqueue_entry_t *
worker_thread_extract_next_work(workerthread_t *thread)
{
  threadpool_t *pool = thread->in_pool;
  work_tailq_t *queue = NULL, *this_queue;
  unsigned i;
  for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
    this_queue = &pool->work[i];
    if (!TOR_TAILQ_EMPTY(this_queue)) {
      queue = this_queue;
      if (! tor_weak_random_one_in_n(&pool->weak_rng,
                                     thread->lower_priority_chance)) {
        /* Usually we'll just break now, so that we can get out of the loop
         * and use the queue where we found work. But with a small
         * probability, we'll keep looking for lower priority work, so that
         * we don't ignore our low-priority queues entirely. */
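        /* Concretely: a thread whose lower_priority_chance is
         * CHANCE_PERMISSIVE (37, defined below) will read past a nonempty
         * queue here about one time in 37; one using CHANCE_STRICT
         * (INT32_MAX) effectively never will. */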
        break;
      }
    }
  }

  if (queue == NULL)
    return NULL;

  workqueue_entry_t *work = TOR_TAILQ_FIRST(queue);
  TOR_TAILQ_REMOVE(queue, work, next_work);
  work->pending = 0;
  return work;
}

/**
 * Main function for the worker thread.
 */
static void
worker_thread_main(void *thread_)
{
  workerthread_t *thread = thread_;
  threadpool_t *pool = thread->in_pool;
  workqueue_entry_t *work;
  workqueue_reply_t result;

  tor_mutex_acquire(&pool->lock);
  while (1) {
    /* lock must be held at this point. */
    while (worker_thread_has_work(thread)) {
      /* lock must be held at this point. */
      if (thread->in_pool->generation != thread->generation) {
        void *arg = thread->in_pool->update_args[thread->index];
        thread->in_pool->update_args[thread->index] = NULL;
        workqueue_reply_t (*update_fn)(void*,void*) =
          thread->in_pool->update_fn;
        thread->generation = thread->in_pool->generation;
        tor_mutex_release(&pool->lock);

        workqueue_reply_t r = update_fn(thread->state, arg);

        if (r != WQ_RPL_REPLY) {
          return;
        }

        tor_mutex_acquire(&pool->lock);
        continue;
      }
      work = worker_thread_extract_next_work(thread);
      if (BUG(work == NULL))
        break;
      tor_mutex_release(&pool->lock);

      /* We run the work function without holding the thread lock. This
       * is the main thread's first opportunity to give us more work. */
      result = work->fn(thread->state, work->arg);

      /* Queue the reply for the main thread. */
      queue_reply(thread->reply_queue, work);

      /* We may need to exit the thread. */
      if (result != WQ_RPL_REPLY) {
        return;
      }
      tor_mutex_acquire(&pool->lock);
    }
    /* At this point the lock is held, and there is no work in this thread's
     * queue. */

    /* TODO: support an idle-function */

    /* Okay. Now, wait till somebody has work for us. */
    if (tor_cond_wait(&pool->condition, &pool->lock, NULL) < 0) {
      log_warn(LD_GENERAL, "tor_cond_wait failed.");
    }
  }
}

/** Put a reply on the reply queue. The reply must not currently be on
 * any thread's work queue. */
static void
queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
{
  int was_empty;
  tor_mutex_acquire(&queue->lock);
  was_empty = TOR_TAILQ_EMPTY(&queue->answers);
  TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
  tor_mutex_release(&queue->lock);

  if (was_empty) {
    if (queue->alert.alert_fn(queue->alert.write_fd) < 0) {
      /* XXXX complain! */
    }
  }
}

/** Allocate and start a new worker thread to use state object <b>state</b>,
 * and send responses to <b>replyqueue</b>. */
static workerthread_t *
workerthread_new(int32_t lower_priority_chance,
                 void *state, threadpool_t *pool, replyqueue_t *replyqueue)
{
  workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  thr->state = state;
  thr->reply_queue = replyqueue;
  thr->in_pool = pool;
  thr->lower_priority_chance = lower_priority_chance;

  if (spawn_func(worker_thread_main, thr) < 0) {
    //LCOV_EXCL_START
    tor_assert_nonfatal_unreached();
    log_err(LD_GENERAL, "Can't launch worker thread.");
    tor_free(thr);
    return NULL;
    //LCOV_EXCL_STOP
  }

  return thr;
}

/**
 * Queue an item of work for a thread in a thread pool. The function
 * <b>fn</b> will be run in a worker thread, and will receive as arguments the
 * thread's state object, and the provided object <b>arg</b>. It must return
 * one of WQ_RPL_REPLY, WQ_RPL_ERROR, or WQ_RPL_SHUTDOWN.
 *
 * Regardless of its return value, the function <b>reply_fn</b> will later be
 * run in the main thread when it invokes replyqueue_process(), and will
 * receive as its argument the same <b>arg</b> object. It's the reply
 * function's responsibility to free the work object.
 *
 * On success, return a workqueue_entry_t object that can be passed to
 * workqueue_entry_cancel(). On failure, return NULL. (Failure is not
 * currently possible, but callers should check anyway.)
 *
 * Items are executed in a loose priority order -- each thread will usually
 * take from the queued work with the highest priority, but will occasionally
 * visit lower-priority queues to keep them from starving completely.
 *
 * Note that because of priorities and thread behavior, work items may not
 * be executed strictly in order.
 */
workqueue_entry_t *
threadpool_queue_work_priority(threadpool_t *pool,
                               workqueue_priority_t prio,
                               workqueue_reply_t (*fn)(void *, void *),
                               void (*reply_fn)(void *),
                               void *arg)
{
  tor_assert(((int)prio) >= WORKQUEUE_PRIORITY_FIRST &&
             ((int)prio) <= WORKQUEUE_PRIORITY_LAST);

  workqueue_entry_t *ent = workqueue_entry_new(fn, reply_fn, arg);
  ent->on_pool = pool;
  ent->pending = 1;
  ent->priority = prio;

  tor_mutex_acquire(&pool->lock);

  TOR_TAILQ_INSERT_TAIL(&pool->work[prio], ent, next_work);

  tor_cond_signal_one(&pool->condition);

  tor_mutex_release(&pool->lock);

  return ent;
}

/** As threadpool_queue_work_priority(), but assumes WQ_PRI_HIGH */
workqueue_entry_t *
threadpool_queue_work(threadpool_t *pool,
                      workqueue_reply_t (*fn)(void *, void *),
                      void (*reply_fn)(void *),
                      void *arg)
{
  return threadpool_queue_work_priority(pool, WQ_PRI_HIGH, fn, reply_fn, arg);
}
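
/* A sketch of a typical fn/reply_fn pair; `job_t`, `do_expensive_part`,
 * `use_answer`, and `job_free` are hypothetical. `fn` runs on a worker
 * thread against that thread's state; `reply_fn` runs later on the main
 * thread and owns `arg`:
 *
 *    static workqueue_reply_t
 *    my_work_fn(void *state, void *arg)
 *    {
 *      job_t *job = arg;
 *      (void) state;
 *      job->answer = do_expensive_part(job);
 *      return WQ_RPL_REPLY;
 *    }
 *    static void
 *    my_reply_fn(void *arg)
 *    {
 *      job_t *job = arg;
 *      use_answer(job->answer);
 *      job_free(job); // the reply function frees the work object
 *    }
 */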

/**
 * Queue a copy of a work item for every thread in a pool. This can be used,
 * for example, to tell the threads to update some parameter in their states.
 *
 * Arguments are as for <b>threadpool_queue_work</b>, except that the
 * <b>arg</b> value is passed to <b>dup_fn</b> once per thread to make a
 * copy of it.
 *
 * UPDATE FUNCTIONS MUST BE IDEMPOTENT. We do not guarantee that every update
 * will be run. If a new update is scheduled before the old update finishes
 * running, then the new one will replace the old in any threads that haven't
 * run it yet.
 *
 * Return 0 on success, -1 on failure.
 */
int
threadpool_queue_update(threadpool_t *pool,
                        void *(*dup_fn)(void *),
                        workqueue_reply_t (*fn)(void *, void *),
                        void (*free_fn)(void *),
                        void *arg)
{
  int i, n_threads;
  void (*old_args_free_fn)(void *arg);
  void **old_args;
  void **new_args;

  tor_mutex_acquire(&pool->lock);
  n_threads = pool->n_threads;
  old_args = pool->update_args;
  old_args_free_fn = pool->free_update_arg_fn;

  new_args = tor_calloc(n_threads, sizeof(void*));
  for (i = 0; i < n_threads; ++i) {
    if (dup_fn)
      new_args[i] = dup_fn(arg);
    else
      new_args[i] = arg;
  }

  pool->update_args = new_args;
  pool->free_update_arg_fn = free_fn;
  pool->update_fn = fn;
  ++pool->generation;

  tor_cond_signal_all(&pool->condition);

  tor_mutex_release(&pool->lock);

  if (old_args) {
    for (i = 0; i < n_threads; ++i) {
      if (old_args[i] && old_args_free_fn)
        old_args_free_fn(old_args[i]);
    }
    tor_free(old_args);
  }

  return 0;
}
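
/* Updates suit "swap out a shared parameter" operations, since they must be
 * idempotent and may be skipped. A sketch with a hypothetical per-thread
 * state_t that holds a key:
 *
 *    static void *
 *    dup_key(void *arg)
 *    {
 *      return key_copy(arg); // one private copy per thread
 *    }
 *    static workqueue_reply_t
 *    update_fn(void *state_, void *arg)
 *    {
 *      state_t *st = state_;
 *      key_free(st->key);
 *      st->key = arg; // takes ownership of this thread's copy
 *      return WQ_RPL_REPLY;
 *    }
 *    ...
 *    threadpool_queue_update(pool, dup_key, update_fn, key_free, new_key);
 */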

/** Don't have more than this many threads per pool. */
#define MAX_THREADS 1024

/** For half of our threads, choose lower priority queues with probability
 * 1/N for each of these values. Both are chosen somewhat arbitrarily. If
 * CHANCE_PERMISSIVE is too low, then we have a risk of low-priority tasks
 * grabbing half of the threads. If it's too high, we have a risk of
 * low-priority tasks stalling forever. */
#define CHANCE_PERMISSIVE 37
#define CHANCE_STRICT INT32_MAX

/** Launch threads until we have <b>n</b>. */
static int
threadpool_start_threads(threadpool_t *pool, int n)
{
  if (BUG(n < 0))
    return -1; // LCOV_EXCL_LINE
  if (n > MAX_THREADS)
    n = MAX_THREADS;

  tor_mutex_acquire(&pool->lock);

  if (pool->n_threads < n)
    pool->threads = tor_reallocarray(pool->threads,
                                     sizeof(workerthread_t*), n);

  while (pool->n_threads < n) {
    /* For half of our threads, we'll choose lower priorities permissively;
     * for the other half, we'll stick more strictly to higher priorities.
     * This keeps slow low-priority tasks from taking over completely. */
    int32_t chance = (pool->n_threads & 1) ? CHANCE_STRICT : CHANCE_PERMISSIVE;

    void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
    workerthread_t *thr = workerthread_new(chance,
                                           state, pool, pool->reply_queue);

    if (!thr) {
      //LCOV_EXCL_START
      tor_assert_nonfatal_unreached();
      pool->free_thread_state_fn(state);
      tor_mutex_release(&pool->lock);
      return -1;
      //LCOV_EXCL_STOP
    }
    thr->index = pool->n_threads;
    pool->threads[pool->n_threads++] = thr;
  }
  tor_mutex_release(&pool->lock);

  return 0;
}

/**
 * Construct a new thread pool with <b>n</b> worker threads, configured to
 * send their output to <b>replyqueue</b>. The threads' states will be
 * constructed with the <b>new_thread_state_fn</b> call, receiving <b>arg</b>
 * as its argument. When the threads close, they will call
 * <b>free_thread_state_fn</b> on their states.
 */
threadpool_t *
threadpool_new(int n_threads,
               replyqueue_t *replyqueue,
               void *(*new_thread_state_fn)(void*),
               void (*free_thread_state_fn)(void*),
               void *arg)
{
  threadpool_t *pool;
  pool = tor_malloc_zero(sizeof(threadpool_t));
  tor_mutex_init_nonrecursive(&pool->lock);
  tor_cond_init(&pool->condition);
  unsigned i;
  for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
    TOR_TAILQ_INIT(&pool->work[i]);
  }
  {
    unsigned seed;
    crypto_rand((void*)&seed, sizeof(seed));
    tor_init_weak_random(&pool->weak_rng, seed);
  }

  pool->new_thread_state_fn = new_thread_state_fn;
  pool->new_thread_state_arg = arg;
  pool->free_thread_state_fn = free_thread_state_fn;
  pool->reply_queue = replyqueue;

  if (threadpool_start_threads(pool, n_threads) < 0) {
    //LCOV_EXCL_START
    tor_assert_nonfatal_unreached();
    tor_cond_uninit(&pool->condition);
    tor_mutex_uninit(&pool->lock);
    tor_free(pool);
    return NULL;
    //LCOV_EXCL_STOP
  }

  return pool;
}
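
/* The state functions give each worker its own heap-allocated scratch
 * space, constructed from the caller-supplied <b>arg</b>. A sketch with a
 * hypothetical state_t:
 *
 *    static void *
 *    my_new_state(void *arg)
 *    {
 *      (void) arg;
 *      return tor_malloc_zero(sizeof(state_t));
 *    }
 *    static void
 *    my_free_state(void *state)
 *    {
 *      tor_free(state);
 *    }
 */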

/** Return the reply queue associated with a given thread pool. */
replyqueue_t *
threadpool_get_replyqueue(threadpool_t *tp)
{
  return tp->reply_queue;
}

/** Allocate a new reply queue. Reply queues are used to pass results from
 * worker threads to the main thread. Since the main thread is running an
 * IO-centric event loop, it needs to be woken up by means other than a
 * condition variable. */
replyqueue_t *
replyqueue_new(uint32_t alertsocks_flags)
{
  replyqueue_t *rq;

  rq = tor_malloc_zero(sizeof(replyqueue_t));
  if (alert_sockets_create(&rq->alert, alertsocks_flags) < 0) {
    //LCOV_EXCL_START
    tor_free(rq);
    return NULL;
    //LCOV_EXCL_STOP
  }

  tor_mutex_init(&rq->lock);
  TOR_TAILQ_INIT(&rq->answers);

  return rq;
}

/** Internal: Run from the libevent mainloop when there is work to handle in
 * the reply queue handler. */
static void
reply_event_cb(evutil_socket_t sock, short events, void *arg)
{
  threadpool_t *tp = arg;
  (void) sock;
  (void) events;
  replyqueue_process(tp->reply_queue);
  if (tp->reply_cb)
    tp->reply_cb(tp);
}

/** Register the threadpool <b>tp</b>'s reply queue with Tor's libevent
 * mainloop. If <b>cb</b> is provided, it is run after each time there is
 * work to process from the reply queue. Return 0 on success, -1 on
 * failure.
 */
int
threadpool_register_reply_event(threadpool_t *tp,
                                void (*cb)(threadpool_t *tp))
{
  struct event_base *base = tor_libevent_get_base();

  if (tp->reply_event) {
    tor_event_free(tp->reply_event);
  }
  tp->reply_event = tor_event_new(base,
                                  tp->reply_queue->alert.read_fd,
                                  EV_READ|EV_PERSIST,
                                  reply_event_cb,
                                  tp);
  tor_assert(tp->reply_event);
  tp->reply_cb = cb;
  return event_add(tp->reply_event, NULL);
}
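
/* A sketch of registering the reply event; `after_replies_cb` is a
 * hypothetical hook that runs after each batch of replies is processed
 * (pass NULL if no hook is needed):
 *
 *    static void
 *    after_replies_cb(threadpool_t *tp)
 *    {
 *      (void) tp;
 *      // e.g. decide whether to queue more work
 *    }
 *    ...
 *    if (threadpool_register_reply_event(pool, after_replies_cb) < 0)
 *      log_warn(LD_GENERAL, "Couldn't register reply event.");
 */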

/**
 * Process all pending replies on a reply queue. The main thread should call
 * this function every time the socket returned by replyqueue_get_socket() is
 * readable.
 */
void
replyqueue_process(replyqueue_t *queue)
{
  int r = queue->alert.drain_fn(queue->alert.read_fd);
  if (r < 0) {
    //LCOV_EXCL_START
    static ratelim_t warn_limit = RATELIM_INIT(7200);
    log_fn_ratelim(&warn_limit, LOG_WARN, LD_GENERAL,
                   "Failure from drain_fd: %s",
                   tor_socket_strerror(-r));
    //LCOV_EXCL_STOP
  }

  tor_mutex_acquire(&queue->lock);
  while (!TOR_TAILQ_EMPTY(&queue->answers)) {
    /* lock must be held at this point. */
    workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
    TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
    tor_mutex_release(&queue->lock);
    work->on_pool = NULL;

    work->reply_fn(work->arg);
    workqueue_entry_free(work);

    tor_mutex_acquire(&queue->lock);
  }

  tor_mutex_release(&queue->lock);
}