workqueue.c
/* Copyright (c) 2013-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file workqueue.c
 *
 * \brief Implements worker threads, queues of work for them, and mechanisms
 * for them to send answers back to the main thread.
 *
 * The main structure here is a threadpool_t: it manages a set of worker
 * threads, a queue of pending work, and a reply queue. Every piece of work
 * is a workqueue_entry_t, containing data to process and a function to
 * process it with.
 *
 * The main thread informs the worker threads of pending work by using a
 * condition variable. The workers inform the main thread of completed work
 * by using an alert_sockets_t object, as implemented in lib/net/alertsock.c.
 *
 * The main thread can also queue an "update" that will be handled by all the
 * workers. This is useful for updating state that all the workers share.
 *
 * In Tor today, there is only one thread pool: it is used in cpuworker.c.
 */
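/*
 * Example usage (a sketch only, not code from this module). The helper
 * names my_state_new, my_state_free, my_spawn_fn, my_work_fn, my_reply_fn,
 * base, and job_arg are hypothetical; the workqueue types and functions are
 * the ones defined below.
 *
 *   threadpool_t *pool =
 *     threadpool_new(4,               // number of worker threads
 *                    my_state_new,    // allocates per-thread state
 *                    my_state_free,   // frees per-thread state
 *                    NULL,            // argument for my_state_new
 *                    my_spawn_fn);    // launches an OS thread
 *   replyqueue_t *rq = replyqueue_new(0, pool); // 0: alert_sockets flags
 *   replyqueue_register_reply_event(rq, base);  // hook into libevent
 *
 *   // my_work_fn runs in a worker; my_reply_fn later in the main thread.
 *   threadpool_queue_work(pool, my_work_fn, my_reply_fn, rq, job_arg);
 */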
#include "orconfig.h"
#include "lib/evloop/compat_libevent.h"
#include "lib/evloop/workqueue.h"

#include "lib/crypt_ops/crypto_rand.h"
#include "lib/intmath/weakrng.h"
#include "lib/log/ratelim.h"
#include "lib/log/log.h"
#include "lib/log/util_bug.h"
#include "lib/net/alertsock.h"
#include "lib/net/socket.h"

#include "ext/tor_queue.h"
#include <event2/event.h>
#include <string.h>

#define WORKQUEUE_PRIORITY_FIRST WQ_PRI_HIGH
#define WORKQUEUE_PRIORITY_LAST WQ_PRI_LOW
#define WORKQUEUE_N_PRIORITIES (((int) WORKQUEUE_PRIORITY_LAST)+1)

TOR_TAILQ_HEAD(work_tailq_t, workqueue_entry_s);
typedef struct work_tailq_t work_tailq_t;
struct threadpool_s {
  /** An array of pointers to workerthread_t: one for each running worker
   * thread. */
  struct workerthread_s **threads;

  /** Condition variable that we wait on when we have no work, and which
   * gets signaled when our queue becomes nonempty. */
  tor_cond_t condition;
  /** Queues of pending work that we have to do. The queue with priority
   * <b>p</b> is work[p]. */
  work_tailq_t work[WORKQUEUE_N_PRIORITIES];

  /** The current 'update generation' of the threadpool. Any thread that is
   * at an earlier generation needs to run the update function. */
  unsigned generation;
  /** Flag to tell the worker threads to stop. */
  int shutdown;

  /** Function that should be run for updates on each thread. */
  workqueue_reply_t (*update_fn)(void *, void *);
  /** Function to free update arguments if they can't be run. */
  void (*free_update_arg_fn)(void *);
  /** Array of n_threads update arguments. */
  void **update_args;

  /** Callback that is run after a reply queue has processed work. */
  void (*reply_cb)(threadpool_t *, replyqueue_t *);

  /** Number of elements in threads. */
  int n_threads;
  /** Mutex to protect all the above fields. */
  tor_mutex_t lock;

  /** Functions used to allocate and free thread state. */
  void *(*new_thread_state_fn)(void*);
  void (*free_thread_state_fn)(void*);
  void *new_thread_state_arg;

  /** Function to start a thread; returns NULL on error. */
  tor_thread_t *(*thread_spawn_fn)(void (*func)(void *), void *data);
};
/** Used to put a workqueue_priority_t value into a bitfield. */
#define workqueue_priority_bitfield_t ENUM_BF(workqueue_priority_t)
/** Number of bits needed to hold all legal values of workqueue_priority_t */
#define WORKQUEUE_PRIORITY_BITS 2

struct workqueue_entry_s {
  /** The next workqueue_entry_t that's pending on the same thread or
   * reply queue. */
  TOR_TAILQ_ENTRY(workqueue_entry_s) next_work;
  /** The threadpool to which this workqueue_entry_t was assigned. This field
   * is set when the workqueue_entry_t is created, and won't be cleared until
   * after it's handled in the main thread. */
  struct threadpool_s *on_pool;
  /** True iff this entry is waiting for a worker to start processing it. */
  uint8_t pending;
  /** Priority of this entry. */
  workqueue_priority_bitfield_t priority : WORKQUEUE_PRIORITY_BITS;
  /** Function to run in the worker thread. */
  workqueue_reply_t (*fn)(void *state, void *arg);
  /** Function to run while processing the reply queue. */
  void (*reply_fn)(void *arg, workqueue_reply_t reply_status);
  /** Linked reply queue. */
  replyqueue_t *reply_queue;
  /** Argument for the above functions. */
  void *arg;
  /** Reply status of the worker thread function after it has returned. */
  workqueue_reply_t reply_status;
};

struct replyqueue_s {
  /** Mutex to protect the answers field. */
  tor_mutex_t lock;
  /** Doubly-linked list of answers that the reply queue needs to handle. */
  TOR_TAILQ_HEAD(, workqueue_entry_s) answers;
  /** Mechanism to wake up the main thread when it is receiving answers. */
  alert_sockets_t alert;
  /** Event to notice when another thread has sent a reply. */
  struct event *reply_event;
  /** The threadpool that uses this reply queue. */
  struct threadpool_s *pool;
};
/** A worker thread represents a single thread in a thread pool. */
typedef struct workerthread_s {
  /** Which thread is this? In range 0..in_pool->n_threads-1. */
  int index;
  /** The tor thread object. */
  tor_thread_t *thread;
  /** The pool this thread is a part of. */
  struct threadpool_s *in_pool;
  /** User-supplied state field that we pass to the worker functions of each
   * work item. */
  void *state;
  /** The current update generation of this thread. */
  unsigned generation;
  /** One over the probability of taking work from a lower-priority queue. */
  int32_t lower_priority_chance;
} workerthread_t;

static void queue_reply(replyqueue_t *queue, workqueue_entry_t *work);
/** Allocate and return a new workqueue_entry_t, set up to run the function
 * <b>fn</b> in the worker thread, and <b>reply_fn</b> in the main
 * thread. See threadpool_queue_work() for full documentation. */
static workqueue_entry_t *
workqueue_entry_new(workqueue_reply_t (*fn)(void*, void*),
                    void (*reply_fn)(void*, workqueue_reply_t),
                    replyqueue_t *reply_queue,
                    void *arg)
{
  workqueue_entry_t *ent = tor_malloc_zero(sizeof(workqueue_entry_t));
  ent->fn = fn;
  ent->reply_fn = reply_fn;
  ent->reply_queue = reply_queue;
  ent->arg = arg;
  ent->priority = WQ_PRI_HIGH;
  return ent;
}
#define workqueue_entry_free(ent) \
  FREE_AND_NULL(workqueue_entry_t, workqueue_entry_free_, (ent))

/**
 * Release all storage held in <b>ent</b>. Call only when <b>ent</b> is not on
 * any queue.
 */
static void
workqueue_entry_free_(workqueue_entry_t *ent)
{
  if (!ent)
    return;
  /* Poison the freed memory, to make use-after-free bugs easier to spot. */
  memset(ent, 0xf0, sizeof(*ent));
  tor_free(ent);
}
/**
 * Cancel a workqueue_entry_t that has been returned from
 * threadpool_queue_work.
 *
 * You must not call this function on any work whose reply function has been
 * executed in the main thread; that will cause undefined behavior (probably,
 * a crash).
 *
 * If the work is cancelled, this function returns the argument passed to the
 * work function. It is the caller's responsibility to free this storage.
 *
 * This function will have no effect if the worker thread has already executed
 * or begun to execute the work item. In that case, it will return NULL.
 */
void *
workqueue_entry_cancel(workqueue_entry_t *ent)
{
  int cancelled = 0;
  void *result = NULL;
  tor_mutex_acquire(&ent->on_pool->lock);
  workqueue_priority_t prio = ent->priority;
  if (ent->pending) {
    TOR_TAILQ_REMOVE(&ent->on_pool->work[prio], ent, next_work);
    cancelled = 1;
    result = ent->arg;
  }
  tor_mutex_release(&ent->on_pool->lock);

  if (cancelled) {
    workqueue_entry_free(ent);
  }
  return result;
}
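/*
 * Example (a sketch): cancelling work that no worker has picked up yet. The
 * names pool, rq, my_work_fn, my_reply_fn, job, and job_free are
 * hypothetical, as in the file-header example.
 *
 *   workqueue_entry_t *ent =
 *     threadpool_queue_work(pool, my_work_fn, my_reply_fn, rq, job);
 *   ...
 *   void *arg = workqueue_entry_cancel(ent);
 *   if (arg) {
 *     // Still pending: neither my_work_fn nor my_reply_fn will run,
 *     // so the caller must free the argument itself.
 *     job_free(arg);
 *   }
 */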
/** Return true iff <b>thread</b> has something to do: either pending work in
 * its pool's queues, or an update it has not yet run.
 *
 * The caller must hold the pool's lock. */
static int
worker_thread_has_work(workerthread_t *thread)
{
  unsigned i;
  for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
    if (!TOR_TAILQ_EMPTY(&thread->in_pool->work[i]))
      return 1;
  }
  return thread->generation != thread->in_pool->generation;
}
/** Extract the next workqueue_entry_t from the thread's pool, removing
 * it from the relevant queue and marking it as non-pending.
 *
 * The caller must hold the lock. */
static workqueue_entry_t *
worker_thread_extract_next_work(workerthread_t *thread)
{
  threadpool_t *pool = thread->in_pool;
  work_tailq_t *queue = NULL, *this_queue;
  unsigned i;
  for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
    this_queue = &pool->work[i];
    if (!TOR_TAILQ_EMPTY(this_queue)) {
      queue = this_queue;
      if (! crypto_fast_rng_one_in_n(get_thread_fast_rng(),
                                     thread->lower_priority_chance)) {
        /* Usually we'll just break now, so that we can get out of the loop
         * and use the queue where we found work. But with a small
         * probability, we'll keep looking for lower priority work, so that
         * we don't ignore our low-priority queues entirely. */
        break;
      }
    }
  }
  if (queue == NULL)
    return NULL;

  workqueue_entry_t *work = TOR_TAILQ_FIRST(queue);
  TOR_TAILQ_REMOVE(queue, work, next_work);
  work->pending = 0;
  return work;
}
/**
 * Main function for the worker thread.
 */
static void
worker_thread_main(void *thread_)
{
  workerthread_t *thread = thread_;
  threadpool_t *pool = thread->in_pool;
  workqueue_entry_t *work;
  workqueue_reply_t result;

  tor_mutex_acquire(&pool->lock);
  while (1) {
    /* lock must be held at this point. */
    while (worker_thread_has_work(thread)) {
      /* lock must be held at this point. */
      if (thread->in_pool->generation != thread->generation) {
        void *arg = thread->in_pool->update_args[thread->index];
        thread->in_pool->update_args[thread->index] = NULL;
        workqueue_reply_t (*update_fn)(void*,void*) =
          thread->in_pool->update_fn;
        thread->generation = thread->in_pool->generation;
        tor_mutex_release(&pool->lock);

        workqueue_reply_t r = update_fn(thread->state, arg);
        if (r != WQ_RPL_REPLY) {
          return;
        }

        tor_mutex_acquire(&pool->lock);
        continue;
      }
      work = worker_thread_extract_next_work(thread);
      if (BUG(work == NULL)) {
        break;
      }
      if (pool->shutdown) {
        /* If the pool wants to shut down, we still need to reply so
         * that the reply functions have a chance to free memory. */
        tor_mutex_release(&pool->lock);
        work->reply_status = WQ_RPL_SHUTDOWN;
        queue_reply(work->reply_queue, work);
        tor_mutex_acquire(&pool->lock);
      } else {
        tor_mutex_release(&pool->lock);

        /* We run the work function without holding the thread lock. This
         * is the main thread's first opportunity to give us more work. */
        result = work->fn(thread->state, work->arg);

        /* Queue the reply for the main thread. */
        work->reply_status = result;
        queue_reply(work->reply_queue, work);

        /* We may need to exit the thread. */
        if (result != WQ_RPL_REPLY) {
          return;
        }

        tor_mutex_acquire(&pool->lock);
      }
    }
    /* At this point the lock is held, and there is no work in this thread's
     * queue. */
    if (pool->shutdown) {
      tor_mutex_release(&pool->lock);
      return;
    }

    /* TODO: support an idle-function */

    /* Okay. Now, wait till somebody has work for us. */
    if (tor_cond_wait(&pool->condition, &pool->lock, NULL) < 0) {
      log_warn(LD_GENERAL, "tor_cond_wait() failed.");
    }
  }
}
/** Put a reply on the reply queue. The reply must not currently be on
 * any thread's work queue. */
static void
queue_reply(replyqueue_t *queue, workqueue_entry_t *work)
{
  int was_empty;
  tor_mutex_acquire(&queue->lock);
  was_empty = TOR_TAILQ_EMPTY(&queue->answers);
  TOR_TAILQ_INSERT_TAIL(&queue->answers, work, next_work);
  tor_mutex_release(&queue->lock);

  if (was_empty) {
    if (queue->alert.alert_fn(queue->alert.write_fd) < 0) {
      /* XXXX complain! */
    }
  }
}
/** Allocate and start a new worker thread to use state object <b>state</b>. */
static workerthread_t *
workerthread_new(int32_t lower_priority_chance,
                 void *state, threadpool_t *pool)
{
  workerthread_t *thr = tor_malloc_zero(sizeof(workerthread_t));
  thr->state = state;
  thr->in_pool = pool;
  thr->lower_priority_chance = lower_priority_chance;

  tor_assert(pool->thread_spawn_fn != NULL);
  tor_thread_t *thread = pool->thread_spawn_fn(worker_thread_main, thr);
  if (thread == NULL) {
    //LCOV_EXCL_START
    tor_assert_nonfatal_unreached();
    log_err(LD_GENERAL, "Can't launch worker thread.");
    tor_free(thr);
    return NULL;
    //LCOV_EXCL_STOP
  }
  thr->thread = thread;
  return thr;
}
/** Wait until <b>thr</b>'s thread function has returned. */
static void
workerthread_join(workerthread_t *thr)
{
  if (join_thread(thr->thread) != 0) {
    log_err(LD_GENERAL, "Could not join workerthread.");
  }
}

/** Release the storage held by <b>thr</b>. Call only after
 * workerthread_join(), and after freeing thr->state. */
static void
workerthread_free(workerthread_t *thr)
{
  free_thread(thr->thread);
  tor_free(thr); /* The struct itself was tor_malloc_zero()'d; free it too. */
}
/**
 * Queue an item of work for a thread in a thread pool. The function
 * <b>fn</b> will be run in a worker thread, and will receive as arguments the
 * thread's state object, and the provided object <b>arg</b>. It must return
 * one of WQ_RPL_REPLY, WQ_RPL_ERROR, or WQ_RPL_SHUTDOWN.
 *
 * Regardless of its return value, the function <b>reply_fn</b> will later be
 * run in the main thread when it invokes replyqueue_process(), and will
 * receive as its argument the same <b>arg</b> object. It's the reply
 * function's responsibility to free the work object.
 *
 * On success, return a workqueue_entry_t object that can be passed to
 * workqueue_entry_cancel(). On failure, return NULL.
 *
 * Items are executed in a loose priority order -- each thread will usually
 * take from the queued work with the highest priority, but will occasionally
 * visit lower-priority queues to keep them from starving completely.
 *
 * Note that because of priorities and thread behavior, work items may not
 * be executed strictly in order.
 */
workqueue_entry_t *
threadpool_queue_work_priority(threadpool_t *pool,
                               workqueue_priority_t prio,
                               workqueue_reply_t (*fn)(void *, void *),
                               void (*reply_fn)(void *, workqueue_reply_t),
                               replyqueue_t *reply_queue,
                               void *arg)
{
  tor_assert(((int)prio) >= WORKQUEUE_PRIORITY_FIRST &&
             ((int)prio) <= WORKQUEUE_PRIORITY_LAST);

  tor_mutex_acquire(&pool->lock);
  if (pool->shutdown) {
    /* Release the lock before bailing out; returning with it held would
     * deadlock the pool. */
    tor_mutex_release(&pool->lock);
    return NULL;
  }

  workqueue_entry_t *ent = workqueue_entry_new(fn, reply_fn, reply_queue, arg);
  ent->on_pool = pool;
  ent->pending = 1;
  ent->priority = prio;

  TOR_TAILQ_INSERT_TAIL(&pool->work[prio], ent, next_work);

  tor_cond_signal_one(&pool->condition);
  tor_mutex_release(&pool->lock);
  return ent;
}

/** As threadpool_queue_work_priority(), but assumes WQ_PRI_HIGH */
workqueue_entry_t *
threadpool_queue_work(threadpool_t *pool,
                      workqueue_reply_t (*fn)(void *, void *),
                      void (*reply_fn)(void *, workqueue_reply_t),
                      replyqueue_t *reply_queue,
                      void *arg)
{
  return threadpool_queue_work_priority(pool, WQ_PRI_HIGH, fn,
                                        reply_fn, reply_queue, arg);
}
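/*
 * Example (a sketch) of a matching work/reply function pair. my_job_t,
 * compute(), consume(), and my_job_free() are hypothetical; the signatures
 * and WQ_RPL_* values are the ones used by this module.
 *
 *   static workqueue_reply_t
 *   my_work_fn(void *state, void *arg)   // runs in a worker thread
 *   {
 *     my_job_t *job = arg;
 *     job->output = compute(state, job->input);
 *     return WQ_RPL_REPLY;               // reply, and keep the worker alive
 *   }
 *
 *   static void
 *   my_reply_fn(void *arg, workqueue_reply_t status)  // runs in main thread
 *   {
 *     my_job_t *job = arg;
 *     if (status == WQ_RPL_REPLY)
 *       consume(job->output);
 *     my_job_free(job);   // the reply function frees the work object
 *   }
 */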
/**
 * Queue a copy of a work item for every thread in a pool. This can be used,
 * for example, to tell the threads to update some parameter in their states.
 *
 * Arguments are as for <b>threadpool_queue_work</b>, except that the
 * <b>arg</b> value is passed to <b>dup_fn</b> once per thread to
 * make a copy of it.
 *
 * UPDATE FUNCTIONS MUST BE IDEMPOTENT. We do not guarantee that every update
 * will be run. If a new update is scheduled before the old update finishes
 * running, then the new will replace the old in any threads that haven't run
 * it yet.
 *
 * Return 0 on success, -1 on failure.
 */
int
threadpool_queue_update(threadpool_t *pool,
                        void *(*dup_fn)(void *),
                        workqueue_reply_t (*fn)(void *, void *),
                        void (*free_fn)(void *),
                        void *arg)
{
  int i, n_threads;
  void (*old_args_free_fn)(void *arg);
  void **old_args;
  void **new_args;

  tor_mutex_acquire(&pool->lock);
  if (pool->shutdown) {
    /* As above, don't return with the lock still held. */
    tor_mutex_release(&pool->lock);
    return -1;
  }
  n_threads = pool->n_threads;
  old_args = pool->update_args;
  old_args_free_fn = pool->free_update_arg_fn;

  new_args = tor_calloc(n_threads, sizeof(void*));
  for (i = 0; i < n_threads; ++i) {
    if (dup_fn)
      new_args[i] = dup_fn(arg);
    else
      new_args[i] = arg;
  }

  pool->update_args = new_args;
  pool->free_update_arg_fn = free_fn;
  pool->update_fn = fn;
  ++pool->generation;

  tor_cond_signal_all(&pool->condition);

  tor_mutex_release(&pool->lock);

  if (old_args) {
    for (i = 0; i < n_threads; ++i) {
      if (old_args[i] && old_args_free_fn)
        old_args_free_fn(old_args[i]);
    }
    tor_free(old_args);
  }

  return 0;
}
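/*
 * Example (a sketch): distributing a new shared key to every worker.
 * my_key_dup, my_key_free, and my_state_set_key are hypothetical. Note that
 * the update function must be idempotent, as documented above.
 *
 *   static workqueue_reply_t
 *   my_update_fn(void *state, void *arg)   // runs once in each worker
 *   {
 *     my_state_set_key(state, arg);
 *     my_key_free(arg);
 *     return WQ_RPL_REPLY;
 *   }
 *
 *   threadpool_queue_update(pool,
 *                           my_key_dup,    // copies new_key once per thread
 *                           my_update_fn,
 *                           my_key_free,   // frees copies that never ran
 *                           new_key);
 */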
/** Don't have more than this many threads per pool. */
#define MAX_THREADS 1024

/** For half of our threads, choose lower priority queues with probability
 * 1/N for each of these values. Both are chosen somewhat arbitrarily. If
 * CHANCE_PERMISSIVE is too low, then we have a risk of low-priority tasks
 * stalling forever. If it's too high, we have a risk of low-priority tasks
 * grabbing half of the threads. */
#define CHANCE_PERMISSIVE 37
#define CHANCE_STRICT INT32_MAX

/** Launch threads until we have <b>n</b>. */
static int
threadpool_start_threads(threadpool_t *pool, int n)
{
  if (BUG(n < 0))
    return -1; // LCOV_EXCL_LINE
  if (n > MAX_THREADS)
    n = MAX_THREADS;

  tor_mutex_acquire(&pool->lock);

  if (pool->n_threads < n)
    pool->threads = tor_reallocarray(pool->threads,
                                     sizeof(workerthread_t*), n);

  while (pool->n_threads < n) {
    /* For half of our threads, we'll choose lower priorities permissively;
     * for the other half, we'll stick more strictly to higher priorities.
     * This keeps slow low-priority tasks from taking over completely. */
    int32_t chance = (pool->n_threads & 1) ? CHANCE_STRICT : CHANCE_PERMISSIVE;

    void *state = pool->new_thread_state_fn(pool->new_thread_state_arg);
    workerthread_t *thr = workerthread_new(chance, state, pool);

    if (!thr) {
      //LCOV_EXCL_START
      tor_assert_nonfatal_unreached();
      pool->free_thread_state_fn(state);
      tor_mutex_release(&pool->lock);
      return -1;
      //LCOV_EXCL_STOP
    }
    thr->index = pool->n_threads;
    pool->threads[pool->n_threads++] = thr;
  }
  tor_mutex_release(&pool->lock);

  return 0;
}
/**
 * Construct a new thread pool with <b>n_threads</b> worker threads. The
 * threads' states will be constructed with the <b>new_thread_state_fn</b>
 * call, receiving <b>arg</b> as its argument. When the threads close, they
 * will call <b>free_thread_state_fn</b> on their states.
 */
threadpool_t *
threadpool_new(int n_threads,
               void *(*new_thread_state_fn)(void*),
               void (*free_thread_state_fn)(void*),
               void *arg,
               tor_thread_t *(*thread_spawn_fn)(void (*func)(void *),
                                                void *data))
{
  threadpool_t *pool;
  pool = tor_malloc_zero(sizeof(threadpool_t));
  tor_mutex_init_nonrecursive(&pool->lock);
  tor_cond_init(&pool->condition);
  unsigned i;
  for (i = WORKQUEUE_PRIORITY_FIRST; i <= WORKQUEUE_PRIORITY_LAST; ++i) {
    TOR_TAILQ_INIT(&pool->work[i]);
  }

  pool->new_thread_state_fn = new_thread_state_fn;
  pool->new_thread_state_arg = arg;
  pool->free_thread_state_fn = free_thread_state_fn;
  pool->thread_spawn_fn = thread_spawn_fn;

  if (threadpool_start_threads(pool, n_threads) < 0) {
    //LCOV_EXCL_START
    tor_assert_nonfatal_unreached();
    tor_cond_uninit(&pool->condition);
    tor_mutex_uninit(&pool->lock);
    tor_free(pool);
    return NULL;
    //LCOV_EXCL_STOP
  }

  return pool;
}
/** Stop all worker threads in <b>pool</b>: wake them, wait for them to
 * finish, and release their resources. Any work still queued is replied to
 * with WQ_RPL_SHUTDOWN rather than executed. */
void
threadpool_shutdown(threadpool_t *pool)
{
  tor_assert(pool != NULL);
  tor_mutex_acquire(&pool->lock);
  pool->shutdown = 1;
  tor_cond_signal_all(&pool->condition);

  for (int i = 0; i < pool->n_threads; i++) {
    workerthread_t *thread = pool->threads[i];
    tor_mutex_release(&pool->lock);
    workerthread_join(thread);
    tor_mutex_acquire(&pool->lock);
  }

  for (int i = 0; i < pool->n_threads; i++) {
    /* Free the state before the thread struct that points to it. */
    pool->free_thread_state_fn(pool->threads[i]->state);
    workerthread_free(pool->threads[i]);
  }
  tor_mutex_release(&pool->lock);
}
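/*
 * Example (a sketch): shutting down a pool. Work still queued at shutdown is
 * answered with WQ_RPL_SHUTDOWN instead of being executed, so a final
 * replyqueue_process() call gives the reply functions a chance to free
 * their arguments. pool and rq are as in the file-header example.
 *
 *   threadpool_shutdown(pool);
 *   replyqueue_process(rq);   // drain the WQ_RPL_SHUTDOWN replies
 */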
/** Return the thread pool associated with a given reply queue. */
threadpool_t *
replyqueue_get_threadpool(replyqueue_t *rq)
{
  return rq->pool;
}

/** Allocate a new reply queue. Reply queues are used to pass results from
 * worker threads to the main thread. Since the main thread is running an
 * IO-centric event loop, it needs to get woken up by means other than a
 * condition variable. */
replyqueue_t *
replyqueue_new(uint32_t alertsocks_flags, threadpool_t *pool)
{
  replyqueue_t *rq;

  rq = tor_malloc_zero(sizeof(replyqueue_t));
  if (alert_sockets_create(&rq->alert, alertsocks_flags) < 0) {
    //LCOV_EXCL_START
    tor_free(rq);
    return NULL;
    //LCOV_EXCL_STOP
  }
  rq->pool = pool;

  tor_mutex_init(&rq->lock);
  TOR_TAILQ_INIT(&rq->answers);

  return rq;
}
/** Internal: run from the libevent mainloop when there is work to handle
 * in the reply queue. */
static void
reply_event_cb(evutil_socket_t sock, short events, void *arg)
{
  replyqueue_t *reply_queue = arg;
  (void) sock;
  (void) events;
  replyqueue_process(reply_queue);
  if (reply_queue->pool && reply_queue->pool->reply_cb)
    reply_queue->pool->reply_cb(reply_queue->pool, reply_queue);
}

/** Register the reply queue with the given libevent mainloop. Return 0
 * on success, -1 on failure.
 */
int
replyqueue_register_reply_event(replyqueue_t *reply_queue,
                                struct event_base *base)
{
  if (reply_queue->reply_event) {
    tor_event_free(reply_queue->reply_event);
  }
  reply_queue->reply_event = tor_event_new(base,
                                           reply_queue->alert.read_fd,
                                           EV_READ|EV_PERSIST,
                                           reply_event_cb,
                                           reply_queue);
  tor_assert(reply_queue->reply_event);
  return event_add(reply_queue->reply_event, NULL);
}
/** Set a callback to be run after each time there is work to process
 * from a reply queue. */
void
threadpool_set_reply_cb(threadpool_t *tp,
                        void (*cb)(threadpool_t *tp, replyqueue_t *rq))
{
  tp->reply_cb = cb;
}
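/*
 * Example (a sketch): using the reply callback to flush batched state after
 * each round of replies. my_flush() is hypothetical.
 *
 *   static void
 *   my_reply_done_cb(threadpool_t *tp, replyqueue_t *rq)
 *   {
 *     (void)tp;
 *     (void)rq;
 *     my_flush();   // runs after replyqueue_process() handles a batch
 *   }
 *
 *   threadpool_set_reply_cb(pool, my_reply_done_cb);
 */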
/**
 * Process all pending replies on a reply queue. The main thread should call
 * this function every time the socket returned by replyqueue_get_socket() is
 * readable.
 */
void
replyqueue_process(replyqueue_t *queue)
{
  int r = queue->alert.drain_fn(queue->alert.read_fd);
  if (r < 0) {
    //LCOV_EXCL_START
    static ratelim_t warn_limit = RATELIM_INIT(7200);
    log_fn_ratelim(&warn_limit, LOG_WARN, LD_GENERAL,
                   "Failure from drain_fd: %s",
                   tor_socket_strerror(-r));
    //LCOV_EXCL_STOP
  }

  tor_mutex_acquire(&queue->lock);
  while (!TOR_TAILQ_EMPTY(&queue->answers)) {
    /* lock must be held at this point. */
    workqueue_entry_t *work = TOR_TAILQ_FIRST(&queue->answers);
    TOR_TAILQ_REMOVE(&queue->answers, work, next_work);
    tor_mutex_release(&queue->lock);
    work->on_pool = NULL;

    work->reply_fn(work->arg, work->reply_status);
    workqueue_entry_free(work);

    tor_mutex_acquire(&queue->lock);
  }

  tor_mutex_release(&queue->lock);
}