/* * Copyright (c) 2013-2016, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include "or.h"

#define TOR_CHANNEL_INTERNAL_ /* For channel_flush_some_cells() */
#include "channel.h"

#include "compat_libevent.h"
#define SCHEDULER_PRIVATE_
#include "scheduler.h"

#include <event2/event.h>

/*
 * Scheduler high/low watermarks
 */

static uint32_t sched_q_low_water = 16384;
static uint32_t sched_q_high_water = 32768;

/*
 * Maximum cells to flush in a single call to channel_flush_some_cells();
 * setting this low means more calls, but too high and we could overshoot
 * sched_q_high_water.
 */

static uint32_t sched_max_flush_cells = 16;
/**
 * \file scheduler.c
 * \brief Channel scheduling system: decides which channels should send and
 *   receive when.
 *
 * This module implements a scheduler algorithm, to decide
 * which channels should send/receive when.
 *
 * The earliest versions of Tor approximated a kind of round-robin system
 * among active connections, but only approximated it.
 *
 * Now, write scheduling works by keeping track of which channels can
 * accept cells, and have cells to write.  From the scheduler's perspective,
 * a channel can be in four possible states (see the sketch after this
 * comment):
 *
 * <ol>
 * <li>
 *   Not open for writes, no cells to send.
 *   <ul><li> Not much to do here, and the channel will have scheduler_state
 *     == SCHED_CHAN_IDLE
 *   <li> Transitions from:
 *     <ul>
 *     <li> Open for writes/has cells by simultaneously draining all circuit
 *       queues and filling the output buffer.
 *     </ul>
 *   <li> Transitions to:
 *     <ul>
 *     <li> Not open for writes/has cells by arrival of cells on an attached
 *       circuit (this would be driven from append_cell_to_circuit_queue())
 *     <li> Open for writes/no cells by a channel type specific path;
 *       driven from connection_or_flushed_some() for channel_tls_t.
 *     </ul>
 *   </ul>
 *
 * <li> Open for writes, no cells to send
 *   <ul>
 *   <li> Not much here either; this will be the state an idle but open
 *     channel can be expected to settle in.  It will have scheduler_state
 *     == SCHED_CHAN_WAITING_FOR_CELLS
 *   <li> Transitions from:
 *     <ul>
 *     <li> Not open for writes/no cells by flushing some of the output
 *       buffer.
 *     <li> Open for writes/has cells by the scheduler moving cells from
 *       circuit queues to channel output queue, but not having enough
 *       to fill the output queue.
 *     </ul>
 *   <li> Transitions to:
 *     <ul>
 *     <li> Open for writes/has cells by arrival of new cells on an attached
 *       circuit, in append_cell_to_circuit_queue()
 *     </ul>
 *   </ul>
 *
 * <li> Not open for writes, cells to send
 *   <ul>
 *   <li> This is the state of a busy circuit limited by output bandwidth;
 *     cells have piled up in the circuit queues waiting to be relayed.
 *     The channel will have scheduler_state == SCHED_CHAN_WAITING_TO_WRITE.
 *   <li> Transitions from:
 *     <ul>
 *     <li> Not open for writes/no cells by arrival of cells on an attached
 *       circuit
 *     <li> Open for writes/has cells by filling an output buffer without
 *       draining all cells from attached circuits
 *     </ul>
 *   <li> Transitions to:
 *     <ul>
 *     <li> Open for writes/has cells by draining some of the output buffer
 *       via the connection_or_flushed_some() path (for channel_tls_t).
 *     </ul>
 *   </ul>
 *
 * <li> Open for writes, cells to send
 *   <ul>
 *   <li> This connection is ready to relay some cells and waiting for
 *     the scheduler to choose it.  The channel will have scheduler_state ==
 *     SCHED_CHAN_PENDING.
 *   <li> Transitions from:
 *     <ul>
 *     <li> Not open for writes/has cells by the connection_or_flushed_some()
 *       path
 *     <li> Open for writes/no cells by the append_cell_to_circuit_queue()
 *       path
 *     </ul>
 *   <li> Transitions to:
 *     <ul>
 *     <li> Not open for writes/no cells by draining all circuit queues and
 *       simultaneously filling the output buffer.
 *     <li> Not open for writes/has cells by writing enough cells to fill the
 *       output buffer
 *     <li> Open for writes/no cells by draining all attached circuit queues
 *       without also filling the output buffer
 *     </ul>
 *   </ul>
 * </ol>
 *
 * Other event-driven parts of the code move channels between these scheduling
 * states by calling scheduler functions; the scheduler only runs on open-for-
 * writes/has-cells channels and is the only path for those to transition to
 * other states.  The scheduler_run() function gives us the opportunity to do
 * scheduling work, and is called from other scheduler functions whenever a
 * state transition occurs, and periodically from the main event loop.
 */
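/*
 * Illustrative sketch only (not part of the scheduler): the four states
 * above are just the cross product of "open for writes" and "has cells".
 * A hypothetical helper mapping those two flags to the scheduler_state
 * values might look like the disabled code below.
 */
#if 0
static int
scheduler_state_for_flags(int open_for_writes, int has_cells)
{
  if (open_for_writes && has_cells) return SCHED_CHAN_PENDING;
  else if (open_for_writes) return SCHED_CHAN_WAITING_FOR_CELLS;
  else if (has_cells) return SCHED_CHAN_WAITING_TO_WRITE;
  else return SCHED_CHAN_IDLE;
}
#endif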
/* Scheduler global data structures */

/*
 * We keep a list of channels that are pending - i.e., have cells to write
 * and can accept them to send.  The enum scheduler_state in channel_t
 * is reserved for our use.
 */

/* Pqueue of channels that can write and have cells (pending work) */
STATIC smartlist_t *channels_pending = NULL;

/*
 * This event runs the scheduler from its callback, and is manually
 * activated whenever a channel enters open for writes/cells to send.
 */

STATIC struct event *run_sched_ev = NULL;

/*
 * Queue heuristic; this is not the queue size, but an 'effective queuesize'
 * that ages out contributions from stalled channels.
 */

STATIC uint64_t queue_heuristic = 0;

/*
 * Timestamp for last queue heuristic update
 */

STATIC time_t queue_heuristic_timestamp = 0;

/* Scheduler static function declarations */

static void scheduler_evt_callback(evutil_socket_t fd,
                                   short events, void *arg);
static int scheduler_more_work(void);
static void scheduler_retrigger(void);
#if 0
static void scheduler_trigger(void);
#endif
/* Scheduler function implementations */

/** Free everything and shut down the scheduling system */

void
scheduler_free_all(void)
{
  log_debug(LD_SCHED, "Shutting down scheduler");

  if (run_sched_ev) {
    if (event_del(run_sched_ev) < 0) {
      log_warn(LD_BUG, "Problem deleting run_sched_ev");
    }
    tor_event_free(run_sched_ev);
    run_sched_ev = NULL;
  }

  if (channels_pending) {
    smartlist_free(channels_pending);
    channels_pending = NULL;
  }
}
/**
 * Comparison function to use when sorting pending channels
 */

MOCK_IMPL(STATIC int,
scheduler_compare_channels, (const void *c1_v, const void *c2_v))
{
  channel_t *c1 = NULL, *c2 = NULL;
  /* These are a workaround for -Wbad-function-cast throwing a fit */
  const circuitmux_policy_t *p1, *p2;
  uintptr_t p1_i, p2_i;

  tor_assert(c1_v);
  tor_assert(c2_v);

  c1 = (channel_t *)(c1_v);
  c2 = (channel_t *)(c2_v);

  tor_assert(c1);
  tor_assert(c2);

  if (c1 != c2) {
    if (circuitmux_get_policy(c1->cmux) ==
        circuitmux_get_policy(c2->cmux)) {
      /* Same cmux policy, so use the mux comparison */
      return circuitmux_compare_muxes(c1->cmux, c2->cmux);
    } else {
      /*
       * Different policies; not important to get this edge case perfect
       * because the current code never actually gives different channels
       * different cmux policies anyway.  Just use this arbitrary but
       * definite choice.
       */
      p1 = circuitmux_get_policy(c1->cmux);
      p2 = circuitmux_get_policy(c2->cmux);
      p1_i = (uintptr_t)p1;
      p2_i = (uintptr_t)p2;

      return (p1_i < p2_i) ? -1 : 1;
    }
  } else {
    /* c1 == c2, so always equal */
    return 0;
  }
}
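/*
 * Usage sketch (mirroring the calls made elsewhere in this file): the
 * comparator is handed to the smartlist pqueue helpers together with the
 * offset of the channel's heap-index field, e.g. when adding a channel:
 */
#if 0
smartlist_pqueue_add(channels_pending,
                     scheduler_compare_channels,
                     STRUCT_OFFSET(channel_t, sched_heap_idx),
                     chan);
#endif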
/*
 * Scheduler event callback; this should get triggered once per event loop
 * if any scheduling work was created during the event loop.
 */

static void
scheduler_evt_callback(evutil_socket_t fd, short events, void *arg)
{
  (void)fd;
  (void)events;
  (void)arg;
  log_debug(LD_SCHED, "Scheduler event callback called");

  tor_assert(run_sched_ev);

  /* Run the scheduler */
  scheduler_run();

  /* Do we have more work to do? */
  if (scheduler_more_work()) scheduler_retrigger();
}
/** Mark a channel as no longer ready to accept writes */

MOCK_IMPL(void,
scheduler_channel_doesnt_want_writes,(channel_t *chan))
{
  tor_assert(chan);
  tor_assert(channels_pending);

  /* If it's already in pending, we can put it in waiting_to_write */
  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    /*
     * It's in channels_pending, so it shouldn't be in any of
     * the other lists.  It can't write any more, so it goes to
     * channels_waiting_to_write.
     */
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
    chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from pending "
              "to waiting_to_write",
              U64_PRINTF_ARG(chan->global_identifier), chan);
  } else {
    /*
     * It's not in pending, so it can't become waiting_to_write; it's
     * either not in any of the lists (nothing to do) or it's already in
     * waiting_for_cells (remove it, can't write any more).
     */
    if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
      chan->scheduler_state = SCHED_CHAN_IDLE;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p left waiting_for_cells",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }
}
/** Mark a channel as having waiting cells */

MOCK_IMPL(void,
scheduler_channel_has_waiting_cells,(channel_t *chan))
{
  int became_pending = 0;

  tor_assert(chan);
  tor_assert(channels_pending);

  /* First, check if this one is also writeable */
  if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
    /*
     * It's in channels_waiting_for_cells, so it shouldn't be in any of
     * the other lists.  It has waiting cells now, so it goes to
     * channels_pending.
     */
    chan->scheduler_state = SCHED_CHAN_PENDING;
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from waiting_for_cells "
              "to pending",
              U64_PRINTF_ARG(chan->global_identifier), chan);
    became_pending = 1;
  } else {
    /*
     * It's not in waiting_for_cells, so it can't become pending; it's
     * either not in any of the lists (we add it to waiting_to_write)
     * or it's already in waiting_to_write or pending (we do nothing)
     */
    if (!(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE ||
          chan->scheduler_state == SCHED_CHAN_PENDING)) {
      chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p entered waiting_to_write",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }

  /*
   * If we made a channel pending, we potentially have scheduling work
   * to do.
   */
  if (became_pending) scheduler_retrigger();
}
/** Set up the scheduling system */

void
scheduler_init(void)
{
  log_debug(LD_SCHED, "Initting scheduler");

  tor_assert(!run_sched_ev);
  run_sched_ev = tor_event_new(tor_libevent_get_base(), -1,
                               0, scheduler_evt_callback, NULL);

  channels_pending = smartlist_new();
  queue_heuristic = 0;
  queue_heuristic_timestamp = approx_time();
}
/** Check if there's more scheduling work */

static int
scheduler_more_work(void)
{
  tor_assert(channels_pending);

  return ((scheduler_get_queue_heuristic() < sched_q_low_water) &&
          (smartlist_len(channels_pending) > 0)) ? 1 : 0;
}
/** Retrigger the scheduler in a way safe to use from the callback */

static void
scheduler_retrigger(void)
{
  tor_assert(run_sched_ev);

  event_active(run_sched_ev, EV_TIMEOUT, 1);
}
/** Notify the scheduler of a channel being closed */

MOCK_IMPL(void,
scheduler_release_channel,(channel_t *chan))
{
  tor_assert(chan);
  tor_assert(channels_pending);

  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
  }

  chan->scheduler_state = SCHED_CHAN_IDLE;
}
/** Run the scheduling algorithm if necessary */

MOCK_IMPL(void,
scheduler_run, (void))
{
  int n_cells, n_chans_before, n_chans_after;
  uint64_t q_len_before, q_heur_before, q_len_after, q_heur_after;
  ssize_t flushed, flushed_this_time;
  smartlist_t *to_readd = NULL;
  channel_t *chan = NULL;

  log_debug(LD_SCHED, "We have a chance to run the scheduler");

  if (scheduler_get_queue_heuristic() < sched_q_low_water) {
    n_chans_before = smartlist_len(channels_pending);
    q_len_before = channel_get_global_queue_estimate();
    q_heur_before = scheduler_get_queue_heuristic();

    while (scheduler_get_queue_heuristic() <= sched_q_high_water &&
           smartlist_len(channels_pending) > 0) {
      /* Pop off a channel */
      chan = smartlist_pqueue_pop(channels_pending,
                                  scheduler_compare_channels,
                                  STRUCT_OFFSET(channel_t, sched_heap_idx));
      tor_assert(chan);

      /* Figure out how many cells we can write */
      n_cells = channel_num_cells_writeable(chan);
      if (n_cells > 0) {
        log_debug(LD_SCHED,
                  "Scheduler saw pending channel " U64_FORMAT " at %p with "
                  "%d cells writeable",
                  U64_PRINTF_ARG(chan->global_identifier), chan, n_cells);

        flushed = 0;
        while (flushed < n_cells &&
               scheduler_get_queue_heuristic() <= sched_q_high_water) {
          flushed_this_time =
            channel_flush_some_cells(chan,
                                     MIN(sched_max_flush_cells,
                                         (size_t) n_cells - flushed));
          if (flushed_this_time <= 0) break;
          flushed += flushed_this_time;
        }

        if (flushed < n_cells) {
          /* We ran out of cells to flush */
          chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
          log_debug(LD_SCHED,
                    "Channel " U64_FORMAT " at %p "
                    "entered waiting_for_cells from pending",
                    U64_PRINTF_ARG(chan->global_identifier),
                    chan);
        } else {
          /* The channel may still have some cells */
          if (channel_more_to_flush(chan)) {
            /* The channel goes to either pending or waiting_to_write */
            if (channel_num_cells_writeable(chan) > 0) {
              /* Add it back to pending later */
              if (!to_readd) to_readd = smartlist_new();
              smartlist_add(to_readd, chan);
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "is still pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            } else {
              /* It's waiting to be able to write more */
              chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "entered waiting_to_write from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            }
          } else {
            /* No cells left; it can go to idle or waiting_for_cells */
            if (channel_num_cells_writeable(chan) > 0) {
              /*
               * It can still accept writes, so it goes to
               * waiting_for_cells
               */
              chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "entered waiting_for_cells from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            } else {
              /*
               * We exactly filled up the output queue with all available
               * cells; go to idle.
               */
              chan->scheduler_state = SCHED_CHAN_IDLE;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "became idle from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            }
          }
        }

        log_debug(LD_SCHED,
                  "Scheduler flushed %d cells onto pending channel "
                  U64_FORMAT " at %p",
                  (int)flushed, U64_PRINTF_ARG(chan->global_identifier),
                  chan);
      } else {
        log_info(LD_SCHED,
                 "Scheduler saw pending channel " U64_FORMAT " at %p with "
                 "no cells writeable",
                 U64_PRINTF_ARG(chan->global_identifier), chan);
        /* Put it back to WAITING_TO_WRITE */
        chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
      }
    }

    /* Readd any channels we need to */
    if (to_readd) {
      SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
        readd_chan->scheduler_state = SCHED_CHAN_PENDING;
        smartlist_pqueue_add(channels_pending,
                             scheduler_compare_channels,
                             STRUCT_OFFSET(channel_t, sched_heap_idx),
                             readd_chan);
      } SMARTLIST_FOREACH_END(readd_chan);
      smartlist_free(to_readd);
    }

    n_chans_after = smartlist_len(channels_pending);
    q_len_after = channel_get_global_queue_estimate();
    q_heur_after = scheduler_get_queue_heuristic();
    log_debug(LD_SCHED,
              "Scheduler handled %d of %d pending channels, queue size from "
              U64_FORMAT " to " U64_FORMAT ", queue heuristic from "
              U64_FORMAT " to " U64_FORMAT,
              n_chans_before - n_chans_after, n_chans_before,
              U64_PRINTF_ARG(q_len_before), U64_PRINTF_ARG(q_len_after),
              U64_PRINTF_ARG(q_heur_before), U64_PRINTF_ARG(q_heur_after));
  }
}
/** Trigger the scheduling event so we run the scheduler later */

#if 0
static void
scheduler_trigger(void)
{
  /* event_add() expects a struct timeval timeout; fire immediately */
  struct timeval run_now = { 0, 0 };

  log_debug(LD_SCHED, "Triggering scheduler event");

  tor_assert(run_sched_ev);

  event_add(run_sched_ev, &run_now);
}
#endif
/** Mark a channel as ready to accept writes */

void
scheduler_channel_wants_writes(channel_t *chan)
{
  int became_pending = 0;

  tor_assert(chan);
  tor_assert(channels_pending);

  /* If it's already in waiting_to_write, we can put it in pending */
  if (chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE) {
    /*
     * It can write now, so it goes to channels_pending.
     */
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
    chan->scheduler_state = SCHED_CHAN_PENDING;
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from waiting_to_write "
              "to pending",
              U64_PRINTF_ARG(chan->global_identifier), chan);
    became_pending = 1;
  } else {
    /*
     * It's not in SCHED_CHAN_WAITING_TO_WRITE, so it can't become pending;
     * it's either idle and goes to WAITING_FOR_CELLS, or it's a no-op.
     */
    if (!(chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS ||
          chan->scheduler_state == SCHED_CHAN_PENDING)) {
      chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p entered waiting_for_cells",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }

  /*
   * If we made a channel pending, we potentially have scheduling work
   * to do.
   */
  if (became_pending) scheduler_retrigger();
}
/**
 * Notify the scheduler that a channel's position in the pqueue may have
 * changed
 */

void
scheduler_touch_channel(channel_t *chan)
{
  tor_assert(chan);

  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    /* Remove and re-add it */
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
  }
  /* else no-op, since it isn't in the queue */
}
/**
 * Notify the scheduler of a queue size adjustment, to recalculate the
 * queue heuristic.
 */

void
scheduler_adjust_queue_size(channel_t *chan, int dir, uint64_t adj)
{
  time_t now = approx_time();

  log_debug(LD_SCHED,
            "Queue size adjustment by %s" U64_FORMAT " for channel "
            U64_FORMAT,
            (dir >= 0) ? "+" : "-",
            U64_PRINTF_ARG(adj),
            U64_PRINTF_ARG(chan->global_identifier));

  /* Get the queue heuristic up to date */
  scheduler_update_queue_heuristic(now);

  /* Adjust as appropriate */
  if (dir >= 0) {
    /* Increasing it */
    queue_heuristic += adj;
  } else {
    /* Decreasing it */
    if (queue_heuristic > adj) queue_heuristic -= adj;
    else queue_heuristic = 0;
  }

  log_debug(LD_SCHED,
            "Queue heuristic is now " U64_FORMAT,
            U64_PRINTF_ARG(queue_heuristic));
}
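/*
 * Worked example (hypothetical values; assumes no time elapses between
 * calls, so the age-out doesn't fire): the clamp above keeps the unsigned
 * heuristic from wrapping around on a large decrement.
 */
#if 0
queue_heuristic = 100;
scheduler_adjust_queue_size(chan, 1, 50);   /* heuristic becomes 150 */
scheduler_adjust_queue_size(chan, -1, 200); /* clamps to 0, no wraparound */
#endif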
/**
 * Query the current value of the queue heuristic
 */

STATIC uint64_t
scheduler_get_queue_heuristic(void)
{
  time_t now = approx_time();

  scheduler_update_queue_heuristic(now);

  return queue_heuristic;
}
/**
 * Adjust the queue heuristic value to the present time
 */

STATIC void
scheduler_update_queue_heuristic(time_t now)
{
  time_t diff;

  if (queue_heuristic_timestamp == 0) {
    /*
     * Nothing we can sensibly do; must not have been initted properly.
     * Oh well.
     */
    queue_heuristic_timestamp = now;
  } else if (queue_heuristic_timestamp < now) {
    diff = now - queue_heuristic_timestamp;
    /*
     * This is a simple exponential age-out; the other proposed alternative
     * was a linear age-out using the bandwidth history in rephist.c; I'm
     * going with this out of concern that if an adversary can jam the
     * scheduler long enough, it would cause the bandwidth to drop to
     * zero and render the aging mechanism ineffective thereafter.
     */
    if (0 <= diff && diff < 64) queue_heuristic >>= diff;
    else queue_heuristic = 0;

    queue_heuristic_timestamp = now;

    log_debug(LD_SCHED,
              "Queue heuristic is now " U64_FORMAT,
              U64_PRINTF_ARG(queue_heuristic));
  }
  /* else no update needed, or time went backward */
}
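/*
 * Decay sketch (hypothetical numbers): the right shift halves the
 * heuristic once per elapsed second, so a 3-second gap turns 1024 into
 * 1024 >> 3 == 128; any gap of 64 seconds or more zeroes it outright.
 */
#if 0
queue_heuristic = 1024;
queue_heuristic_timestamp = now - 3;
scheduler_update_queue_heuristic(now); /* queue_heuristic is now 128 */
#endif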
/**
 * Set scheduler watermarks and flush size
 */

void
scheduler_set_watermarks(uint32_t lo, uint32_t hi, uint32_t max_flush)
{
  /* Sanity assertions - caller should ensure these are true */
  tor_assert(lo > 0);
  tor_assert(hi > lo);
  tor_assert(max_flush > 0);

  sched_q_low_water = lo;
  sched_q_high_water = hi;
  sched_max_flush_cells = max_flush;
}
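/*
 * Usage sketch (illustrative values, not recommendations): a caller might
 * halve the default watermarks while keeping the default flush size.
 * Values violating the assertions above (e.g. hi <= lo) would abort.
 */
#if 0
scheduler_set_watermarks(8192, 16384, 16);
#endif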