/* Copyright (c) 2013-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file scheduler.c
 * \brief Relay scheduling system
 **/

#include "or.h"

#define TOR_CHANNEL_INTERNAL_ /* For channel_flush_some_cells() */
#include "channel.h"

#include "compat_libevent.h"
#define SCHEDULER_PRIVATE_
#include "scheduler.h"

#ifdef HAVE_EVENT2_EVENT_H
#include <event2/event.h>
#else
#include <event.h>
#endif

/*
 * Scheduler high/low watermarks
 */

static uint32_t sched_q_low_water = 16384;
static uint32_t sched_q_high_water = 32768;

/*
 * Maximum cells to flush in a single call to channel_flush_some_cells();
 * setting this low means more calls, but too high and we could overshoot
 * sched_q_high_water.
 */

static uint32_t sched_max_flush_cells = 16;
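
/*
 * Illustrative sketch (disabled; not part of the build): how the
 * watermarks and flush limit interact.  scheduler_run() only flushes
 * while the heuristic is at or below sched_q_high_water, so the
 * overshoot past the high watermark is bounded by one flush burst.
 * example_watermark_bounds() is hypothetical, and the arithmetic
 * assumes the heuristic is counted in cells.
 */
#if 0
static void
example_watermark_bounds(void)
{
  uint32_t worst_case;

  /* The low watermark must sit below the high watermark for the
   * hysteresis in scheduler_more_work()/scheduler_run() to make sense */
  tor_assert(sched_q_low_water < sched_q_high_water);

  /* Each channel_flush_some_cells() call moves at most
   * sched_max_flush_cells cells, so one burst past the high watermark
   * is the worst case before the flush loop stops. */
  worst_case = sched_q_high_water + sched_max_flush_cells;
  log_debug(LD_SCHED, "Queue heuristic should stay below %u", worst_case);
}
#endif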
/*
 * Write scheduling works by keeping track of which channels can
 * accept cells, and have cells to write.  From the scheduler's perspective,
 * a channel can be in four possible states:
 *
 * 1.) Not open for writes, no cells to send
 *   - Not much to do here, and the channel will have scheduler_state ==
 *     SCHED_CHAN_IDLE
 *   - Transitions from:
 *     - Open for writes/has cells by simultaneously draining all circuit
 *       queues and filling the output buffer.
 *   - Transitions to:
 *     - Not open for writes/has cells by arrival of cells on an attached
 *       circuit (this would be driven from append_cell_to_circuit_queue())
 *     - Open for writes/no cells by a channel type specific path;
 *       driven from connection_or_flushed_some() for channel_tls_t.
 *
 * 2.) Open for writes, no cells to send
 *   - Not much here either; this will be the state an idle but open channel
 *     can be expected to settle in.  It will have scheduler_state ==
 *     SCHED_CHAN_WAITING_FOR_CELLS
 *   - Transitions from:
 *     - Not open for writes/no cells by flushing some of the output
 *       buffer.
 *     - Open for writes/has cells by the scheduler moving cells from
 *       circuit queues to channel output queue, but not having enough
 *       to fill the output queue.
 *   - Transitions to:
 *     - Open for writes/has cells by arrival of new cells on an attached
 *       circuit, in append_cell_to_circuit_queue()
 *
 * 3.) Not open for writes, cells to send
 *   - This is the state of a busy circuit limited by output bandwidth;
 *     cells have piled up in the circuit queues waiting to be relayed.
 *     The channel will have scheduler_state == SCHED_CHAN_WAITING_TO_WRITE.
 *   - Transitions from:
 *     - Not open for writes/no cells by arrival of cells on an attached
 *       circuit
 *     - Open for writes/has cells by filling an output buffer without
 *       draining all cells from attached circuits
 *   - Transitions to:
 *     - Open for writes/has cells by draining some of the output buffer
 *       via the connection_or_flushed_some() path (for channel_tls_t).
 *
 * 4.) Open for writes, cells to send
 *   - This connection is ready to relay some cells and waiting for
 *     the scheduler to choose it.  The channel will have scheduler_state ==
 *     SCHED_CHAN_PENDING.
 *   - Transitions from:
 *     - Not open for writes/has cells by the connection_or_flushed_some()
 *       path
 *     - Open for writes/no cells by the append_cell_to_circuit_queue()
 *       path
 *   - Transitions to:
 *     - Not open for writes/no cells by draining all circuit queues and
 *       simultaneously filling the output buffer.
 *     - Not open for writes/has cells by writing enough cells to fill the
 *       output buffer
 *     - Open for writes/no cells by draining all attached circuit queues
 *       without also filling the output buffer
 *
 * Other event-driven parts of the code move channels between these scheduling
 * states by calling scheduler functions; the scheduler only runs on
 * open-for-writes/has-cells channels and is the only path for those to
 * transition to other states.  The scheduler_run() function gives us the
 * opportunity to do scheduling work, and is called from other scheduler
 * functions whenever a state transition occurs, and periodically from the
 * main event loop.  An illustrative walk-through of these transitions
 * appears in the disabled sketch below.
 */
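
/*
 * Illustrative sketch (disabled; not part of the build): how the
 * event-driven entry points in this file move a channel through the
 * four states above.  The channel pointer is hypothetical and assumed
 * to be fully set up elsewhere, with the scheduler already initted.
 */
#if 0
static void
example_state_walkthrough(channel_t *chan)
{
  /* A freshly idle channel starts in SCHED_CHAN_IDLE (state 1). */

  /* Cells arrive on an attached circuit; the channel now has work but
   * cannot write yet (state 3). */
  scheduler_channel_has_waiting_cells(chan);
  tor_assert(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE);

  /* The lower layer drains some output buffer; the channel becomes
   * writeable and thus pending (state 4), and the scheduler event is
   * retriggered. */
  scheduler_channel_wants_writes(chan);
  tor_assert(chan->scheduler_state == SCHED_CHAN_PENDING);

  /* If the output buffer fills again before the scheduler runs, the
   * channel falls back to waiting_to_write (state 3). */
  scheduler_channel_doesnt_want_writes(chan);
  tor_assert(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE);
}
#endif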
/* Scheduler global data structures */

/*
 * We keep a list of channels that are pending - i.e., have cells to write
 * and can accept them to send.  The enum scheduler_state in channel_t
 * is reserved for our use.
 */

/* Pqueue of channels that can write and have cells (pending work) */
STATIC smartlist_t *channels_pending = NULL;

/*
 * This event runs the scheduler from its callback, and is manually
 * activated whenever a channel enters open for writes/cells to send.
 */
STATIC struct event *run_sched_ev = NULL;

/*
 * Queue heuristic; this is not the queue size, but an 'effective queuesize'
 * that ages out contributions from stalled channels.
 */
STATIC uint64_t queue_heuristic = 0;

/*
 * Timestamp for last queue heuristic update
 */
STATIC time_t queue_heuristic_timestamp = 0;
/* Scheduler static function declarations */

static void scheduler_evt_callback(evutil_socket_t fd,
                                   short events, void *arg);
static int scheduler_more_work(void);
static void scheduler_retrigger(void);
#if 0
static void scheduler_trigger(void);
#endif
/* Scheduler function implementations */

/** Free everything and shut down the scheduling system */

void
scheduler_free_all(void)
{
  log_debug(LD_SCHED, "Shutting down scheduler");

  if (run_sched_ev) {
    if (event_del(run_sched_ev) < 0) {
      log_warn(LD_BUG, "Problem deleting run_sched_ev");
    }
    tor_event_free(run_sched_ev);
    run_sched_ev = NULL;
  }

  if (channels_pending) {
    smartlist_free(channels_pending);
    channels_pending = NULL;
  }
}
/**
 * Comparison function to use when sorting pending channels
 */

MOCK_IMPL(STATIC int,
scheduler_compare_channels, (const void *c1_v, const void *c2_v))
{
  channel_t *c1 = NULL, *c2 = NULL;
  /* These are a workaround for -Wbad-function-cast throwing a fit */
  const circuitmux_policy_t *p1, *p2;
  uintptr_t p1_i, p2_i;

  tor_assert(c1_v);
  tor_assert(c2_v);

  c1 = (channel_t *)(c1_v);
  c2 = (channel_t *)(c2_v);

  tor_assert(c1);
  tor_assert(c2);

  if (c1 != c2) {
    if (circuitmux_get_policy(c1->cmux) ==
        circuitmux_get_policy(c2->cmux)) {
      /* Same cmux policy, so use the mux comparison */
      return circuitmux_compare_muxes(c1->cmux, c2->cmux);
    } else {
      /*
       * Different policies; not important to get this edge case perfect
       * because the current code never actually gives different channels
       * different cmux policies anyway.  Just use this arbitrary but
       * definite choice.
       */
      p1 = circuitmux_get_policy(c1->cmux);
      p2 = circuitmux_get_policy(c2->cmux);
      p1_i = (uintptr_t)p1;
      p2_i = (uintptr_t)p2;

      return (p1_i < p2_i) ? -1 : 1;
    }
  } else {
    /* c1 == c2, so always equal */
    return 0;
  }
}
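
/*
 * Illustrative sketch (disabled; not part of the build): the comparator
 * above gives the pqueue a definite ordering, so the channel favored by
 * the circuitmux comparison is popped first.  Both channel pointers
 * here are hypothetical and assumed to be distinct and fully set up.
 */
#if 0
static void
example_comparator_ordering(channel_t *a, channel_t *b)
{
  smartlist_t *pq = smartlist_new();

  smartlist_pqueue_add(pq, scheduler_compare_channels,
                       STRUCT_OFFSET(channel_t, sched_heap_idx), a);
  smartlist_pqueue_add(pq, scheduler_compare_channels,
                       STRUCT_OFFSET(channel_t, sched_heap_idx), b);

  /* Whichever channel compares lower is scheduled first */
  tor_assert(smartlist_pqueue_pop(pq, scheduler_compare_channels,
                                  STRUCT_OFFSET(channel_t, sched_heap_idx))
             == ((scheduler_compare_channels(a, b) < 0) ? a : b));

  smartlist_free(pq);
}
#endif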
/*
 * Scheduler event callback; this should get triggered once per event loop
 * if any scheduling work was created during the event loop.
 */

static void
scheduler_evt_callback(evutil_socket_t fd, short events, void *arg)
{
  (void)fd;
  (void)events;
  (void)arg;

  log_debug(LD_SCHED, "Scheduler event callback called");

  tor_assert(run_sched_ev);

  /* Run the scheduler */
  scheduler_run();

  /* Do we have more work to do? */
  if (scheduler_more_work()) scheduler_retrigger();
}
/** Mark a channel as no longer ready to accept writes */

MOCK_IMPL(void,
scheduler_channel_doesnt_want_writes,(channel_t *chan))
{
  tor_assert(chan);
  tor_assert(channels_pending);

  /* If it's already in pending, we can put it in waiting_to_write */
  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    /*
     * It's in channels_pending, so it shouldn't be in any of
     * the other lists.  It can't write any more, so it goes to
     * channels_waiting_to_write.
     */
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
    chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from pending "
              "to waiting_to_write",
              U64_PRINTF_ARG(chan->global_identifier), chan);
  } else {
    /*
     * It's not in pending, so it can't become waiting_to_write; it's
     * either not in any of the lists (nothing to do) or it's already in
     * waiting_for_cells (remove it, can't write any more).
     */
    if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
      chan->scheduler_state = SCHED_CHAN_IDLE;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p left waiting_for_cells",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }
}
/** Mark a channel as having waiting cells */

MOCK_IMPL(void,
scheduler_channel_has_waiting_cells,(channel_t *chan))
{
  int became_pending = 0;

  tor_assert(chan);
  tor_assert(channels_pending);

  /* First, check if this one is also writeable */
  if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
    /*
     * It's in channels_waiting_for_cells, so it shouldn't be in any of
     * the other lists.  It has waiting cells now, so it goes to
     * channels_pending.
     */
    chan->scheduler_state = SCHED_CHAN_PENDING;
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from waiting_for_cells "
              "to pending",
              U64_PRINTF_ARG(chan->global_identifier), chan);
    became_pending = 1;
  } else {
    /*
     * It's not in waiting_for_cells, so it can't become pending; it's
     * either not in any of the lists (we add it to waiting_to_write)
     * or it's already in waiting_to_write or pending (we do nothing)
     */
    if (!(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE ||
          chan->scheduler_state == SCHED_CHAN_PENDING)) {
      chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p entered waiting_to_write",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }

  /*
   * If we made a channel pending, we potentially have scheduling work
   * to do.
   */
  if (became_pending) scheduler_retrigger();
}
/** Set up the scheduling system */

void
scheduler_init(void)
{
  log_debug(LD_SCHED, "Initting scheduler");

  tor_assert(!run_sched_ev);
  run_sched_ev = tor_event_new(tor_libevent_get_base(), -1,
                               0, scheduler_evt_callback, NULL);

  channels_pending = smartlist_new();
  queue_heuristic = 0;
  queue_heuristic_timestamp = approx_time();
}
/** Check if there's more scheduling work */

static int
scheduler_more_work(void)
{
  tor_assert(channels_pending);

  return ((scheduler_get_queue_heuristic() < sched_q_low_water) &&
          (smartlist_len(channels_pending) > 0)) ? 1 : 0;
}

/** Retrigger the scheduler in a way safe to use from the callback */

static void
scheduler_retrigger(void)
{
  tor_assert(run_sched_ev);

  event_active(run_sched_ev, EV_TIMEOUT, 1);
}
/** Notify the scheduler of a channel being closed */

MOCK_IMPL(void,
scheduler_release_channel,(channel_t *chan))
{
  tor_assert(chan);
  tor_assert(channels_pending);

  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
  }

  chan->scheduler_state = SCHED_CHAN_IDLE;
}
/** Run the scheduling algorithm if necessary */

MOCK_IMPL(void,
scheduler_run, (void))
{
  int n_cells, n_chans_before, n_chans_after;
  uint64_t q_len_before, q_heur_before, q_len_after, q_heur_after;
  ssize_t flushed, flushed_this_time;
  smartlist_t *to_readd = NULL;
  channel_t *chan = NULL;

  log_debug(LD_SCHED, "We have a chance to run the scheduler");

  if (scheduler_get_queue_heuristic() < sched_q_low_water) {
    n_chans_before = smartlist_len(channels_pending);
    q_len_before = channel_get_global_queue_estimate();
    q_heur_before = scheduler_get_queue_heuristic();

    while (scheduler_get_queue_heuristic() <= sched_q_high_water &&
           smartlist_len(channels_pending) > 0) {
      /* Pop off a channel */
      chan = smartlist_pqueue_pop(channels_pending,
                                  scheduler_compare_channels,
                                  STRUCT_OFFSET(channel_t, sched_heap_idx));
      tor_assert(chan);

      /* Figure out how many cells we can write */
      n_cells = channel_num_cells_writeable(chan);
      if (n_cells > 0) {
        log_debug(LD_SCHED,
                  "Scheduler saw pending channel " U64_FORMAT " at %p with "
                  "%d cells writeable",
                  U64_PRINTF_ARG(chan->global_identifier), chan, n_cells);

        flushed = 0;
        while (flushed < n_cells &&
               scheduler_get_queue_heuristic() <= sched_q_high_water) {
          flushed_this_time =
            channel_flush_some_cells(chan,
                                     MIN(sched_max_flush_cells,
                                         (size_t) n_cells - flushed));
          if (flushed_this_time <= 0) break;
          flushed += flushed_this_time;
        }

        if (flushed < n_cells) {
          /* We ran out of cells to flush */
          chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
          log_debug(LD_SCHED,
                    "Channel " U64_FORMAT " at %p "
                    "entered waiting_for_cells from pending",
                    U64_PRINTF_ARG(chan->global_identifier),
                    chan);
        } else {
          /* The channel may still have some cells */
          if (channel_more_to_flush(chan)) {
            /* The channel goes to either pending or waiting_to_write */
            if (channel_num_cells_writeable(chan) > 0) {
              /* Add it back to pending later */
              if (!to_readd) to_readd = smartlist_new();
              smartlist_add(to_readd, chan);
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "is still pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            } else {
              /* It's waiting to be able to write more */
              chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "entered waiting_to_write from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            }
          } else {
            /* No cells left; it can go to idle or waiting_for_cells */
            if (channel_num_cells_writeable(chan) > 0) {
              /*
               * It can still accept writes, so it goes to
               * waiting_for_cells
               */
              chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "entered waiting_for_cells from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            } else {
              /*
               * We exactly filled up the output queue with all available
               * cells; go to idle.
               */
              chan->scheduler_state = SCHED_CHAN_IDLE;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "became idle from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            }
          }
        }

        log_debug(LD_SCHED,
                  "Scheduler flushed %d cells onto pending channel "
                  U64_FORMAT " at %p",
                  (int)flushed, U64_PRINTF_ARG(chan->global_identifier),
                  chan);
      } else {
        log_info(LD_SCHED,
                 "Scheduler saw pending channel " U64_FORMAT " at %p with "
                 "no cells writeable",
                 U64_PRINTF_ARG(chan->global_identifier), chan);
        /* Put it back to WAITING_TO_WRITE */
        chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
      }
    }

    /* Readd any channels we need to; use a fresh loop variable name so
     * we don't shadow chan above */
    if (to_readd) {
      SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
        readd_chan->scheduler_state = SCHED_CHAN_PENDING;
        smartlist_pqueue_add(channels_pending,
                             scheduler_compare_channels,
                             STRUCT_OFFSET(channel_t, sched_heap_idx),
                             readd_chan);
      } SMARTLIST_FOREACH_END(readd_chan);
      smartlist_free(to_readd);
    }

    n_chans_after = smartlist_len(channels_pending);
    q_len_after = channel_get_global_queue_estimate();
    q_heur_after = scheduler_get_queue_heuristic();
    log_debug(LD_SCHED,
              "Scheduler handled %d of %d pending channels, queue size from "
              U64_FORMAT " to " U64_FORMAT ", queue heuristic from "
              U64_FORMAT " to " U64_FORMAT,
              n_chans_before - n_chans_after, n_chans_before,
              U64_PRINTF_ARG(q_len_before), U64_PRINTF_ARG(q_len_after),
              U64_PRINTF_ARG(q_heur_before), U64_PRINTF_ARG(q_heur_after));
  }
}
/** Trigger the scheduling event so we run the scheduler later */

#if 0
static void
scheduler_trigger(void)
{
  /* Zero timeout, so the callback fires on the next event loop pass;
   * event_add() takes (event, timeval), unlike event_active() */
  struct timeval next_run = { 0, 0 };

  log_debug(LD_SCHED, "Triggering scheduler event");

  tor_assert(run_sched_ev);

  event_add(run_sched_ev, &next_run);
}
#endif
/** Mark a channel as ready to accept writes */

void
scheduler_channel_wants_writes(channel_t *chan)
{
  int became_pending = 0;

  tor_assert(chan);
  tor_assert(channels_pending);

  /* If it's already in waiting_to_write, we can put it in pending */
  if (chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE) {
    /*
     * It can write now, so it goes to channels_pending.
     */
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
    chan->scheduler_state = SCHED_CHAN_PENDING;
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from waiting_to_write "
              "to pending",
              U64_PRINTF_ARG(chan->global_identifier), chan);
    became_pending = 1;
  } else {
    /*
     * It's not in SCHED_CHAN_WAITING_TO_WRITE, so it can't become pending;
     * it's either idle and goes to WAITING_FOR_CELLS, or it's a no-op.
     */
    if (!(chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS ||
          chan->scheduler_state == SCHED_CHAN_PENDING)) {
      chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p entered waiting_for_cells",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }

  /*
   * If we made a channel pending, we potentially have scheduling work
   * to do.
   */
  if (became_pending) scheduler_retrigger();
}
/**
 * Notify the scheduler that a channel's position in the pqueue may have
 * changed
 */

void
scheduler_touch_channel(channel_t *chan)
{
  tor_assert(chan);

  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    /* Remove and re-add it */
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
  }
  /* else no-op, since it isn't in the queue */
}
/**
 * Notify the scheduler of a queue size adjustment, to recalculate the
 * queue heuristic.
 */

void
scheduler_adjust_queue_size(channel_t *chan, int dir, uint64_t adj)
{
  time_t now = approx_time();

  log_debug(LD_SCHED,
            "Queue size adjustment by %s" U64_FORMAT " for channel "
            U64_FORMAT,
            (dir >= 0) ? "+" : "-",
            U64_PRINTF_ARG(adj),
            U64_PRINTF_ARG(chan->global_identifier));

  /* Get the queue heuristic up to date */
  scheduler_update_queue_heuristic(now);

  /* Adjust as appropriate */
  if (dir >= 0) {
    /* Increasing it */
    queue_heuristic += adj;
  } else {
    /* Decreasing it */
    if (queue_heuristic > adj) queue_heuristic -= adj;
    else queue_heuristic = 0;
  }

  log_debug(LD_SCHED,
            "Queue heuristic is now " U64_FORMAT,
            U64_PRINTF_ARG(queue_heuristic));
}
/**
 * Query the current value of the queue heuristic
 */

STATIC uint64_t
scheduler_get_queue_heuristic(void)
{
  time_t now = approx_time();

  scheduler_update_queue_heuristic(now);

  return queue_heuristic;
}
/**
 * Adjust the queue heuristic value to the present time
 */

STATIC void
scheduler_update_queue_heuristic(time_t now)
{
  time_t diff;

  if (queue_heuristic_timestamp == 0) {
    /*
     * Nothing we can sensibly do; must not have been initted properly.
     * Oh well.
     */
    queue_heuristic_timestamp = now;
  } else if (queue_heuristic_timestamp < now) {
    diff = now - queue_heuristic_timestamp;
    /*
     * This is a simple exponential age-out; the other proposed alternative
     * was a linear age-out using the bandwidth history in rephist.c; I'm
     * going with this out of concern that if an adversary can jam the
     * scheduler long enough, it would cause the bandwidth to drop to
     * zero and render the aging mechanism ineffective thereafter.
     */
    if (0 <= diff && diff < 64) queue_heuristic >>= diff;
    else queue_heuristic = 0;

    queue_heuristic_timestamp = now;

    log_debug(LD_SCHED,
              "Queue heuristic is now " U64_FORMAT,
              U64_PRINTF_ARG(queue_heuristic));
  }
  /* else no update needed, or time went backward */
}
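
/*
 * Illustrative sketch (disabled; not part of the build): the age-out
 * above halves the heuristic once per elapsed second, so a stalled
 * channel's contribution decays geometrically rather than lingering
 * forever.  example_heuristic_decay() is hypothetical.
 */
#if 0
static void
example_heuristic_decay(void)
{
  /* Suppose the heuristic stands at 1024 and 3 seconds elapse with no
   * adjustments: 1024 >> 3 == 128, i.e. three successive halvings. */
  uint64_t before = 1024;
  unsigned elapsed = 3;

  tor_assert((before >> elapsed) == 128);

  /* At 64 or more elapsed seconds the shift would be undefined for a
   * 64-bit value, which is why the code zeroes the heuristic instead. */
}
#endif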
/**
 * Set scheduler watermarks and flush size
 */

void
scheduler_set_watermarks(uint32_t lo, uint32_t hi, uint32_t max_flush)
{
  /* Sanity assertions - caller should ensure these are true */
  tor_assert(lo > 0);
  tor_assert(hi > lo);
  tor_assert(max_flush > 0);

  sched_q_low_water = lo;
  sched_q_high_water = hi;
  sched_max_flush_cells = max_flush;
}
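
/*
 * Illustrative sketch (disabled; not part of the build): how a caller
 * might configure the scheduler at startup.  example_scheduler_setup()
 * and the doubled values are hypothetical, not taken from Tor's actual
 * configuration handling.
 */
#if 0
static void
example_scheduler_setup(void)
{
  scheduler_init();

  /* Double the compiled-in defaults: resume scheduling only once the
   * heuristic drains below 32768, stop queueing past 65536, and move
   * at most 32 cells per channel_flush_some_cells() call. */
  scheduler_set_watermarks(32768, 65536, 32);
}
#endif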