/* Copyright (c) 2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file scheduler.c
 * \brief Relay scheduling system
 **/

#include "or.h"

#define TOR_CHANNEL_INTERNAL_ /* For channel_flush_some_cells() */
#include "channel.h"

#include "compat_libevent.h"
#define SCHEDULER_PRIVATE_
#include "scheduler.h"

#ifdef HAVE_EVENT2_EVENT_H
#include <event2/event.h>
#else
#include <event.h>
#endif

#define SCHED_Q_LOW_WATER 16384
#define SCHED_Q_HIGH_WATER (2 * SCHED_Q_LOW_WATER)

/*
 * Maximum cells to flush in a single call to channel_flush_some_cells();
 * setting this low means more calls, but too high and we could overshoot
 * SCHED_Q_HIGH_WATER.
 */
#define SCHED_MAX_FLUSH_CELLS 16
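
/*
 * Rough arithmetic behind the watermarks (assuming the queue heuristic is
 * byte-denominated, at roughly 514 bytes per relayed cell): scheduler_run()
 * keeps flushing while the heuristic is at or below SCHED_Q_HIGH_WATER
 * (32768), and each channel_flush_some_cells() call moves at most
 * SCHED_MAX_FLUSH_CELLS cells, so a single batch can overshoot the high
 * water mark by at most about 16 * 514 = 8224 bytes.
 */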

/*
 * Write scheduling works by keeping track of which channels can
 * accept cells, and have cells to write.  From the scheduler's perspective,
 * a channel can be in four possible states:
 *
 * 1.) Not open for writes, no cells to send
 *     - Not much to do here, and the channel will have scheduler_state ==
 *       SCHED_CHAN_IDLE
 *     - Transitions from:
 *       - Open for writes/has cells by simultaneously draining all circuit
 *         queues and filling the output buffer.
 *     - Transitions to:
 *       - Not open for writes/has cells by arrival of cells on an attached
 *         circuit (this would be driven from append_cell_to_circuit_queue())
 *       - Open for writes/no cells by a channel type specific path;
 *         driven from connection_or_flushed_some() for channel_tls_t.
 *
 * 2.) Open for writes, no cells to send
 *     - Not much here either; this will be the state an idle but open
 *       channel can be expected to settle in.  It will have
 *       scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS
 *     - Transitions from:
 *       - Not open for writes/no cells by flushing some of the output
 *         buffer.
 *       - Open for writes/has cells by the scheduler moving cells from
 *         circuit queues to channel output queue, but not having enough
 *         to fill the output queue.
 *     - Transitions to:
 *       - Open for writes/has cells by arrival of new cells on an attached
 *         circuit, in append_cell_to_circuit_queue()
 *
 * 3.) Not open for writes, cells to send
 *     - This is the state of a busy circuit limited by output bandwidth;
 *       cells have piled up in the circuit queues waiting to be relayed.
 *       The channel will have scheduler_state == SCHED_CHAN_WAITING_TO_WRITE.
 *     - Transitions from:
 *       - Not open for writes/no cells by arrival of cells on an attached
 *         circuit
 *       - Open for writes/has cells by filling an output buffer without
 *         draining all cells from attached circuits
 *     - Transitions to:
 *       - Open for writes/has cells by draining some of the output buffer
 *         via the connection_or_flushed_some() path (for channel_tls_t).
 *
 * 4.) Open for writes, cells to send
 *     - This connection is ready to relay some cells and waiting for
 *       the scheduler to choose it.  The channel will have
 *       scheduler_state == SCHED_CHAN_PENDING.
 *     - Transitions from:
 *       - Not open for writes/has cells by the connection_or_flushed_some()
 *         path
 *       - Open for writes/no cells by the append_cell_to_circuit_queue()
 *         path
 *     - Transitions to:
 *       - Not open for writes/no cells by draining all circuit queues and
 *         simultaneously filling the output buffer.
 *       - Not open for writes/has cells by writing enough cells to fill the
 *         output buffer
 *       - Open for writes/no cells by draining all attached circuit queues
 *         without also filling the output buffer
 *
 * Other event-driven parts of the code move channels between these
 * scheduling states by calling scheduler functions; the scheduler only runs
 * on open-for-writes/has-cells channels and is the only path for those to
 * transition to other states.  The scheduler_run() function gives us the
 * opportunity to do scheduling work, and is called from other scheduler
 * functions whenever a state transition occurs, and periodically from the
 * main event loop.
 */
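
/*
 * A minimal sketch (kept out of the build, like scheduler_trigger() below)
 * of how other parts of the code would drive the transitions described
 * above.  The function name and the channel pointer are hypothetical
 * stand-ins for illustration, not a real caller.
 */
#if 0
static void
scheduler_state_machine_example(channel_t *chan)
{
  /* 1 -> 2: an idle channel becomes able to accept writes */
  scheduler_channel_wants_writes(chan);
  /* 2 -> 4: cells arrive on an attached circuit; this also retriggers
   * the scheduler via the run_sched_ev event */
  scheduler_channel_has_waiting_cells(chan);
  /* 4 -> 3: the output buffer filled before the queues drained */
  scheduler_channel_doesnt_want_writes(chan);
  /* 3 -> 4: some of the output buffer was flushed */
  scheduler_channel_wants_writes(chan);
  /* Closing the channel drops it from the pending pqueue; -> idle */
  scheduler_release_channel(chan);
}
#endif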

/* Scheduler global data structures */

/*
 * We keep a list of channels that are pending - i.e., have cells to write
 * and can accept them to send.  The enum scheduler_state in channel_t
 * is reserved for our use.
 */

/* Pqueue of channels that can write and have cells (pending work) */
STATIC smartlist_t *channels_pending = NULL;

/*
 * This event runs the scheduler from its callback, and is manually
 * activated whenever a channel enters open for writes/cells to send.
 */
STATIC struct event *run_sched_ev = NULL;

/*
 * Queue heuristic; this is not the queue size, but an 'effective queue size'
 * that ages out contributions from stalled channels.
 */
STATIC uint64_t queue_heuristic = 0;

/*
 * Timestamp for last queue heuristic update
 */
STATIC time_t queue_heuristic_timestamp = 0;

/* Scheduler static function declarations */

static void scheduler_evt_callback(evutil_socket_t fd,
                                   short events, void *arg);
static int scheduler_more_work(void);
static void scheduler_retrigger(void);
#if 0
static void scheduler_trigger(void);
#endif

/* Scheduler function implementations */

/** Free everything and shut down the scheduling system */
void
scheduler_free_all(void)
{
  log_debug(LD_SCHED, "Shutting down scheduler");

  if (run_sched_ev) {
    event_del(run_sched_ev);
    tor_event_free(run_sched_ev);
    run_sched_ev = NULL;
  }

  if (channels_pending) {
    smartlist_free(channels_pending);
    channels_pending = NULL;
  }
}

/**
 * Comparison function to use when sorting pending channels
 */
STATIC int
scheduler_compare_channels(const void *c1_v, const void *c2_v)
{
  channel_t *c1 = NULL, *c2 = NULL;
  /* These are a workaround for -Wbad-function-cast throwing a fit */
  const circuitmux_policy_t *p1, *p2;
  uintptr_t p1_i, p2_i;

  tor_assert(c1_v);
  tor_assert(c2_v);

  c1 = (channel_t *)(c1_v);
  c2 = (channel_t *)(c2_v);

  tor_assert(c1);
  tor_assert(c2);

  if (c1 != c2) {
    if (circuitmux_get_policy(c1->cmux) ==
        circuitmux_get_policy(c2->cmux)) {
      /* Same cmux policy, so use the mux comparison */
      return circuitmux_compare_muxes(c1->cmux, c2->cmux);
    } else {
      /*
       * Different policies; not important to get this edge case perfect
       * because the current code never actually gives different channels
       * different cmux policies anyway.  Just use this arbitrary but
       * definite choice.
       */
      p1 = circuitmux_get_policy(c1->cmux);
      p2 = circuitmux_get_policy(c2->cmux);
      p1_i = (uintptr_t)p1;
      p2_i = (uintptr_t)p2;

      return (p1_i < p2_i) ? -1 : 1;
    }
  } else {
    /* c1 == c2, so always equal */
    return 0;
  }
}
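
/*
 * A minimal, non-compiled sketch of the comparator's role: channels_pending
 * is kept as a min-heap keyed by this function (via the smartlist pqueue
 * helpers), so the channel that compares lowest is popped first by
 * scheduler_run().  The function name and 'chan' are hypothetical.
 */
#if 0
static void
scheduler_pqueue_example(channel_t *chan)
{
  /* Insert keyed by scheduler_compare_channels()... */
  smartlist_pqueue_add(channels_pending,
                       scheduler_compare_channels,
                       STRUCT_OFFSET(channel_t, sched_heap_idx),
                       chan);
  /* ...and the lowest-comparing channel comes back out first */
  chan = smartlist_pqueue_pop(channels_pending,
                              scheduler_compare_channels,
                              STRUCT_OFFSET(channel_t, sched_heap_idx));
}
#endif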

/*
 * Scheduler event callback; this should get triggered once per event loop
 * if any scheduling work was created during the event loop.
 */
static void
scheduler_evt_callback(evutil_socket_t fd, short events, void *arg)
{
  (void)fd;
  (void)events;
  (void)arg;
  log_debug(LD_SCHED, "Scheduler event callback called");

  tor_assert(run_sched_ev);

  /* Run the scheduler */
  scheduler_run();

  /* Do we have more work to do? */
  if (scheduler_more_work()) scheduler_retrigger();
}

/** Mark a channel as no longer ready to accept writes */
MOCK_IMPL(void,
scheduler_channel_doesnt_want_writes,(channel_t *chan))
{
  tor_assert(chan);
  tor_assert(channels_pending);

  /* If it's already in pending, we can put it in waiting_to_write */
  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    /*
     * It's in channels_pending, so it shouldn't be in any of
     * the other lists.  It can't write any more, so it goes to
     * channels_waiting_to_write.
     */
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
    chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from pending "
              "to waiting_to_write",
              U64_PRINTF_ARG(chan->global_identifier), chan);
  } else {
    /*
     * It's not in pending, so it can't become waiting_to_write; it's
     * either not in any of the lists (nothing to do) or it's already in
     * waiting_for_cells (remove it, can't write any more).
     */
    if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
      chan->scheduler_state = SCHED_CHAN_IDLE;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p left waiting_for_cells",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }
}

/** Mark a channel as having waiting cells */
MOCK_IMPL(void,
scheduler_channel_has_waiting_cells,(channel_t *chan))
{
  int became_pending = 0;

  tor_assert(chan);
  tor_assert(channels_pending);

  /* First, check if this one is also writeable */
  if (chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS) {
    /*
     * It's in channels_waiting_for_cells, so it shouldn't be in any of
     * the other lists.  It has waiting cells now, so it goes to
     * channels_pending.
     */
    chan->scheduler_state = SCHED_CHAN_PENDING;
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from waiting_for_cells "
              "to pending",
              U64_PRINTF_ARG(chan->global_identifier), chan);
    became_pending = 1;
  } else {
    /*
     * It's not in waiting_for_cells, so it can't become pending; it's
     * either not in any of the lists (we add it to waiting_to_write)
     * or it's already in waiting_to_write or pending (we do nothing)
     */
    if (!(chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE ||
          chan->scheduler_state == SCHED_CHAN_PENDING)) {
      chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p entered waiting_to_write",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }

  /*
   * If we made a channel pending, we potentially have scheduling work
   * to do.
   */
  if (became_pending) scheduler_retrigger();
}

/** Set up the scheduling system */
void
scheduler_init(void)
{
  log_debug(LD_SCHED, "Initting scheduler");

  tor_assert(!run_sched_ev);
  run_sched_ev = tor_event_new(tor_libevent_get_base(), -1,
                               0, scheduler_evt_callback, NULL);

  channels_pending = smartlist_new();
  queue_heuristic = 0;
  queue_heuristic_timestamp = approx_time();
}

/** Check if there's more scheduling work */
static int
scheduler_more_work(void)
{
  tor_assert(channels_pending);

  return ((scheduler_get_queue_heuristic() < SCHED_Q_LOW_WATER) &&
          ((smartlist_len(channels_pending) > 0))) ? 1 : 0;
}

/** Retrigger the scheduler in a way safe to use from the callback */
static void
scheduler_retrigger(void)
{
  tor_assert(run_sched_ev);

  event_active(run_sched_ev, EV_TIMEOUT, 1);
}

/** Notify the scheduler of a channel being closed */
MOCK_IMPL(void,
scheduler_release_channel,(channel_t *chan))
{
  tor_assert(chan);
  tor_assert(channels_pending);

  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
  }

  chan->scheduler_state = SCHED_CHAN_IDLE;
}

/** Run the scheduling algorithm if necessary */
MOCK_IMPL(void,
scheduler_run, (void))
{
  int n_cells, n_chans_before, n_chans_after;
  uint64_t q_len_before, q_heur_before, q_len_after, q_heur_after;
  ssize_t flushed, flushed_this_time;
  smartlist_t *to_readd = NULL;
  channel_t *chan = NULL;

  log_debug(LD_SCHED, "We have a chance to run the scheduler");

  if (scheduler_get_queue_heuristic() < SCHED_Q_LOW_WATER) {
    n_chans_before = smartlist_len(channels_pending);
    q_len_before = channel_get_global_queue_estimate();
    q_heur_before = scheduler_get_queue_heuristic();

    while (scheduler_get_queue_heuristic() <= SCHED_Q_HIGH_WATER &&
           smartlist_len(channels_pending) > 0) {
      /* Pop off a channel */
      chan = smartlist_pqueue_pop(channels_pending,
                                  scheduler_compare_channels,
                                  STRUCT_OFFSET(channel_t, sched_heap_idx));
      tor_assert(chan);

      /* Figure out how many cells we can write */
      n_cells = channel_num_cells_writeable(chan);
      if (n_cells > 0) {
        log_debug(LD_SCHED,
                  "Scheduler saw pending channel " U64_FORMAT " at %p with "
                  "%d cells writeable",
                  U64_PRINTF_ARG(chan->global_identifier), chan, n_cells);

        flushed = 0;
        while (flushed < n_cells &&
               scheduler_get_queue_heuristic() <= SCHED_Q_HIGH_WATER) {
          flushed_this_time =
            channel_flush_some_cells(chan,
                                     MIN(SCHED_MAX_FLUSH_CELLS,
                                         n_cells - flushed));
          if (flushed_this_time <= 0) break;
          flushed += flushed_this_time;
        }

        if (flushed < n_cells) {
          /* We ran out of cells to flush */
          chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
          log_debug(LD_SCHED,
                    "Channel " U64_FORMAT " at %p "
                    "entered waiting_for_cells from pending",
                    U64_PRINTF_ARG(chan->global_identifier),
                    chan);
        } else {
          /* The channel may still have some cells */
          if (channel_more_to_flush(chan)) {
            /* The channel goes to either pending or waiting_to_write */
            if (channel_num_cells_writeable(chan) > 0) {
              /* Add it back to pending later */
              if (!to_readd) to_readd = smartlist_new();
              smartlist_add(to_readd, chan);
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "is still pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            } else {
              /* It's waiting to be able to write more */
              chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "entered waiting_to_write from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            }
          } else {
            /* No cells left; it can go to idle or waiting_for_cells */
            if (channel_num_cells_writeable(chan) > 0) {
              /*
               * It can still accept writes, so it goes to
               * waiting_for_cells
               */
              chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "entered waiting_for_cells from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            } else {
              /*
               * We exactly filled up the output queue with all available
               * cells; go to idle.
               */
              chan->scheduler_state = SCHED_CHAN_IDLE;
              log_debug(LD_SCHED,
                        "Channel " U64_FORMAT " at %p "
                        "became idle from pending",
                        U64_PRINTF_ARG(chan->global_identifier),
                        chan);
            }
          }
        }

        log_debug(LD_SCHED,
                  "Scheduler flushed %d cells onto pending channel "
                  U64_FORMAT " at %p",
                  (int)flushed, U64_PRINTF_ARG(chan->global_identifier),
                  chan);
      } else {
        log_info(LD_SCHED,
                 "Scheduler saw pending channel " U64_FORMAT " at %p with "
                 "no cells writeable",
                 U64_PRINTF_ARG(chan->global_identifier), chan);
        /* Put it back to WAITING_TO_WRITE */
        chan->scheduler_state = SCHED_CHAN_WAITING_TO_WRITE;
      }
    }

    /* Readd any channels we need to */
    if (to_readd) {
      SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, chan) {
        chan->scheduler_state = SCHED_CHAN_PENDING;
        smartlist_pqueue_add(channels_pending,
                             scheduler_compare_channels,
                             STRUCT_OFFSET(channel_t, sched_heap_idx),
                             chan);
      } SMARTLIST_FOREACH_END(chan);
      smartlist_free(to_readd);
    }

    n_chans_after = smartlist_len(channels_pending);
    q_len_after = channel_get_global_queue_estimate();
    q_heur_after = scheduler_get_queue_heuristic();
    log_debug(LD_SCHED,
              "Scheduler handled %d of %d pending channels, queue size from "
              U64_FORMAT " to " U64_FORMAT ", queue heuristic from "
              U64_FORMAT " to " U64_FORMAT,
              n_chans_before - n_chans_after, n_chans_before,
              U64_PRINTF_ARG(q_len_before), U64_PRINTF_ARG(q_len_after),
              U64_PRINTF_ARG(q_heur_before), U64_PRINTF_ARG(q_heur_after));
  }
}

/** Trigger the scheduling event so we run the scheduler later */
#if 0
static void
scheduler_trigger(void)
{
  /*
   * Assumed intent: fire on the next event loop pass, so we pass a zero
   * timeout; event_add() takes a struct timeval, not (EV_TIMEOUT, 1).
   */
  struct timeval tv = { 0, 0 };

  log_debug(LD_SCHED, "Triggering scheduler event");

  tor_assert(run_sched_ev);

  event_add(run_sched_ev, &tv);
}
#endif

/** Mark a channel as ready to accept writes */
void
scheduler_channel_wants_writes(channel_t *chan)
{
  int became_pending = 0;

  tor_assert(chan);
  tor_assert(channels_pending);

  /* If it's already in waiting_to_write, we can put it in pending */
  if (chan->scheduler_state == SCHED_CHAN_WAITING_TO_WRITE) {
    /*
     * It can write now, so it goes to channels_pending.
     */
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
    chan->scheduler_state = SCHED_CHAN_PENDING;
    log_debug(LD_SCHED,
              "Channel " U64_FORMAT " at %p went from waiting_to_write "
              "to pending",
              U64_PRINTF_ARG(chan->global_identifier), chan);
    became_pending = 1;
  } else {
    /*
     * It's not in SCHED_CHAN_WAITING_TO_WRITE, so it can't become pending;
     * it's either idle and goes to WAITING_FOR_CELLS, or it's a no-op.
     */
    if (!(chan->scheduler_state == SCHED_CHAN_WAITING_FOR_CELLS ||
          chan->scheduler_state == SCHED_CHAN_PENDING)) {
      chan->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
      log_debug(LD_SCHED,
                "Channel " U64_FORMAT " at %p entered waiting_for_cells",
                U64_PRINTF_ARG(chan->global_identifier), chan);
    }
  }

  /*
   * If we made a channel pending, we potentially have scheduling work
   * to do.
   */
  if (became_pending) scheduler_retrigger();
}

/**
 * Notify the scheduler that a channel's position in the pqueue may have
 * changed
 */
void
scheduler_touch_channel(channel_t *chan)
{
  tor_assert(chan);

  if (chan->scheduler_state == SCHED_CHAN_PENDING) {
    /* Remove and re-add it */
    smartlist_pqueue_remove(channels_pending,
                            scheduler_compare_channels,
                            STRUCT_OFFSET(channel_t, sched_heap_idx),
                            chan);
    smartlist_pqueue_add(channels_pending,
                         scheduler_compare_channels,
                         STRUCT_OFFSET(channel_t, sched_heap_idx),
                         chan);
  }
  /* else no-op, since it isn't in the queue */
}

/**
 * Notify the scheduler of a queue size adjustment, to recalculate the
 * queue heuristic.
 */
void
scheduler_adjust_queue_size(channel_t *chan, char dir, uint64_t adj)
{
  time_t now = approx_time();

  log_debug(LD_SCHED,
            "Queue size adjustment by %s" U64_FORMAT " for channel "
            U64_FORMAT,
            (dir >= 0) ? "+" : "-",
            U64_PRINTF_ARG(adj),
            U64_PRINTF_ARG(chan->global_identifier));

  /* Get the queue heuristic up to date */
  scheduler_update_queue_heuristic(now);

  /* Adjust as appropriate */
  if (dir >= 0) {
    /* Increasing it */
    queue_heuristic += adj;
  } else {
    /* Decreasing it */
    if (queue_heuristic > adj) queue_heuristic -= adj;
    else queue_heuristic = 0;
  }

  log_debug(LD_SCHED,
            "Queue heuristic is now " U64_FORMAT,
            U64_PRINTF_ARG(queue_heuristic));
}
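
/*
 * Hypothetical caller sketch (not compiled, not part of this file's API
 * surface): channel code reporting queue growth and drain.  The function
 * name, 'chan', and the 514-byte figure (one max-size cell on the wire)
 * are illustrative assumptions only.
 */
#if 0
static void
scheduler_queue_adjust_example(channel_t *chan)
{
  scheduler_adjust_queue_size(chan, 1, 514);  /* one cell queued */
  scheduler_adjust_queue_size(chan, -1, 514); /* one cell written out */
}
#endif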

/**
 * Query the current value of the queue heuristic
 */
STATIC uint64_t
scheduler_get_queue_heuristic(void)
{
  time_t now = approx_time();

  scheduler_update_queue_heuristic(now);

  return queue_heuristic;
}

/**
 * Adjust the queue heuristic value to the present time
 */
STATIC void
scheduler_update_queue_heuristic(time_t now)
{
  time_t diff;

  if (queue_heuristic_timestamp == 0) {
    /*
     * Nothing we can sensibly do; must not have been initted properly.
     * Oh well.
     */
    queue_heuristic_timestamp = now;
  } else if (queue_heuristic_timestamp < now) {
    diff = now - queue_heuristic_timestamp;
    /*
     * This is a simple exponential age-out; the other proposed alternative
     * was a linear age-out using the bandwidth history in rephist.c; I'm
     * going with this out of concern that if an adversary can jam the
     * scheduler long enough, it would cause the bandwidth to drop to
     * zero and render the aging mechanism ineffective thereafter.
     */
    if (0 <= diff && diff < 64) queue_heuristic >>= diff;
    else queue_heuristic = 0;

    queue_heuristic_timestamp = now;

    log_debug(LD_SCHED,
              "Queue heuristic is now " U64_FORMAT,
              U64_PRINTF_ARG(queue_heuristic));
  }
  /* else no update needed, or time went backward */
}
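
/*
 * Worked example of the age-out above: with queue_heuristic == 16384
 * (SCHED_Q_LOW_WATER), one second of inactivity halves it to 8192
 * (16384 >> 1), ten seconds shift it down to 16 (16384 >> 10), and any
 * gap of 64 seconds or more zeroes it outright.
 */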