scheduler_vanilla.c

/* Copyright (c) 2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#include <event2/event.h>

#include "or.h"
#include "config.h"

#define TOR_CHANNEL_INTERNAL_
#include "channel.h"

#define SCHEDULER_PRIVATE_
#include "scheduler.h"

/*****************************************************************************
 * Other internal data
 *****************************************************************************/

/* Maximum cells to flush in a single call to channel_flush_some_cells(); */
#define MAX_FLUSH_CELLS 1000

/*****************************************************************************
 * Externally called function implementations
 *****************************************************************************/
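
/*
 * Overview: the vanilla scheduler keeps no state of its own.  When run, it
 * pops channels off the shared channels_pending priority queue (ordered by
 * scheduler_compare_channels()) and flushes as many queued cells from each
 * one as its connection will currently accept, then moves the channel to
 * the appropriate SCHED_CHAN_* state.
 */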

/* Return true iff the scheduler has work to perform. */
static int
have_work(void)
{
  smartlist_t *cp = get_channels_pending();
  IF_BUG_ONCE(!cp) {
    return 0; // channels_pending doesn't exist so... no work?
  }
  return smartlist_len(cp) > 0;
}

/** Re-trigger the scheduler in a way safe to use from the callback */
static void
vanilla_scheduler_schedule(void)
{
  if (!have_work()) {
    return;
  }

  /* Activate our event so it can process channels. */
  scheduler_ev_active(EV_TIMEOUT);
}
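
/* Drain the pending channels: flush what we can from each one and update
 * its scheduler state.  Installed as the .run callback in the
 * vanilla_scheduler struct below. */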
static void
vanilla_scheduler_run(void)
{
  int n_cells, n_chans_before, n_chans_after;
  ssize_t flushed, flushed_this_time;
  smartlist_t *cp = get_channels_pending();
  smartlist_t *to_readd = NULL;
  channel_t *chan = NULL;

  log_debug(LD_SCHED, "We have a chance to run the scheduler");

  n_chans_before = smartlist_len(cp);
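
  /* Service every channel that is currently pending.  A channel that still
   * has cells queued and can still write at the end of its turn is saved in
   * to_readd and put back on the pending queue only after this loop
   * finishes, so the loop always terminates. */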
  while (smartlist_len(cp) > 0) {
    /* Pop off a channel */
    chan = smartlist_pqueue_pop(cp,
                                scheduler_compare_channels,
                                offsetof(channel_t, sched_heap_idx));
    IF_BUG_ONCE(!chan) {
      /* Some-freaking-how a NULL got into the channels_pending. That should
       * never happen, but it should be harmless to ignore it and keep
       * looping. */
      continue;
    }

    /* Figure out how many cells we can write */
    n_cells = channel_num_cells_writeable(chan);
    if (n_cells > 0) {
      log_debug(LD_SCHED,
                "Scheduler saw pending channel " U64_FORMAT " at %p with "
                "%d cells writeable",
                U64_PRINTF_ARG(chan->global_identifier), chan, n_cells);
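
      /* Flush up to n_cells cells in chunks of at most MAX_FLUSH_CELLS,
       * stopping early if the channel stops making progress. */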
      flushed = 0;
      while (flushed < n_cells) {
        flushed_this_time =
          channel_flush_some_cells(chan,
                                   MIN(MAX_FLUSH_CELLS,
                                       (size_t) n_cells - flushed));
        if (flushed_this_time <= 0) break;
        flushed += flushed_this_time;
      }
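
      /* Decide which state the channel belongs in now, based on whether it
       * still has cells queued and whether its connection can take more. */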
      if (flushed < n_cells) {
        /* We ran out of cells to flush */
        scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
      } else {
        /* The channel may still have some cells */
        if (channel_more_to_flush(chan)) {
          /* The channel goes to either pending or waiting_to_write */
          if (channel_num_cells_writeable(chan) > 0) {
            /* Add it back to pending later */
            if (!to_readd) to_readd = smartlist_new();
            smartlist_add(to_readd, chan);
            log_debug(LD_SCHED,
                      "Channel " U64_FORMAT " at %p "
                      "is still pending",
                      U64_PRINTF_ARG(chan->global_identifier),
                      chan);
          } else {
            /* It's waiting to be able to write more */
            scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
          }
        } else {
          /* No cells left; it can go to idle or waiting_for_cells */
          if (channel_num_cells_writeable(chan) > 0) {
            /*
             * It can still accept writes, so it goes to
             * waiting_for_cells
             */
            scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_FOR_CELLS);
          } else {
            /*
             * We exactly filled up the output queue with all available
             * cells; go to idle.
             */
            scheduler_set_channel_state(chan, SCHED_CHAN_IDLE);
          }
        }
      }

      log_debug(LD_SCHED,
                "Scheduler flushed %d cells onto pending channel "
                U64_FORMAT " at %p",
                (int)flushed, U64_PRINTF_ARG(chan->global_identifier),
                chan);
    } else {
      log_info(LD_SCHED,
               "Scheduler saw pending channel " U64_FORMAT " at %p with "
               "no cells writeable",
               U64_PRINTF_ARG(chan->global_identifier), chan);
      /* Put it back to WAITING_TO_WRITE */
      scheduler_set_channel_state(chan, SCHED_CHAN_WAITING_TO_WRITE);
    }
  }

  /* Readd any channels we need to */
  if (to_readd) {
    SMARTLIST_FOREACH_BEGIN(to_readd, channel_t *, readd_chan) {
      scheduler_set_channel_state(readd_chan, SCHED_CHAN_PENDING);
      smartlist_pqueue_add(cp,
                           scheduler_compare_channels,
                           offsetof(channel_t, sched_heap_idx),
                           readd_chan);
    } SMARTLIST_FOREACH_END(readd_chan);
    smartlist_free(to_readd);
  }

  n_chans_after = smartlist_len(cp);
  log_debug(LD_SCHED, "Scheduler handled %d of %d pending channels",
            n_chans_before - n_chans_after, n_chans_before);
}

/* Stores the vanilla scheduler function pointers. */
static scheduler_t vanilla_scheduler = {
  .type = SCHEDULER_VANILLA,
  .free_all = NULL,
  .on_channel_free = NULL,
  .init = NULL,
  .on_new_consensus = NULL,
  .schedule = vanilla_scheduler_schedule,
  .run = vanilla_scheduler_run,
  .on_new_options = NULL,
};

scheduler_t *
get_vanilla_scheduler(void)
{
  return &vanilla_scheduler;
}
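
/*
 * Usage sketch (illustrative only, not part of this file): the scheduler
 * core in scheduler.c is assumed to drive this implementation roughly like
 * so, through the scheduler_t it gets back:
 *
 *   scheduler_t *sched = get_vanilla_scheduler();
 *   if (sched->schedule)
 *     sched->schedule();   // note that there is work and arm the event
 *   ...
 *   if (sched->run)
 *     sched->run();        // later, from the scheduler's event callback
 *
 * The exact call sites and event wiring live in scheduler.c.
 */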