test_scheduler.c

  1. /* Copyright (c) 2014-2017, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. #include "orconfig.h"
  4. #include <math.h>
  5. #include <event2/event.h>
  6. #define SCHEDULER_KIST_PRIVATE
  7. #define TOR_CHANNEL_INTERNAL_
  8. #define CHANNEL_PRIVATE_
  9. #include "or.h"
  10. #include "config.h"
  11. #include "compat_libevent.h"
  12. #include "channel.h"
  13. #include "channeltls.h"
  14. #include "connection.h"
  15. #include "networkstatus.h"
  16. #define SCHEDULER_PRIVATE_
  17. #include "scheduler.h"
  18. /* Test suite stuff */
  19. #include "test.h"
  20. #include "fakechans.h"
  21. /* Shamelessly stolen from compat_libevent.c */
  22. #define V(major, minor, patch) \
  23. (((major) << 24) | ((minor) << 16) | ((patch) << 8))
  24. /******************************************************************************
  25. * Statistical info
  26. *****************************************************************************/
  27. static int scheduler_compare_channels_mock_ctr = 0;
  28. static int scheduler_run_mock_ctr = 0;
  29. /******************************************************************************
  30. * Utility functions and things we need to mock
  31. *****************************************************************************/
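/* A mutable or_options_t handed back by mock_get_options() below, so each
 * test can set exactly the scheduler-related options it needs. */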
  32. static or_options_t mocked_options;
  33. static const or_options_t *
  34. mock_get_options(void)
  35. {
  36. return &mocked_options;
  37. }
  38. static void
  39. cleanup_scheduler_options(void)
  40. {
  41. if (mocked_options.SchedulerTypes_) {
  42. SMARTLIST_FOREACH(mocked_options.SchedulerTypes_, int *, i, tor_free(i));
  43. smartlist_free(mocked_options.SchedulerTypes_);
  44. mocked_options.SchedulerTypes_ = NULL;
  45. }
  46. }
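/* Append a scheduler type to the mocked SchedulerTypes_ list, creating the
 * list on first use. */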
  47. static void
  48. set_scheduler_options(int val)
  49. {
  50. int *type;
  51. if (mocked_options.SchedulerTypes_ == NULL) {
  52. mocked_options.SchedulerTypes_ = smartlist_new();
  53. }
  54. type = tor_malloc_zero(sizeof(int));
  55. *type = val;
  56. smartlist_add(mocked_options.SchedulerTypes_, type);
  57. }
  58. static void
  59. clear_options(void)
  60. {
  61. cleanup_scheduler_options();
  62. memset(&mocked_options, 0, sizeof(mocked_options));
  63. }
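/* Consensus parameter mocks: the "vanilla" one reports KISTSchedRunInterval=0
 * (KIST disabled by consensus), the "kist" one reports 12. */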
  64. static int32_t
  65. mock_vanilla_networkstatus_get_param(
  66. const networkstatus_t *ns, const char *param_name, int32_t default_val,
  67. int32_t min_val, int32_t max_val)
  68. {
  69. (void)ns;
  70. (void)default_val;
  71. (void)min_val;
  72. (void)max_val;
  73. // only support KISTSchedRunInterval right now
  74. tor_assert(strcmp(param_name, "KISTSchedRunInterval")==0);
  75. return 0;
  76. }
  77. static int32_t
  78. mock_kist_networkstatus_get_param(
  79. const networkstatus_t *ns, const char *param_name, int32_t default_val,
  80. int32_t min_val, int32_t max_val)
  81. {
  82. (void)ns;
  83. (void)default_val;
  84. (void)min_val;
  85. (void)max_val;
  86. // only support KISTSchedRunInterval right now
  87. tor_assert(strcmp(param_name, "KISTSchedRunInterval")==0);
  88. return 12;
  89. }
  91. /* Event base for scheduler tests */
  91. static struct event_base *mock_event_base = NULL;
  92. /* Setup for mock event stuff */
  93. static void mock_event_free_all(void);
  94. static void mock_event_init(void);
  95. static void
  96. mock_event_free_all(void)
  97. {
  98. tt_ptr_op(mock_event_base, OP_NE, NULL);
  99. if (mock_event_base) {
  100. event_base_free(mock_event_base);
  101. mock_event_base = NULL;
  102. }
  103. tt_ptr_op(mock_event_base, OP_EQ, NULL);
  104. done:
  105. return;
  106. }
  107. static void
  108. mock_event_init(void)
  109. {
  110. struct event_config *cfg = NULL;
  111. tt_ptr_op(mock_event_base, OP_EQ, NULL);
  112. /*
  113. * Really cut down from tor_libevent_initialize of
  114. * src/common/compat_libevent.c to kill config dependencies
  115. */
  116. if (!mock_event_base) {
  117. cfg = event_config_new();
  118. #if LIBEVENT_VERSION_NUMBER >= V(2,0,9)
  119. /* We can enable changelist support with epoll, since we don't give
  120. * Libevent any dup'd fds. This lets us avoid some syscalls. */
  121. event_config_set_flag(cfg, EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST);
  122. #endif
  123. mock_event_base = event_base_new_with_config(cfg);
  124. event_config_free(cfg);
  125. }
  126. tt_ptr_op(mock_event_base, OP_NE, NULL);
  127. done:
  128. return;
  129. }
  130. static struct event_base *
  131. tor_libevent_get_base_mock(void)
  132. {
  133. return mock_event_base;
  134. }
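/* Compare channels by raw pointer value only (note the reversed ordering) and
 * count the calls, so tests can check that scheduler_touch_channel() really
 * caused a comparison. */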
  135. static int
  136. scheduler_compare_channels_mock(const void *c1_v,
  137. const void *c2_v)
  138. {
  139. uintptr_t p1, p2;
  140. p1 = (uintptr_t)(c1_v);
  141. p2 = (uintptr_t)(c2_v);
  142. ++scheduler_compare_channels_mock_ctr;
  143. if (p1 == p2) return 0;
  144. else if (p1 < p2) return 1;
  145. else return -1;
  146. }
  147. static void
  148. scheduler_run_noop_mock(void)
  149. {
  150. ++scheduler_run_mock_ctr;
  151. }
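/* Targets and canned return values for the circuitmux_get_policy() and
 * circuitmux_compare_muxes() mocks below; any other mux falls through to the
 * real functions. */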
  152. static circuitmux_t *mock_ccm_tgt_1 = NULL;
  153. static circuitmux_t *mock_ccm_tgt_2 = NULL;
  154. static circuitmux_t *mock_cgp_tgt_1 = NULL;
  155. static circuitmux_policy_t *mock_cgp_val_1 = NULL;
  156. static circuitmux_t *mock_cgp_tgt_2 = NULL;
  157. static circuitmux_policy_t *mock_cgp_val_2 = NULL;
  158. static const circuitmux_policy_t *
  159. circuitmux_get_policy_mock(circuitmux_t *cmux)
  160. {
  161. const circuitmux_policy_t *result = NULL;
  162. tt_assert(cmux != NULL);
  163. if (cmux) {
  164. if (cmux == mock_cgp_tgt_1) result = mock_cgp_val_1;
  165. else if (cmux == mock_cgp_tgt_2) result = mock_cgp_val_2;
  166. else result = circuitmux_get_policy__real(cmux);
  167. }
  168. done:
  169. return result;
  170. }
  171. static int
  172. circuitmux_compare_muxes_mock(circuitmux_t *cmux_1,
  173. circuitmux_t *cmux_2)
  174. {
  175. int result = 0;
  176. tt_assert(cmux_1 != NULL);
  177. tt_assert(cmux_2 != NULL);
  178. if (cmux_1 != cmux_2) {
  179. if (cmux_1 == mock_ccm_tgt_1 && cmux_2 == mock_ccm_tgt_2) result = -1;
  180. else if (cmux_1 == mock_ccm_tgt_2 && cmux_2 == mock_ccm_tgt_1) {
  181. result = 1;
  182. } else {
  183. if (cmux_1 == mock_ccm_tgt_1 || cmux_1 == mock_ccm_tgt_2) result = -1;
  184. else if (cmux_2 == mock_ccm_tgt_1 || cmux_2 == mock_ccm_tgt_2) {
  185. result = 1;
  186. } else {
  187. result = circuitmux_compare_muxes__real(cmux_1, cmux_2);
  188. }
  189. }
  190. }
  191. /* else result = 0 always */
  192. done:
  193. return result;
  194. }
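/* Bookkeeping for the channel_flush_some_cells() mock: how many cells each
 * fake channel still has left to "flush". */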
  195. typedef struct {
  196. const channel_t *chan;
  197. ssize_t cells;
  198. } flush_mock_channel_t;
  199. static smartlist_t *chans_for_flush_mock = NULL;
  200. static void
  201. channel_flush_some_cells_mock_free_all(void)
  202. {
  203. if (chans_for_flush_mock) {
  204. SMARTLIST_FOREACH_BEGIN(chans_for_flush_mock,
  205. flush_mock_channel_t *,
  206. flush_mock_ch) {
  207. SMARTLIST_DEL_CURRENT(chans_for_flush_mock, flush_mock_ch);
  208. tor_free(flush_mock_ch);
  209. } SMARTLIST_FOREACH_END(flush_mock_ch);
  210. smartlist_free(chans_for_flush_mock);
  211. chans_for_flush_mock = NULL;
  212. }
  213. }
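/* Record (or update) how many cells the given channel should report as
 * available to the flush mocks below. */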
  214. static void
  215. channel_flush_some_cells_mock_set(channel_t *chan, ssize_t num_cells)
  216. {
  217. int found = 0;
  218. if (!chan) return;
  219. if (num_cells <= 0) return;
  220. if (!chans_for_flush_mock) {
  221. chans_for_flush_mock = smartlist_new();
  222. }
  223. SMARTLIST_FOREACH_BEGIN(chans_for_flush_mock,
  224. flush_mock_channel_t *,
  225. flush_mock_ch) {
  226. if (flush_mock_ch != NULL && flush_mock_ch->chan != NULL) {
  227. if (flush_mock_ch->chan == chan) {
  228. /* Found it */
  229. flush_mock_ch->cells = num_cells;
  230. found = 1;
  231. break;
  232. }
  233. } else {
  234. /* That shouldn't be there... */
  235. SMARTLIST_DEL_CURRENT(chans_for_flush_mock, flush_mock_ch);
  236. tor_free(flush_mock_ch);
  237. }
  238. } SMARTLIST_FOREACH_END(flush_mock_ch);
  239. if (! found) {
  240. /* The loop didn't find it */
  241. flush_mock_channel_t *flush_mock_ch;
  242. flush_mock_ch = tor_malloc_zero(sizeof(*flush_mock_ch));
  243. flush_mock_ch->chan = chan;
  244. flush_mock_ch->cells = num_cells;
  245. smartlist_add(chans_for_flush_mock, flush_mock_ch);
  246. }
  247. }
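/* Mock for channel_more_to_flush(), backed by chans_for_flush_mock. */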
  248. static int
  249. channel_more_to_flush_mock(channel_t *chan)
  250. {
  251. tor_assert(chan);
  252. flush_mock_channel_t *found_mock_ch = NULL;
  253. SMARTLIST_FOREACH_BEGIN(chans_for_flush_mock,
  254. flush_mock_channel_t *,
  255. flush_mock_ch) {
  256. if (flush_mock_ch != NULL && flush_mock_ch->chan != NULL) {
  257. if (flush_mock_ch->chan == chan) {
  258. /* Found it */
  259. found_mock_ch = flush_mock_ch;
  260. break;
  261. }
  262. } else {
  263. /* That shouldn't be there... */
  264. SMARTLIST_DEL_CURRENT(chans_for_flush_mock, flush_mock_ch);
  265. tor_free(flush_mock_ch);
  266. }
  267. } SMARTLIST_FOREACH_END(flush_mock_ch);
  268. tor_assert(found_mock_ch);
  269. /* Check if any circuits would like to queue some */
  270. /* special for the mock: return the number of cells (instead of 1), or zero
  271. * if nothing to flush */
  272. return (found_mock_ch->cells > 0 ? (int)found_mock_ch->cells : 0 );
  273. }
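/* No-op stand-in for channel_write_to_kernel(); we don't track bytes here. */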
  274. static void
  275. channel_write_to_kernel_mock(channel_t *chan)
  276. {
  277. (void)chan;
  278. //log_debug(LD_SCHED, "chan=%d writing to kernel",
  279. // (int)chan->global_identifier);
  280. }
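/* Always claim the kernel can accept more data. */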
  281. static int
  282. channel_should_write_to_kernel_mock(outbuf_table_t *ot, channel_t *chan)
  283. {
  284. (void)ot;
  285. (void)chan;
  286. return 1;
  287. /* We could make this more complicated if we wanted. But I don't think doing
  288. * so tests much of anything */
  289. //static int called_counter = 0;
  290. //if (++called_counter >= 3) {
  291. // called_counter -= 3;
  292. // log_debug(LD_SCHED, "chan=%d should write to kernel",
  293. // (int)chan->global_identifier);
  294. // return 1;
  295. //}
  296. //return 0;
  297. }
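/* Mock for channel_flush_some_cells(): drain up to num_cells (or everything,
 * if num_cells is negative) from this channel's entry in chans_for_flush_mock
 * and report how many cells were "flushed". */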
  298. static ssize_t
  299. channel_flush_some_cells_mock(channel_t *chan, ssize_t num_cells)
  300. {
  301. ssize_t flushed = 0, max;
  302. char unlimited = 0;
  303. flush_mock_channel_t *found = NULL;
  304. tt_ptr_op(chan, OP_NE, NULL);
  305. if (chan) {
  306. if (num_cells < 0) {
  307. num_cells = 0;
  308. unlimited = 1;
  309. }
  310. /* Check if we have it */
  311. if (chans_for_flush_mock != NULL) {
  312. SMARTLIST_FOREACH_BEGIN(chans_for_flush_mock,
  313. flush_mock_channel_t *,
  314. flush_mock_ch) {
  315. if (flush_mock_ch != NULL && flush_mock_ch->chan != NULL) {
  316. if (flush_mock_ch->chan == chan) {
  317. /* Found it */
  318. found = flush_mock_ch;
  319. break;
  320. }
  321. } else {
  322. /* That shouldn't be there... */
  323. SMARTLIST_DEL_CURRENT(chans_for_flush_mock, flush_mock_ch);
  324. tor_free(flush_mock_ch);
  325. }
  326. } SMARTLIST_FOREACH_END(flush_mock_ch);
  327. if (found) {
  328. /* We found one */
  329. if (found->cells < 0) found->cells = 0;
  330. if (unlimited) max = found->cells;
  331. else max = MIN(found->cells, num_cells);
  332. flushed += max;
  333. found->cells -= max;
  334. }
  335. }
  336. }
  337. done:
  338. return flushed;
  339. }
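/* Pretend the kernel never pushes back: zero out the TCP info and give the
 * socket an effectively unlimited write limit. */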
  340. static void
  341. update_socket_info_impl_mock(socket_table_ent_t *ent)
  342. {
  343. ent->cwnd = ent->unacked = ent->mss = ent->notsent = 0;
  344. ent->limit = INT_MAX;
  345. }
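/* Drive two fake channels through the scheduler states (IDLE,
 * WAITING_TO_WRITE, WAITING_FOR_CELLS, PENDING) for the given scheduler type,
 * with scheduler_run() stubbed out so only the transitions are exercised. */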
  346. static void
  347. perform_channel_state_tests(int KISTSchedRunInterval, int sched_type)
  348. {
  349. channel_t *ch1 = NULL, *ch2 = NULL;
  350. int old_count;
  351. /* setup options so we're sure about what sched we are running */
  352. MOCK(get_options, mock_get_options);
  353. clear_options();
  354. mocked_options.KISTSchedRunInterval = KISTSchedRunInterval;
  355. set_scheduler_options(sched_type);
  356. /* Set up libevent and scheduler */
  357. mock_event_init();
  358. MOCK(tor_libevent_get_base, tor_libevent_get_base_mock);
  359. scheduler_init();
  360. /*
  361. * Install the compare channels mock so we can test
  362. * scheduler_touch_channel().
  363. */
  364. MOCK(scheduler_compare_channels, scheduler_compare_channels_mock);
  365. /*
  366. * Disable scheduler_run so we can just check the state transitions
  367. * without having to make everything it might call work too.
  368. */
  369. ((scheduler_t *) the_scheduler)->run = scheduler_run_noop_mock;
  370. tt_int_op(smartlist_len(channels_pending), OP_EQ, 0);
  371. /* Set up a fake channel */
  372. ch1 = new_fake_channel();
  373. tt_assert(ch1);
  374. /* Start it off in OPENING */
  375. ch1->state = CHANNEL_STATE_OPENING;
  376. /* Try to register it */
  377. channel_register(ch1);
  378. tt_assert(ch1->registered);
  379. /* It should start off in SCHED_CHAN_IDLE */
  380. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  381. /* Now get another one */
  382. ch2 = new_fake_channel();
  383. tt_assert(ch2);
  384. ch2->state = CHANNEL_STATE_OPENING;
  385. channel_register(ch2);
  386. tt_assert(ch2->registered);
  387. /* Send ch1 to SCHED_CHAN_WAITING_TO_WRITE */
  388. scheduler_channel_has_waiting_cells(ch1);
  389. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_TO_WRITE);
  390. /* This should send it to SCHED_CHAN_PENDING */
  391. scheduler_channel_wants_writes(ch1);
  392. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  393. tt_int_op(smartlist_len(channels_pending), OP_EQ, 1);
  394. /* Now send ch2 to SCHED_CHAN_WAITING_FOR_CELLS */
  395. scheduler_channel_wants_writes(ch2);
  396. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  397. /* Drop ch2 back to idle */
  398. scheduler_channel_doesnt_want_writes(ch2);
  399. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  400. /* ...and back to SCHED_CHAN_WAITING_FOR_CELLS */
  401. scheduler_channel_wants_writes(ch2);
  402. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  403. /* ...and this should kick ch2 into SCHED_CHAN_PENDING */
  404. scheduler_channel_has_waiting_cells(ch2);
  405. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  406. tt_int_op(smartlist_len(channels_pending), OP_EQ, 2);
  407. /* This should send ch2 to SCHED_CHAN_WAITING_TO_WRITE */
  408. scheduler_channel_doesnt_want_writes(ch2);
  409. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_TO_WRITE);
  410. tt_int_op(smartlist_len(channels_pending), OP_EQ, 1);
  411. /* ...and back to SCHED_CHAN_PENDING */
  412. scheduler_channel_wants_writes(ch2);
  413. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  414. tt_int_op(smartlist_len(channels_pending), OP_EQ, 2);
  415. /* Now we exercise scheduler_touch_channel */
  416. old_count = scheduler_compare_channels_mock_ctr;
  417. scheduler_touch_channel(ch1);
  418. tt_assert(scheduler_compare_channels_mock_ctr > old_count);
  419. /* Release ch2 and then do it a second time to make sure it doesn't blow
  420. * up and that we are still in a quiescent state. */
  421. scheduler_release_channel(ch2);
  422. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  423. tt_int_op(smartlist_len(channels_pending), OP_EQ, 1);
  424. /* Cheat a bit to confuse the release, which also tells us whether the
  425. * release put the channel back in the right state. */
  426. ch2->scheduler_state = SCHED_CHAN_PENDING;
  427. scheduler_release_channel(ch2);
  428. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  429. tt_int_op(smartlist_len(channels_pending), OP_EQ, 1);
  430. /* Close */
  431. channel_mark_for_close(ch1);
  432. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_CLOSING);
  433. channel_mark_for_close(ch2);
  434. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_CLOSING);
  435. channel_closed(ch1);
  436. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_CLOSED);
  437. ch1 = NULL;
  438. channel_closed(ch2);
  439. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_CLOSED);
  440. ch2 = NULL;
  441. /* Shut things down */
  442. channel_free_all();
  443. scheduler_free_all();
  444. mock_event_free_all();
  445. done:
  446. tor_free(ch1);
  447. tor_free(ch2);
  448. UNMOCK(scheduler_compare_channels);
  449. UNMOCK(tor_libevent_get_base);
  450. UNMOCK(get_options);
  451. cleanup_scheduler_options();
  452. return;
  453. }
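/* Check scheduler_compare_channels() for the equal-channel, different-policy
 * and same-policy cases, using mocked circuitmux policies and comparisons. */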
  454. static void
  455. test_scheduler_compare_channels(void *arg)
  456. {
  457. /* We don't actually need whole fake channels... */
  458. channel_t c1, c2;
  459. /* ...and some dummy circuitmuxes too */
  460. circuitmux_t *cm1 = NULL, *cm2 = NULL;
  461. int result;
  462. (void)arg;
  463. /* We can't actually see sizeof(circuitmux_t) from here */
  464. cm1 = tor_malloc_zero(sizeof(void *));
  465. cm2 = tor_malloc_zero(sizeof(void *));
  466. c1.cmux = cm1;
  467. c2.cmux = cm2;
  468. /* Configure circuitmux_get_policy() mock */
  469. mock_cgp_tgt_1 = cm1;
  470. mock_cgp_tgt_2 = cm2;
  471. /*
  472. * This is to test the different-policies case, which uses the policy
  473. * cast to an uintptr_t as an arbitrary but definite thing to compare.
  474. */
  475. mock_cgp_val_1 = tor_malloc_zero(16);
  476. mock_cgp_val_2 = tor_malloc_zero(16);
  477. if ( ((uintptr_t) mock_cgp_val_1) > ((uintptr_t) mock_cgp_val_2) ) {
  478. void *tmp = mock_cgp_val_1;
  479. mock_cgp_val_1 = mock_cgp_val_2;
  480. mock_cgp_val_2 = tmp;
  481. }
  482. MOCK(circuitmux_get_policy, circuitmux_get_policy_mock);
  483. /* Now set up circuitmux_compare_muxes() mock using cm1/cm2 */
  484. mock_ccm_tgt_1 = cm1;
  485. mock_ccm_tgt_2 = cm2;
  486. MOCK(circuitmux_compare_muxes, circuitmux_compare_muxes_mock);
  487. /* Equal-channel case */
  488. result = scheduler_compare_channels(&c1, &c1);
  489. tt_int_op(result, OP_EQ, 0);
  490. /* Distinct channels, distinct policies */
  491. result = scheduler_compare_channels(&c1, &c2);
  492. tt_int_op(result, OP_EQ, -1);
  493. result = scheduler_compare_channels(&c2, &c1);
  494. tt_int_op(result, OP_EQ, 1);
  495. /* Distinct channels, same policy */
  496. tor_free(mock_cgp_val_2);
  497. mock_cgp_val_2 = mock_cgp_val_1;
  498. result = scheduler_compare_channels(&c1, &c2);
  499. tt_int_op(result, OP_EQ, -1);
  500. result = scheduler_compare_channels(&c2, &c1);
  501. tt_int_op(result, OP_EQ, 1);
  502. done:
  503. UNMOCK(circuitmux_compare_muxes);
  504. mock_ccm_tgt_1 = NULL;
  505. mock_ccm_tgt_2 = NULL;
  506. UNMOCK(circuitmux_get_policy);
  507. mock_cgp_tgt_1 = NULL;
  508. mock_cgp_tgt_2 = NULL;
  509. tor_free(cm1);
  510. tor_free(cm2);
  511. if (mock_cgp_val_1 != mock_cgp_val_2)
  512. tor_free(mock_cgp_val_1);
  513. tor_free(mock_cgp_val_2);
  514. mock_cgp_val_1 = NULL;
  515. mock_cgp_val_2 = NULL;
  516. return;
  517. }
  518. /******************************************************************************
  519. * The actual tests!
  520. *****************************************************************************/
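/* Run the vanilla scheduler loop end to end: two fake channels move through
 * the scheduler states and then through run() passes with
 * channel_flush_some_cells() mocked. */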
  521. static void
  522. test_scheduler_loop_vanilla(void *arg)
  523. {
  524. (void)arg;
  525. channel_t *ch1 = NULL, *ch2 = NULL;
  526. void (*run_func_ptr)(void);
  527. /* setup options so we're sure about what sched we are running */
  528. MOCK(get_options, mock_get_options);
  529. clear_options();
  530. set_scheduler_options(SCHEDULER_VANILLA);
  531. mocked_options.KISTSchedRunInterval = 0;
  532. /* Set up libevent and scheduler */
  533. mock_event_init();
  534. MOCK(tor_libevent_get_base, tor_libevent_get_base_mock);
  535. scheduler_init();
  536. /*
  537. * Install the compare channels mock so we can test
  538. * scheduler_touch_channel().
  539. */
  540. MOCK(scheduler_compare_channels, scheduler_compare_channels_mock);
  541. /*
  542. * Disable scheduler_run so we can just check the state transitions
  543. * without having to make everything it might call work too.
  544. */
  545. run_func_ptr = the_scheduler->run;
  546. ((scheduler_t *) the_scheduler)->run = scheduler_run_noop_mock;
  547. tt_int_op(smartlist_len(channels_pending), OP_EQ, 0);
  548. /* Set up a fake channel */
  549. ch1 = new_fake_channel();
  550. ch1->magic = TLS_CHAN_MAGIC;
  551. tt_assert(ch1);
  552. /* Start it off in OPENING */
  553. ch1->state = CHANNEL_STATE_OPENING;
  554. /* Try to register it */
  555. channel_register(ch1);
  556. tt_assert(ch1->registered);
  557. /* Finish opening it */
  558. channel_change_state_open(ch1);
  559. /* It should start off in SCHED_CHAN_IDLE */
  560. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  561. /* Now get another one */
  562. ch2 = new_fake_channel();
  563. ch2->magic = TLS_CHAN_MAGIC;
  564. tt_assert(ch2);
  565. ch2->state = CHANNEL_STATE_OPENING;
  566. channel_register(ch2);
  567. tt_assert(ch2->registered);
  568. /*
  569. * Don't open ch2; then channel_num_cells_writeable() will return
  570. * zero and we'll get coverage of that exception case in scheduler_run()
  571. */
  572. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_OPEN);
  573. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_OPENING);
  574. /* Send it to SCHED_CHAN_WAITING_TO_WRITE */
  575. scheduler_channel_has_waiting_cells(ch1);
  576. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_TO_WRITE);
  577. /* This should send it to SCHED_CHAN_PENDING */
  578. scheduler_channel_wants_writes(ch1);
  579. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  580. tt_int_op(smartlist_len(channels_pending), OP_EQ, 1);
  581. /* Now send ch2 to SCHED_CHAN_WAITING_FOR_CELLS */
  582. scheduler_channel_wants_writes(ch2);
  583. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  584. /* Drop ch2 back to idle */
  585. scheduler_channel_doesnt_want_writes(ch2);
  586. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  587. /* ...and back to SCHED_CHAN_WAITING_FOR_CELLS */
  588. scheduler_channel_wants_writes(ch2);
  589. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  590. /* ...and this should kick ch2 into SCHED_CHAN_PENDING */
  591. scheduler_channel_has_waiting_cells(ch2);
  592. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  593. tt_int_op(smartlist_len(channels_pending), OP_EQ, 2);
  594. /*
  595. * Now we've got two pending channels and need to fire off
  596. * the scheduler run() that we kept.
  597. */
  598. run_func_ptr();
  599. /*
  600. * Assert that they're still in the states we left and aren't still
  601. * pending
  602. */
  603. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_OPEN);
  604. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_OPENING);
  605. tt_assert(ch1->scheduler_state != SCHED_CHAN_PENDING);
  606. tt_assert(ch2->scheduler_state != SCHED_CHAN_PENDING);
  607. tt_int_op(smartlist_len(channels_pending), OP_EQ, 0);
  608. /* Now, finish opening ch2, and get both back to pending */
  609. channel_change_state_open(ch2);
  610. scheduler_channel_wants_writes(ch1);
  611. scheduler_channel_wants_writes(ch2);
  612. scheduler_channel_has_waiting_cells(ch1);
  613. scheduler_channel_has_waiting_cells(ch2);
  614. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_OPEN);
  615. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_OPEN);
  616. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  617. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  618. tt_int_op(smartlist_len(channels_pending), OP_EQ, 2);
  619. /* Now, set up the channel_flush_some_cells() mock */
  620. MOCK(channel_flush_some_cells, channel_flush_some_cells_mock);
  621. /*
  622. * 16 cells on ch1 means it'll completely drain into the 32 cells
  623. * fakechan's num_cells_writeable() returns.
  624. */
  625. channel_flush_some_cells_mock_set(ch1, 16);
  626. /*
  627. * This one should get sent back to pending, since num_cells_writeable()
  628. * will still return non-zero.
  629. */
  630. channel_flush_some_cells_mock_set(ch2, 48);
  631. /*
  632. * And re-run the scheduler run() loop with non-zero returns from
  633. * channel_flush_some_cells() this time.
  634. */
  635. run_func_ptr();
  636. /*
  637. * ch1 should have gone to SCHED_CHAN_WAITING_FOR_CELLS, with 16 flushed
  638. * and 32 writeable.
  639. */
  640. tt_int_op(ch1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  641. /*
  642. * ...ch2 should also have gone to SCHED_CHAN_WAITING_FOR_CELLS, with
  643. * channel_more_to_flush() returning false and channel_num_cells_writeable()
  644. * > 0.
  645. */
  646. tt_int_op(ch2->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  647. /* Close */
  648. channel_mark_for_close(ch1);
  649. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_CLOSING);
  650. channel_mark_for_close(ch2);
  651. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_CLOSING);
  652. channel_closed(ch1);
  653. tt_int_op(ch1->state, OP_EQ, CHANNEL_STATE_CLOSED);
  654. ch1 = NULL;
  655. channel_closed(ch2);
  656. tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_CLOSED);
  657. ch2 = NULL;
  658. /* Shut things down */
  659. channel_flush_some_cells_mock_free_all();
  660. channel_free_all();
  661. scheduler_free_all();
  662. mock_event_free_all();
  663. done:
  664. tor_free(ch1);
  665. tor_free(ch2);
  666. cleanup_scheduler_options();
  667. UNMOCK(channel_flush_some_cells);
  668. UNMOCK(scheduler_compare_channels);
  669. UNMOCK(tor_libevent_get_base);
  670. UNMOCK(get_options);
  671. }
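/* Run the KIST scheduler loop against fake channels with the kernel/socket
 * interactions mocked out, and check that a channel that closes while pending
 * ends up back in IDLE. Returns early when built without KIST support. */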
  672. static void
  673. test_scheduler_loop_kist(void *arg)
  674. {
  675. (void) arg;
  676. #ifndef HAVE_KIST_SUPPORT
  677. return;
  678. #endif
  679. channel_t *ch1 = new_fake_channel(), *ch2 = new_fake_channel();
  680. channel_t *ch3 = new_fake_channel();
  681. /* setup options so we're sure about what sched we are running */
  682. MOCK(get_options, mock_get_options);
  683. MOCK(channel_flush_some_cells, channel_flush_some_cells_mock);
  684. MOCK(channel_more_to_flush, channel_more_to_flush_mock);
  685. MOCK(channel_write_to_kernel, channel_write_to_kernel_mock);
  686. MOCK(channel_should_write_to_kernel, channel_should_write_to_kernel_mock);
  687. MOCK(update_socket_info_impl, update_socket_info_impl_mock);
  688. clear_options();
  689. mocked_options.KISTSchedRunInterval = 11;
  690. set_scheduler_options(SCHEDULER_KIST);
  691. scheduler_init();
  692. tt_assert(ch1);
  693. ch1->magic = TLS_CHAN_MAGIC;
  694. ch1->state = CHANNEL_STATE_OPENING;
  695. channel_register(ch1);
  696. tt_assert(ch1->registered);
  697. channel_change_state_open(ch1);
  698. scheduler_channel_has_waiting_cells(ch1);
  699. scheduler_channel_wants_writes(ch1);
  700. channel_flush_some_cells_mock_set(ch1, 5);
  701. tt_assert(ch2);
  702. ch2->magic = TLS_CHAN_MAGIC;
  703. ch2->state = CHANNEL_STATE_OPENING;
  704. channel_register(ch2);
  705. tt_assert(ch2->registered);
  706. channel_change_state_open(ch2);
  707. scheduler_channel_has_waiting_cells(ch2);
  708. scheduler_channel_wants_writes(ch2);
  709. channel_flush_some_cells_mock_set(ch2, 5);
  710. the_scheduler->run();
  711. scheduler_channel_has_waiting_cells(ch1);
  712. channel_flush_some_cells_mock_set(ch1, 5);
  713. the_scheduler->run();
  714. scheduler_channel_has_waiting_cells(ch1);
  715. channel_flush_some_cells_mock_set(ch1, 5);
  716. scheduler_channel_has_waiting_cells(ch2);
  717. channel_flush_some_cells_mock_set(ch2, 5);
  718. the_scheduler->run();
  719. channel_flush_some_cells_mock_free_all();
  720. /* We'll try to run this closed channel through the scheduler loop and make
  721. * sure it ends up in the right state. */
  722. tt_assert(ch3);
  723. ch3->magic = TLS_CHAN_MAGIC;
  724. ch3->state = CHANNEL_STATE_OPEN;
  725. circuitmux_free(ch3->cmux);
  726. ch3->cmux = circuitmux_alloc();
  727. channel_register(ch3);
  728. tt_assert(ch3->registered);
  729. ch3->scheduler_state = SCHED_CHAN_WAITING_FOR_CELLS;
  730. scheduler_channel_has_waiting_cells(ch3);
  731. /* Should be in the pending list now waiting to be handled. */
  732. tt_int_op(ch3->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  733. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  734. /* By running the scheduler on a closed channel, it should end up in the
  735. * IDLE state and not in the pending channel list. */
  736. ch3->state = CHANNEL_STATE_CLOSED;
  737. the_scheduler->run();
  738. tt_int_op(ch3->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  739. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  740. done:
  741. /* Prep the channels so the free() function doesn't explode. */
  742. ch1->state = ch2->state = ch3->state = CHANNEL_STATE_CLOSED;
  743. ch1->registered = ch2->registered = ch3->registered = 0;
  744. channel_free(ch1);
  745. channel_free(ch2);
  746. channel_free(ch3);
  747. UNMOCK(update_socket_info_impl);
  748. UNMOCK(channel_should_write_to_kernel);
  749. UNMOCK(channel_write_to_kernel);
  750. UNMOCK(channel_more_to_flush);
  751. UNMOCK(channel_flush_some_cells);
  752. UNMOCK(get_options);
  753. scheduler_free_all();
  754. return;
  755. }
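/* Exercise the channel state transitions once per available scheduler type. */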
  756. static void
  757. test_scheduler_channel_states(void *arg)
  758. {
  759. (void)arg;
  760. perform_channel_state_tests(-1, SCHEDULER_VANILLA);
  761. perform_channel_state_tests(11, SCHEDULER_KIST_LITE);
  762. #ifdef HAVE_KIST_SUPPORT
  763. perform_channel_state_tests(11, SCHEDULER_KIST);
  764. #endif
  765. }
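/* scheduler_init()/scheduler_free_all() should create and tear down
 * channels_pending and run_sched_ev; with no KISTSchedRunInterval forced and
 * no consensus, the KIST scheduler should be selected with a run interval
 * of 10. */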
  766. static void
  767. test_scheduler_initfree(void *arg)
  768. {
  769. (void)arg;
  770. tt_ptr_op(channels_pending, ==, NULL);
  771. tt_ptr_op(run_sched_ev, ==, NULL);
  772. mock_event_init();
  773. MOCK(tor_libevent_get_base, tor_libevent_get_base_mock);
  774. MOCK(get_options, mock_get_options);
  775. set_scheduler_options(SCHEDULER_KIST);
  776. set_scheduler_options(SCHEDULER_KIST_LITE);
  777. set_scheduler_options(SCHEDULER_VANILLA);
  778. scheduler_init();
  779. tt_ptr_op(channels_pending, !=, NULL);
  780. tt_ptr_op(run_sched_ev, !=, NULL);
  781. /* We have specified nothing in the torrc and there's no consensus so the
  782. * KIST scheduler is what should be in use */
  783. tt_ptr_op(the_scheduler, ==, get_kist_scheduler());
  784. tt_int_op(sched_run_interval, ==, 10);
  785. scheduler_free_all();
  786. UNMOCK(tor_libevent_get_base);
  787. mock_event_free_all();
  788. tt_ptr_op(channels_pending, ==, NULL);
  789. tt_ptr_op(run_sched_ev, ==, NULL);
  790. done:
  791. UNMOCK(get_options);
  792. cleanup_scheduler_options();
  793. return;
  794. }
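/* scheduler_can_use_kist() and kist_scheduler_run_interval(): forced on via
 * the torrc option, deferred to a consensus that enables KIST, and deferred
 * to one that disables it. */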
  795. static void
  796. test_scheduler_can_use_kist(void *arg)
  797. {
  798. (void)arg;
  799. int res_should, res_freq;
  800. MOCK(get_options, mock_get_options);
  801. /* Test force enabling of KIST */
  802. clear_options();
  803. mocked_options.KISTSchedRunInterval = 1234;
  804. res_should = scheduler_can_use_kist();
  805. res_freq = kist_scheduler_run_interval();
  806. #ifdef HAVE_KIST_SUPPORT
  807. tt_int_op(res_should, ==, 1);
  808. #else /* HAVE_KIST_SUPPORT */
  809. tt_int_op(res_should, ==, 0);
  810. #endif /* HAVE_KIST_SUPPORT */
  811. tt_int_op(res_freq, ==, 1234);
  812. /* Test defer to consensus, but no consensus available */
  813. clear_options();
  814. mocked_options.KISTSchedRunInterval = 0;
  815. res_should = scheduler_can_use_kist();
  816. res_freq = kist_scheduler_run_interval();
  817. #ifdef HAVE_KIST_SUPPORT
  818. tt_int_op(res_should, ==, 1);
  819. #else /* HAVE_KIST_SUPPORT */
  820. tt_int_op(res_should, ==, 0);
  821. #endif /* HAVE_KIST_SUPPORT */
  822. tt_int_op(res_freq, ==, 10);
  823. /* Test defer to consensus, and kist consensus available */
  824. MOCK(networkstatus_get_param, mock_kist_networkstatus_get_param);
  825. clear_options();
  826. mocked_options.KISTSchedRunInterval = 0;
  827. res_should = scheduler_can_use_kist();
  828. res_freq = kist_scheduler_run_interval();
  829. #ifdef HAVE_KIST_SUPPORT
  830. tt_int_op(res_should, ==, 1);
  831. #else /* HAVE_KIST_SUPPORT */
  832. tt_int_op(res_should, ==, 0);
  833. #endif /* HAVE_KIST_SUPPORT */
  834. tt_int_op(res_freq, ==, 12);
  835. UNMOCK(networkstatus_get_param);
  836. /* Test defer to consensus, and vanilla consensus available */
  837. MOCK(networkstatus_get_param, mock_vanilla_networkstatus_get_param);
  838. clear_options();
  839. mocked_options.KISTSchedRunInterval = 0;
  840. res_should = scheduler_can_use_kist();
  841. res_freq = kist_scheduler_run_interval();
  842. tt_int_op(res_should, ==, 0);
  843. tt_int_op(res_freq, ==, 0);
  844. UNMOCK(networkstatus_get_param);
  845. done:
  846. UNMOCK(get_options);
  847. return;
  848. }
  849. static void
  850. test_scheduler_ns_changed(void *arg)
  851. {
  852. (void) arg;
  853. /*
  854. * Currently no scheduler implementations use the old/new consensuses passed
  855. * in scheduler_notify_networkstatus_changed, so it is okay to pass NULL.
  856. *
  857. * "But then what does test actually exercise???" It tests that
  858. * scheduler_notify_networkstatus_changed fetches the correct value from the
  859. * consensus, and then switches the scheduler if necessasry.
  860. */
  861. MOCK(get_options, mock_get_options);
  862. clear_options();
  863. set_scheduler_options(SCHEDULER_KIST);
  864. set_scheduler_options(SCHEDULER_VANILLA);
  865. tt_ptr_op(the_scheduler, ==, NULL);
  866. /* Change from vanilla to kist via consensus */
  867. the_scheduler = get_vanilla_scheduler();
  868. MOCK(networkstatus_get_param, mock_kist_networkstatus_get_param);
  869. scheduler_notify_networkstatus_changed();
  870. UNMOCK(networkstatus_get_param);
  871. #ifdef HAVE_KIST_SUPPORT
  872. tt_ptr_op(the_scheduler, ==, get_kist_scheduler());
  873. #else
  874. tt_ptr_op(the_scheduler, ==, get_vanilla_scheduler());
  875. #endif
  876. /* Change from kist to vanilla via consensus */
  877. the_scheduler = get_kist_scheduler();
  878. MOCK(networkstatus_get_param, mock_vanilla_networkstatus_get_param);
  879. scheduler_notify_networkstatus_changed();
  880. UNMOCK(networkstatus_get_param);
  881. tt_ptr_op(the_scheduler, ==, get_vanilla_scheduler());
  882. /* Doesn't change when using KIST */
  883. the_scheduler = get_kist_scheduler();
  884. MOCK(networkstatus_get_param, mock_kist_networkstatus_get_param);
  885. scheduler_notify_networkstatus_changed();
  886. UNMOCK(networkstatus_get_param);
  887. #ifdef HAVE_KIST_SUPPORT
  888. tt_ptr_op(the_scheduler, ==, get_kist_scheduler());
  889. #else
  890. tt_ptr_op(the_scheduler, ==, get_vanilla_scheduler());
  891. #endif
  892. /* Doesn't change when using vanilla */
  893. the_scheduler = get_vanilla_scheduler();
  894. MOCK(networkstatus_get_param, mock_vanilla_networkstatus_get_param);
  895. scheduler_notify_networkstatus_changed();
  896. UNMOCK(networkstatus_get_param);
  897. tt_ptr_op(the_scheduler, ==, get_vanilla_scheduler());
  898. done:
  899. UNMOCK(get_options);
  900. cleanup_scheduler_options();
  901. return;
  902. }
  903. /*
  904. * Mocked functions for the kist_pending_list test.
  905. */
  906. static int mock_flush_some_cells_num = 1;
  907. static int mock_more_to_flush = 0;
  908. static int mock_update_socket_info_limit = 0;
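/* Tests tweak these three knobs to steer each pass of the KIST run() loop:
 * how many cells a flush reports, whether a channel claims to have more to
 * flush, and how much the mocked socket will accept. */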
  909. static ssize_t
  910. channel_flush_some_cells_mock_var(channel_t *chan, ssize_t num_cells)
  911. {
  912. (void) chan;
  913. (void) num_cells;
  914. return mock_flush_some_cells_num;
  915. }
  916. /* When we flush cells, the connection outbuf can get fully drained, in which
  917. * case the wants-to-write scheduler event fires back while we are still in
  918. * the scheduler loop; this mock function does that for us. Furthermore, the
  919. * socket limit is set to 0, so once this is triggered it informs the
  920. * scheduler that it can't write to the socket anymore. */
  921. static void
  922. channel_write_to_kernel_mock_trigger_24700(channel_t *chan)
  923. {
  924. static int chan_id_seen[2] = {0};
  925. if (++chan_id_seen[chan->global_identifier - 1] > 1) {
  926. tt_assert(0);
  927. }
  928. scheduler_channel_wants_writes(chan);
  929. done:
  930. return;
  931. }
  932. static int
  933. channel_more_to_flush_mock_var(channel_t *chan)
  934. {
  935. (void) chan;
  936. return mock_more_to_flush;
  937. }
  938. static void
  939. update_socket_info_impl_mock_var(socket_table_ent_t *ent)
  940. {
  941. ent->cwnd = ent->unacked = ent->mss = ent->notsent = 0;
  942. ent->limit = mock_update_socket_info_limit;
  943. }
  944. static void
  945. test_scheduler_kist_pending_list(void *arg)
  946. {
  947. (void) arg;
  948. #ifndef HAVE_KIST_SUPPORT
  949. return;
  950. #endif
  951. /* This tests the flow of channels through the pending list: depending on
  952. * the channel state, what behavior do we expect from the scheduler with
  953. * respect to that list.
  954. *
  955. * For instance, we want to catch adding a channel twice, removing a channel
  956. * that doesn't exist, or putting a channel in the list in the wrong state.
  957. * Essentially, this artificially exercises cases of the KIST main loop and
  958. * its entry points in the channel subsystem.
  959. *
  960. * In part, this is also meant to catch things like #24700 and to provide a
  961. * test bed for more such testing in the future. */
  962. /* Mocking a series of scheduler function to control the flow of the
  963. * scheduler loop to test every use cases and assess the pending list. */
  964. MOCK(get_options, mock_get_options);
  965. MOCK(channel_flush_some_cells, channel_flush_some_cells_mock_var);
  966. MOCK(channel_more_to_flush, channel_more_to_flush_mock_var);
  967. MOCK(update_socket_info_impl, update_socket_info_impl_mock_var);
  968. MOCK(channel_write_to_kernel, channel_write_to_kernel_mock);
  969. MOCK(channel_should_write_to_kernel, channel_should_write_to_kernel_mock);
  970. /* Setup options so we're sure about what sched we are running */
  971. mocked_options.KISTSchedRunInterval = 10;
  972. set_scheduler_options(SCHEDULER_KIST);
  973. /* Init scheduler. */
  974. scheduler_init();
  975. /* Initialize a channel. We'll need a second channel for the #24700 bug
  976. * test. */
  977. channel_t *chan1 = new_fake_channel();
  978. channel_t *chan2 = new_fake_channel();
  979. tt_assert(chan1);
  980. tt_assert(chan2);
  981. chan1->magic = chan2->magic = TLS_CHAN_MAGIC;
  982. channel_register(chan1);
  983. channel_register(chan2);
  984. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  985. tt_int_op(chan1->sched_heap_idx, OP_EQ, -1);
  986. tt_int_op(chan2->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  987. tt_int_op(chan2->sched_heap_idx, OP_EQ, -1);
  988. /* Once a channel becomes OPEN, it always has at least one cell in it, so
  989. * the scheduler is notified that the channel wants to write; this is the
  990. * first step. It might not make sense to you, but that is the way it is. */
  991. scheduler_channel_wants_writes(chan1);
  992. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  993. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  994. /* Signal the scheduler that it has waiting cells which means the channel
  995. * will get scheduled. */
  996. scheduler_channel_has_waiting_cells(chan1);
  997. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  998. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  999. /* Subsequent calls should not add it again. It is possible that we add many
  1000. * cells in rapid succession before the channel is scheduled. */
  1001. scheduler_channel_has_waiting_cells(chan1);
  1002. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1003. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  1004. scheduler_channel_has_waiting_cells(chan1);
  1005. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1006. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  1007. /* We'll flush one cell and make the socket writable, but with nothing more
  1008. * to flush, else we end up in an infinite loop. We expect the channel to be
  1009. * put in the waiting-for-cells state and the pending list to be empty. */
  1010. mock_update_socket_info_limit = INT_MAX;
  1011. mock_more_to_flush = 0;
  1012. the_scheduler->run();
  1013. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1014. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  1015. /* Let's pretend that a cell is now in the channel, but this time the
  1016. * channel can't write, so it obviously has more to flush. We expect the
  1017. * channel to be back in the pending list. */
  1018. scheduler_channel_has_waiting_cells(chan1);
  1019. mock_update_socket_info_limit = 0;
  1020. mock_more_to_flush = 1;
  1021. the_scheduler->run();
  1022. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  1023. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1024. /* The channel is in the pending list now. During that time, we'll trigger a
  1025. * wants-to-write event, because the channel buffers might have been emptied
  1026. * in the meantime. This is possible because once the connection outbuf is
  1027. * flushed below the low watermark, the scheduler is notified.
  1028. *
  1029. * We expect the channel NOT to be added to the pending list again and to
  1030. * stay in the PENDING state. */
  1031. scheduler_channel_wants_writes(chan1);
  1032. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  1033. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1034. /* Make it so the channel can write now but has nothing else to flush. We
  1035. * expect it to be removed from the pending list and left waiting for cells. */
  1036. mock_update_socket_info_limit = INT_MAX;
  1037. mock_more_to_flush = 0;
  1038. the_scheduler->run();
  1039. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1040. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  1041. /* While waiting for cells, let's say we were able to write more onto the
  1042. * connection outbuf (it is unlikely that this can happen, but let's say it
  1043. * does). We expect the channel to stay in waiting for cells. */
  1044. scheduler_channel_wants_writes(chan1);
  1045. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1046. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  1047. /* Now put it back in the pending list and make the cell flush fail with 0
  1048. * cells flushed. We expect it to be put back in waiting for cells. */
  1049. scheduler_channel_has_waiting_cells(chan1);
  1050. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  1051. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1052. mock_flush_some_cells_num = 0;
  1053. the_scheduler->run();
  1054. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1055. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_FOR_CELLS);
  1056. /* Set the channel to a state where it doesn't want to write more. We expect
  1057. * that the channel becomes idle. */
  1058. scheduler_channel_doesnt_want_writes(chan1);
  1059. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1060. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_IDLE);
  1061. /* Some cells arrive on the channel now. We expect it to go back to waiting
  1062. * to write. You might wonder why it is not put in the pending list: because
  1063. * once the channel becomes OPEN again (the doesn't-want-to-write event only
  1064. * occurs if the channel goes into MAINT mode), if there are cells in the
  1065. * channel, the wants-to-write event is triggered, thus putting the channel
  1066. * in pending mode.
  1067. *
  1068. * Otherwise, with no cells, it stays IDLE, and then once a cell comes in it
  1069. * goes to waiting to write, which is itself a BUG because the channel can't
  1070. * be scheduled until a second cell comes in. Hopefully, #24554 will fix
  1071. * that for KIST. */
  1072. scheduler_channel_has_waiting_cells(chan1);
  1073. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1074. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_TO_WRITE);
  1075. /* A second cell comes in; unfortunately, it won't get scheduled until a
  1076. * wants-to-write event occurs, as described above. */
  1077. scheduler_channel_has_waiting_cells(chan1);
  1078. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 0);
  1079. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_WAITING_TO_WRITE);
  1080. /* Unblock everything putting the channel in the pending list. */
  1081. scheduler_channel_wants_writes(chan1);
  1082. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 1);
  1083. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1084. /* Testing bug #24700 which is the situation where we have at least two
  1085. * different channels in the pending list. The first one gets flushed and
  1086. * bytes are written on the wire which triggers a wants to write event
  1087. * because the outbuf is below the low watermark. The bug was that this
  1088. * exact channel was added back in the pending list because its state wasn't
  1089. * PENDING.
  1090. *
  1091. * The following does some ninja-tsu to try to make it happen. We need two
  1092. * different channels so we create a second one and add it to the pending
  1093. * list. Then, we install a custom write-to-kernel function that does two
  1094. * important things:
  1095. *
  1096. * 1) Calls scheduler_channel_wants_writes(chan) on the channel.
  1097. * 2) Keeps track of how many times it sees the channel going through. If
  1098. * that count goes above 1, it means we've added the channel twice to the
  1099. * pending list.
  1100. *
  1101. * In the end, we expect both channels to be in the pending list after this
  1102. * scheduler run. */
  1103. /* Put the second channel in the pending list. */
  1104. scheduler_channel_wants_writes(chan2);
  1105. scheduler_channel_has_waiting_cells(chan2);
  1106. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 2);
  1107. tt_int_op(chan2->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1108. /* This makes the first pass of socket_can_write() return true, but once a
  1109. * single cell has been flushed (514 + 29 bytes), the second call to
  1110. * socket_can_write() will return false. If it didn't return false on the
  1111. * second run, we would end up in an infinite scheduler loop. */
  1112. mock_update_socket_info_limit = 600;
  1113. /* We want to hit "Case 3:" of the scheduler so channel_more_to_flush() is
  1114. * true but socket_can_write() has to be false on the second check on the
  1115. * channel. */
  1116. mock_more_to_flush = 1;
  1117. mock_flush_some_cells_num = 1;
  1118. MOCK(channel_write_to_kernel, channel_write_to_kernel_mock_trigger_24700);
  1119. the_scheduler->run();
  1120. tt_int_op(smartlist_len(get_channels_pending()), OP_EQ, 2);
  1121. tt_int_op(chan1->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1122. tt_int_op(chan2->scheduler_state, OP_EQ, SCHED_CHAN_PENDING);
  1123. done:
  1124. chan1->state = chan2->state = CHANNEL_STATE_CLOSED;
  1125. chan1->registered = chan2->registered = 0;
  1126. channel_free(chan1);
  1127. channel_free(chan2);
  1128. scheduler_free_all();
  1129. UNMOCK(get_options);
  1130. UNMOCK(channel_flush_some_cells);
  1131. UNMOCK(channel_more_to_flush);
  1132. UNMOCK(update_socket_info_impl);
  1133. UNMOCK(channel_write_to_kernel);
  1134. UNMOCK(channel_should_write_to_kernel);
  1135. }
  1136. struct testcase_t scheduler_tests[] = {
  1137. { "compare_channels", test_scheduler_compare_channels,
  1138. TT_FORK, NULL, NULL },
  1139. { "channel_states", test_scheduler_channel_states, TT_FORK, NULL, NULL },
  1140. { "initfree", test_scheduler_initfree, TT_FORK, NULL, NULL },
  1141. { "loop_vanilla", test_scheduler_loop_vanilla, TT_FORK, NULL, NULL },
  1142. { "loop_kist", test_scheduler_loop_kist, TT_FORK, NULL, NULL },
  1143. { "ns_changed", test_scheduler_ns_changed, TT_FORK, NULL, NULL},
  1144. { "should_use_kist", test_scheduler_can_use_kist, TT_FORK, NULL, NULL },
  1145. { "kist_pending_list", test_scheduler_kist_pending_list, TT_FORK,
  1146. NULL, NULL },
  1147. END_OF_TESTCASES
  1148. };