test_channel.c 36 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443
  1. /* Copyright (c) 2013, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. #define TOR_CHANNEL_INTERNAL_
  4. #define CHANNEL_PRIVATE_
  5. #include "or.h"
  6. #include "channel.h"
  7. /* For channel_note_destroy_not_pending */
  8. #include "circuitlist.h"
  9. #include "circuitmux.h"
  10. /* For var_cell_free */
  11. #include "connection_or.h"
  12. /* For packed_cell stuff */
  13. #define RELAY_PRIVATE
  14. #include "relay.h"
  15. /* For init/free stuff */
  16. #include "scheduler.h"
  17. /* Test suite stuff */
  18. #include "test.h"
  19. #include "fakechans.h"
  20. /* This comes from channel.c */
  21. extern uint64_t estimated_total_queue_size;
  22. static int test_chan_accept_cells = 0;
  23. static int test_chan_fixed_cells_recved = 0;
  24. static int test_chan_var_cells_recved = 0;
  25. static int test_cells_written = 0;
  26. static int test_destroy_not_pending_calls = 0;
  27. static int test_doesnt_want_writes_count = 0;
  28. static int test_has_waiting_cells_count = 0;
  29. static double test_overhead_estimate = 1.0f;
  30. static int test_releases_count = 0;
  31. static circuitmux_t *test_target_cmux = NULL;
  32. static unsigned int test_cmux_cells = 0;
  33. static int chan_test_channel_flush_from_first_active_circuit_mock(
  34. channel_t *chan, int max);
  35. static unsigned int chan_test_circuitmux_num_cells_mock(circuitmux_t *cmux);
  36. static void channel_note_destroy_not_pending_mock(channel_t *ch,
  37. circid_t circid);
  38. static void chan_test_cell_handler(channel_t *ch,
  39. cell_t *cell);
  40. static void chan_test_var_cell_handler(channel_t *ch,
  41. var_cell_t *var_cell);
  42. static void chan_test_close(channel_t *ch);
  43. static void chan_test_error(channel_t *ch);
  44. static void chan_test_finish_close(channel_t *ch);
  45. static size_t chan_test_num_bytes_queued(channel_t *ch);
  46. static int chan_test_num_cells_writeable(channel_t *ch);
  47. static int chan_test_write_cell(channel_t *ch, cell_t *cell);
  48. static int chan_test_write_packed_cell(channel_t *ch,
  49. packed_cell_t *packed_cell);
  50. static int chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell);
  51. static void scheduler_channel_doesnt_want_writes_mock(channel_t *ch);
  52. static void test_channel_flush(void *arg);
  53. static void test_channel_flushmux(void *arg);
  54. static void test_channel_incoming(void *arg);
  55. static void test_channel_lifecycle(void *arg);
  56. static void test_channel_multi(void *arg);
  57. static void test_channel_queue_size(void *arg);
  58. static void test_channel_write(void *arg);
  59. static void
  60. channel_note_destroy_not_pending_mock(channel_t *ch,
  61. circid_t circid)
  62. {
  63. (void)ch;
  64. (void)circid;
  65. ++test_destroy_not_pending_calls;
  66. }
  67. /**
  68. * If the target cmux is the cmux for chan, make fake cells up to the
  69. * target number of cells and write them to chan. Otherwise, invoke
  70. * the real channel_flush_from_first_active_circuit().
  71. */
  72. static int
  73. chan_test_channel_flush_from_first_active_circuit_mock(channel_t *chan,
  74. int max)
  75. {
  76. int result = 0, c = 0;
  77. packed_cell_t *cell = NULL;
  78. test_assert(chan != NULL);
  79. if (test_target_cmux != NULL &&
  80. test_target_cmux == chan->cmux) {
  81. while (c <= max && test_cmux_cells > 0) {
  82. cell = packed_cell_new();
  83. channel_write_packed_cell(chan, cell);
  84. ++c;
  85. --test_cmux_cells;
  86. }
  87. result = c;
  88. } else {
  89. result = channel_flush_from_first_active_circuit__real(chan, max);
  90. }
  91. done:
  92. return result;
  93. }
  94. /**
  95. * If we have a target cmux set and this matches it, lie about how
  96. * many cells we have according to the number indicated; otherwise
  97. * pass to the real circuitmux_num_cells().
  98. */
  99. static unsigned int
  100. chan_test_circuitmux_num_cells_mock(circuitmux_t *cmux)
  101. {
  102. unsigned int result = 0;
  103. test_assert(cmux != NULL);
  104. if (cmux != NULL) {
  105. if (cmux == test_target_cmux) {
  106. result = test_cmux_cells;
  107. } else {
  108. result = circuitmux_num_cells__real(cmux);
  109. }
  110. }
  111. done:
  112. return result;
  113. }
  114. /*
  115. * Handle an incoming fixed-size cell for unit tests
  116. */
  117. static void
  118. chan_test_cell_handler(channel_t *ch,
  119. cell_t *cell)
  120. {
  121. test_assert(ch);
  122. test_assert(cell);
  123. tor_free(cell);
  124. ++test_chan_fixed_cells_recved;
  125. done:
  126. return;
  127. }
  128. /*
  129. * Handle an incoming variable-size cell for unit tests
  130. */
  131. static void
  132. chan_test_var_cell_handler(channel_t *ch,
  133. var_cell_t *var_cell)
  134. {
  135. test_assert(ch);
  136. test_assert(var_cell);
  137. tor_free(var_cell);
  138. ++test_chan_var_cells_recved;
  139. done:
  140. return;
  141. }
/*
 * Lower-layer close method for the fake channel.  The close itself is a
 * no-op (the tests drive the close state machine explicitly via
 * chan_test_finish_close()); we only assert we got a channel.
 */
static void
chan_test_close(channel_t *ch)
{
  test_assert(ch);

 done:
  return;
}
  149. /*
  150. * Close a channel through the error path
  151. */
  152. static void
  153. chan_test_error(channel_t *ch)
  154. {
  155. test_assert(ch);
  156. test_assert(!(ch->state == CHANNEL_STATE_CLOSING ||
  157. ch->state == CHANNEL_STATE_ERROR ||
  158. ch->state == CHANNEL_STATE_CLOSED));
  159. channel_close_for_error(ch);
  160. done:
  161. return;
  162. }
/*
 * Finish closing a channel from CHANNEL_STATE_CLOSING, the way a real
 * lower layer would once its pending writes were done: the channel must
 * already be in CLOSING, and channel_closed() moves it onward.
 */
static void
chan_test_finish_close(channel_t *ch)
{
  test_assert(ch);
  test_assert(ch->state == CHANNEL_STATE_CLOSING);

  channel_closed(ch);

 done:
  return;
}
/*
 * Lower-layer overhead estimate hook: report whatever factor the test has
 * stored in test_overhead_estimate (1.0 means "no overhead").
 */
static double
chan_test_get_overhead_estimate(channel_t *ch)
{
  test_assert(ch);

 done:
  return test_overhead_estimate;
}
/*
 * Lower-layer queued-bytes hook: the fake channel never buffers anything
 * below the channel layer, so always report zero bytes queued.
 */
static size_t
chan_test_num_bytes_queued(channel_t *ch)
{
  test_assert(ch);

 done:
  return 0;
}
/*
 * Lower-layer writeable-cells hook: always claim there is room for 32
 * more cells (an arbitrary nonzero amount so writes are never blocked
 * by this query in the tests).
 */
static int
chan_test_num_cells_writeable(channel_t *ch)
{
  test_assert(ch);

 done:
  return 32;
}
  196. static int
  197. chan_test_write_cell(channel_t *ch, cell_t *cell)
  198. {
  199. int rv = 0;
  200. test_assert(ch);
  201. test_assert(cell);
  202. if (test_chan_accept_cells) {
  203. /* Free the cell and bump the counter */
  204. tor_free(cell);
  205. ++test_cells_written;
  206. rv = 1;
  207. }
  208. /* else return 0, we didn't accept it */
  209. done:
  210. return rv;
  211. }
  212. static int
  213. chan_test_write_packed_cell(channel_t *ch,
  214. packed_cell_t *packed_cell)
  215. {
  216. int rv = 0;
  217. test_assert(ch);
  218. test_assert(packed_cell);
  219. if (test_chan_accept_cells) {
  220. /* Free the cell and bump the counter */
  221. packed_cell_free(packed_cell);
  222. ++test_cells_written;
  223. rv = 1;
  224. }
  225. /* else return 0, we didn't accept it */
  226. done:
  227. return rv;
  228. }
  229. static int
  230. chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell)
  231. {
  232. int rv = 0;
  233. test_assert(ch);
  234. test_assert(var_cell);
  235. if (test_chan_accept_cells) {
  236. /* Free the cell and bump the counter */
  237. var_cell_free(var_cell);
  238. ++test_cells_written;
  239. rv = 1;
  240. }
  241. /* else return 0, we didn't accept it */
  242. done:
  243. return rv;
  244. }
/**
 * Fill out c with a new fake cell for test suite use.
 *
 * The caller supplies (and owns) the cell storage; we set a fixed circuit
 * ID of 1, a RELAY command, and an all-zero payload.
 */
void
make_fake_cell(cell_t *c)
{
  test_assert(c != NULL);

  c->circ_id = 1;
  c->command = CELL_RELAY;
  memset(c->payload, 0, CELL_PAYLOAD_SIZE);

 done:
  return;
}
/**
 * Fill out c with a new fake var_cell for test suite use.
 *
 * The caller supplies (and owns) the storage, which must have room for a
 * payload of at least CELL_PAYLOAD_SIZE / 2 bytes, since that is the
 * payload_len we set here.  Uses a VERSIONS command, as a typical
 * variable-length cell type.
 */
void
make_fake_var_cell(var_cell_t *c)
{
  test_assert(c != NULL);

  c->circ_id = 1;
  c->command = CELL_VERSIONS;
  c->payload_len = CELL_PAYLOAD_SIZE / 2;
  memset(c->payload, 0, c->payload_len);

 done:
  return;
}
/**
 * Set up a new fake channel for the test suite.
 *
 * Allocates a zeroed channel_t, runs channel_init() on it, wires up all
 * the chan_test_* lower-layer method mocks defined above, and starts the
 * channel in CHANNEL_STATE_OPEN.  The caller owns the returned channel.
 */
channel_t *
new_fake_channel(void)
{
  channel_t *chan = tor_malloc_zero(sizeof(channel_t));
  channel_init(chan);

  chan->close = chan_test_close;
  chan->get_overhead_estimate = chan_test_get_overhead_estimate;
  chan->num_bytes_queued = chan_test_num_bytes_queued;
  chan->num_cells_writeable = chan_test_num_cells_writeable;
  chan->write_cell = chan_test_write_cell;
  chan->write_packed_cell = chan_test_write_packed_cell;
  chan->write_var_cell = chan_test_write_var_cell;
  chan->state = CHANNEL_STATE_OPEN;

  return chan;
}
/**
 * Counter query for scheduler_channel_has_waiting_cells_mock(): returns
 * how many times that mock has been called so far.
 */
int
get_mock_scheduler_has_waiting_cells_count(void)
{
  return test_has_waiting_cells_count;
}
  298. /**
  299. * Mock for scheduler_channel_has_waiting_cells()
  300. */
  301. void
  302. scheduler_channel_has_waiting_cells_mock(channel_t *ch)
  303. {
  304. (void)ch;
  305. /* Increment counter */
  306. ++test_has_waiting_cells_count;
  307. return;
  308. }
  309. static void
  310. scheduler_channel_doesnt_want_writes_mock(channel_t *ch)
  311. {
  312. (void)ch;
  313. /* Increment counter */
  314. ++test_doesnt_want_writes_count;
  315. return;
  316. }
/**
 * Counter query for scheduler_release_channel_mock(): returns how many
 * times that mock has been called so far.
 */
int
get_mock_scheduler_release_channel_count(void)
{
  return test_releases_count;
}
/**
 * Mock for scheduler_release_channel(): ignore the channel and just count
 * the call.
 */
void
scheduler_release_channel_mock(channel_t *ch)
{
  (void)ch;
  /* Increment counter */
  ++test_releases_count;

  return;
}
/**
 * Basic queue/flush test: with the lower layer refusing cells, write one
 * fixed cell, one var cell and one packed cell (all of which should
 * queue), then allow writes and verify that channel_flush_cells() pushes
 * exactly those three through.  Cell ownership passes to the channel on
 * each channel_write_*() call.
 */
static void
test_channel_flush(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  packed_cell_t *p_cell = NULL;
  var_cell_t *v_cell = NULL;
  int init_count;

  (void)arg;

  init_cell_pool();

  ch = new_fake_channel();
  test_assert(ch);

  /* Cache the original count */
  init_count = test_cells_written;

  /* Stop accepting so we can queue some */
  test_chan_accept_cells = 0;

  /* Queue a regular cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Queue a var cell */
  v_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(v_cell);
  channel_write_var_cell(ch, v_cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Try a packed cell now */
  p_cell = packed_cell_new();
  test_assert(p_cell);
  channel_write_packed_cell(ch, p_cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Now allow writes through again */
  test_chan_accept_cells = 1;
  /* ...and flush */
  channel_flush_cells(ch);

  /* All three should have gone through */
  test_eq(test_cells_written, init_count + 3);

 done:
  /* NOTE(review): ch is released with a bare tor_free(), not a channel
   * teardown routine — presumably safe for an unregistered fake channel;
   * confirm nothing channel_init() allocated is leaked here. */
  tor_free(ch);
  free_cell_pool();

  return;
}
/**
 * Channel flush tests that require cmux mocking.
 *
 * With channel_flush_from_first_active_circuit() and
 * circuitmux_num_cells() mocked to serve fake cells from
 * test_target_cmux/test_cmux_cells, verify that
 * channel_flush_some_cells() (a) writes a cell straight through when the
 * lower layer accepts, and (b) leaves the generated cell in
 * ch->outgoing_queue when it doesn't, draining it later via
 * channel_flush_cells().
 */
static void
test_channel_flushmux(void *arg)
{
  channel_t *ch = NULL;
  int old_count, q_len_before, q_len_after;
  ssize_t result;

  (void)arg;

  init_cell_pool();

  /* Install mocks we need for this test */
  MOCK(channel_flush_from_first_active_circuit,
       chan_test_channel_flush_from_first_active_circuit_mock);
  MOCK(circuitmux_num_cells,
       chan_test_circuitmux_num_cells_mock);

  ch = new_fake_channel();
  test_assert(ch);
  ch->cmux = circuitmux_alloc();

  old_count = test_cells_written;

  /* Point the mocks at this channel's cmux, with one fake cell queued */
  test_target_cmux = ch->cmux;
  test_cmux_cells = 1;

  /* Enable cell acceptance */
  test_chan_accept_cells = 1;

  result = channel_flush_some_cells(ch, 1);

  test_eq(result, 1);
  test_eq(test_cells_written, old_count + 1);
  test_eq(test_cmux_cells, 0);

  /* Now try it without accepting to force them into the queue */
  test_chan_accept_cells = 0;
  test_cmux_cells = 1;
  q_len_before = chan_cell_queue_len(&(ch->outgoing_queue));

  result = channel_flush_some_cells(ch, 1);

  /* We should not have actually flushed any */
  test_eq(result, 0);
  test_eq(test_cells_written, old_count + 1);
  /* But we should have gotten to the fake cellgen loop */
  test_eq(test_cmux_cells, 0);
  /* ...and we should have a queued cell */
  q_len_after = chan_cell_queue_len(&(ch->outgoing_queue));
  test_eq(q_len_after, q_len_before + 1);

  /* Now accept cells again and drain the queue */
  test_chan_accept_cells = 1;
  channel_flush_cells(ch);
  test_eq(test_cells_written, old_count + 2);
  test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);

  /* Stop lying about the cmux so other tests see real behavior */
  test_target_cmux = NULL;
  test_cmux_cells = 0;

 done:
  tor_free(ch);

  UNMOCK(channel_flush_from_first_active_circuit);
  UNMOCK(circuitmux_num_cells);

  test_chan_accept_cells = 0;

  free_cell_pool();

  return;
}
/**
 * Incoming-cell test: register a fake channel, install the counting cell
 * handlers, deliver one fixed-size and one variable-size cell through
 * channel_queue_cell()/channel_queue_var_cell(), and verify each handler
 * fired exactly once.  The handlers take ownership of the cells, so the
 * local pointers are NULLed after queueing.
 */
static void
test_channel_incoming(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  var_cell_t *var_cell = NULL;
  int old_count;

  (void)arg;

  /* Mock these for duration of the test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch = new_fake_channel();
  test_assert(ch);
  /* Start it off in OPENING */
  ch->state = CHANNEL_STATE_OPENING;
  /* We'll need a cmux */
  ch->cmux = circuitmux_alloc();

  /* Install incoming cell handlers */
  channel_set_cell_handlers(ch,
                            chan_test_cell_handler,
                            chan_test_var_cell_handler);
  /* Test cell handler getters */
  test_eq(channel_get_cell_handler(ch), chan_test_cell_handler);
  test_eq(channel_get_var_cell_handler(ch), chan_test_var_cell_handler);

  /* Try to register it */
  channel_register(ch);
  test_assert(ch->registered);

  /* Open it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Receive a fixed cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_chan_fixed_cells_recved;
  channel_queue_cell(ch, cell);
  cell = NULL; /* the handler freed it */
  test_eq(test_chan_fixed_cells_recved, old_count + 1);

  /* Receive a variable-size cell */
  var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(var_cell);
  old_count = test_chan_var_cells_recved;
  channel_queue_var_cell(ch, var_cell);
  var_cell = NULL; /* the handler freed it */
  test_eq(test_chan_var_cells_recved, old_count + 1);

  /* Close it */
  channel_mark_for_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL; /* cleanup owns it now */

 done:
  tor_free(ch);
  tor_free(cell);
  tor_free(var_cell);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Normal channel lifecycle test:
 *
 * OPENING->OPEN->MAINT->OPEN->CLOSING->CLOSED
 *
 * Uses two channels so we can check that the scheduler interaction
 * counters (doesnt-want-writes, release) move only on the transitions
 * that should trigger them.
 */
static void
test_channel_lifecycle(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  cell_t *cell = NULL;
  int old_count, init_doesnt_want_writes_count;
  int init_releases_count;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Cache some initial counter values */
  init_doesnt_want_writes_count = test_doesnt_want_writes_count;
  init_releases_count = test_releases_count;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch1 = new_fake_channel();
  test_assert(ch1);
  /* Start it off in OPENING */
  ch1->state = CHANNEL_STATE_OPENING;
  /* We'll need a cmux */
  ch1->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch1);
  test_assert(ch1->registered);

  /* Try to write a cell through (should queue) */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_cells_written;
  channel_write_cell(ch1, cell);
  test_eq(old_count, test_cells_written);

  /* Move it to OPEN and flush */
  channel_change_state(ch1, CHANNEL_STATE_OPEN);

  /* Queue should drain */
  test_eq(old_count + 1, test_cells_written);

  /* Get another one */
  ch2 = new_fake_channel();
  test_assert(ch2);
  ch2->state = CHANNEL_STATE_OPENING;
  ch2->cmux = circuitmux_alloc();

  /* Register */
  channel_register(ch2);
  test_assert(ch2->registered);

  /* Check counters */
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch1 to MAINT */
  channel_change_state(ch1, CHANNEL_STATE_MAINT);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch2 to OPEN */
  channel_change_state(ch2, CHANNEL_STATE_OPEN);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch1 back to OPEN */
  channel_change_state(ch1, CHANNEL_STATE_OPEN);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Mark ch2 for close */
  channel_mark_for_close(ch2);
  test_eq(ch2->state, CHANNEL_STATE_CLOSING);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count + 1);

  /* Shut down channels */
  channel_free_all();
  ch1 = ch2 = NULL;
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  /* channel_free() calls scheduler_release_channel() */
  /* NOTE(review): +4 total releases expected after free-all — presumably
   * further releases happen during channel teardown; confirm the exact
   * accounting against channel.c. */
  test_eq(test_releases_count, init_releases_count + 4);

 done:
  tor_free(ch1);
  tor_free(ch2);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Weird channel lifecycle test:
 *
 * OPENING->CLOSING->CLOSED
 * OPENING->OPEN->CLOSING->ERROR
 * OPENING->OPEN->MAINT->CLOSING->CLOSED
 * OPENING->OPEN->MAINT->CLOSING->ERROR
 *
 * Each scenario builds a fresh fake channel, walks it through the
 * transitions listed, and checks the state after every step.  The close
 * is finished with chan_test_finish_close(), standing in for the lower
 * layer completing its shutdown.
 */
static void
test_channel_lifecycle_2(void *arg)
{
  channel_t *ch = NULL;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch = new_fake_channel();
  test_assert(ch);
  /* Start it off in OPENING */
  ch->state = CHANNEL_STATE_OPENING;
  /* The full lifecycle test needs a cmux */
  ch->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch);
  test_assert(ch->registered);

  /* Try to close it */
  channel_mark_for_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish closing it */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* Now try OPENING->OPEN->CLOSING->ERROR */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);

  /* Error exit from lower layer */
  chan_test_error(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* OPENING->OPEN->MAINT->CLOSING->CLOSED close from maintenance state */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  test_eq(ch->state, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_mark_for_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /*
   * OPENING->OPEN->MAINT->CLOSING->CLOSED lower-layer close during
   * maintenance state
   */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  test_eq(ch->state, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_close_from_lower_layer(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* OPENING->OPEN->MAINT->CLOSING->ERROR */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  test_eq(ch->state, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  chan_test_error(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* Shut down channels */
  channel_free_all();

 done:
  tor_free(ch);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Multi-channel queue-size accounting test: drive two fake channels
 * through accept/queue/flush cycles and check that
 * channel_update_xmit_queue_size() and the global queue estimate track
 * the per-channel queued bytes (512 per queued fixed cell, with the
 * default overhead factor of 1.0), including the subtraction that
 * happens when a channel with queued bytes is closed.
 */
static void
test_channel_multi(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  uint64_t global_queue_estimate;
  cell_t *cell = NULL;

  (void)arg;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch1 = new_fake_channel();
  test_assert(ch1);
  ch2 = new_fake_channel();
  test_assert(ch2);

  /* Initial queue size update */
  channel_update_xmit_queue_size(ch1);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Queue some cells, check queue estimates */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  /* Lower layer accepted both, so nothing should register as queued */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Stop accepting cells at lower layer */
  test_chan_accept_cells = 0;

  /* Queue some cells and check queue estimates */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  channel_update_xmit_queue_size(ch1);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  channel_update_xmit_queue_size(ch2);
  test_eq(ch2->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 1024);

  /* Allow cells through again */
  test_chan_accept_cells = 1;

  /* Flush chan 2 */
  channel_flush_cells(ch2);

  /* Update and check queue sizes */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /* Flush chan 1 */
  channel_flush_cells(ch1);

  /* Update and check queue sizes */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Now block again */
  test_chan_accept_cells = 0;

  /* Queue some cells */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  /* Check the estimates */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  test_eq(ch2->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 1024);

  /* Now close channel 2; it should be subtracted from the global queue */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch2);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /*
   * Since the fake channels aren't registered, channel_free_all() can't
   * see them properly.
   */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch1);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Now free everything */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_free_all();
  UNMOCK(scheduler_release_channel);

 done:
  tor_free(ch1);
  tor_free(ch2);

  return;
}
  828. /**
  829. * Check some hopefully-impossible edge cases in the channel queue we
  830. * can only trigger by doing evil things to the queue directly.
  831. */
  832. static void
  833. test_channel_queue_impossible(void *arg)
  834. {
  835. channel_t *ch = NULL;
  836. cell_t *cell = NULL;
  837. packed_cell_t *packed_cell = NULL;
  838. var_cell_t *var_cell = NULL;
  839. int old_count;
  840. cell_queue_entry_t *q = NULL;
  841. uint64_t global_queue_estimate;
  842. /* Cache the global queue size (see below) */
  843. global_queue_estimate = channel_get_global_queue_estimate();
  844. (void)arg;
  845. init_cell_pool();
  846. packed_cell = packed_cell_new();
  847. test_assert(packed_cell);
  848. ch = new_fake_channel();
  849. test_assert(ch);
  850. /* We test queueing here; tell it not to accept cells */
  851. test_chan_accept_cells = 0;
  852. /* ...and keep it from trying to flush the queue */
  853. ch->state = CHANNEL_STATE_MAINT;
  854. /* Cache the cell written count */
  855. old_count = test_cells_written;
  856. /* Assert that the queue is initially empty */
  857. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  858. /* Get a fresh cell and write it to the channel*/
  859. cell = tor_malloc_zero(sizeof(cell_t));
  860. make_fake_cell(cell);
  861. channel_write_cell(ch, cell);
  862. /* Now it should be queued */
  863. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)),1);
  864. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  865. test_assert(q);
  866. if (q) {
  867. test_eq(q->type, CELL_QUEUE_FIXED);
  868. test_eq(q->u.fixed.cell, cell);
  869. }
  870. /* Do perverse things to it */
  871. tor_free(q->u.fixed.cell);
  872. q->u.fixed.cell = NULL;
  873. /*
  874. * Now change back to open with channel_change_state() and assert that it
  875. * gets thrown away properly.
  876. */
  877. test_chan_accept_cells = 1;
  878. channel_change_state(ch, CHANNEL_STATE_OPEN);
  879. test_assert(test_cells_written == old_count);
  880. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  881. /* Same thing but for a var_cell */
  882. test_chan_accept_cells = 0;
  883. ch->state = CHANNEL_STATE_MAINT;
  884. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  885. make_fake_var_cell(var_cell);
  886. channel_write_var_cell(ch, var_cell);
  887. /* Check that it's queued */
  888. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 1);
  889. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  890. test_assert(q);
  891. if (q) {
  892. test_eq(q->type, CELL_QUEUE_VAR);
  893. test_eq(q->u.var.var_cell, var_cell);
  894. }
  895. /* Remove the cell from the queue entry */
  896. tor_free(q->u.var.var_cell);
  897. q->u.var.var_cell = NULL;
  898. /* Let it drain and check that the bad entry is discarded */
  899. test_chan_accept_cells = 1;
  900. channel_change_state(ch, CHANNEL_STATE_OPEN);
  901. test_assert(test_cells_written == old_count);
  902. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  903. /* Same thing with a packed_cell */
  904. test_chan_accept_cells = 0;
  905. ch->state = CHANNEL_STATE_MAINT;
  906. packed_cell = packed_cell_new();
  907. test_assert(packed_cell);
  908. channel_write_packed_cell(ch, packed_cell);
  909. /* Check that it's queued */
  910. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 1);
  911. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  912. test_assert(q);
  913. if (q) {
  914. test_eq(q->type, CELL_QUEUE_PACKED);
  915. test_eq(q->u.packed.packed_cell, packed_cell);
  916. }
  917. /* Remove the cell from the queue entry */
  918. packed_cell_free(q->u.packed.packed_cell);
  919. q->u.packed.packed_cell = NULL;
  920. /* Let it drain and check that the bad entry is discarded */
  921. test_chan_accept_cells = 1;
  922. channel_change_state(ch, CHANNEL_STATE_OPEN);
  923. test_assert(test_cells_written == old_count);
  924. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  925. /* Unknown cell type case */
  926. test_chan_accept_cells = 0;
  927. ch->state = CHANNEL_STATE_MAINT;
  928. cell = tor_malloc_zero(sizeof(cell_t));
  929. make_fake_cell(cell);
  930. channel_write_cell(ch, cell);
  931. /* Check that it's queued */
  932. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)),1);
  933. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  934. test_assert(q);
  935. if (q) {
  936. test_eq(q->type, CELL_QUEUE_FIXED);
  937. test_eq(q->u.fixed.cell, cell);
  938. }
  939. /* Clobber it, including the queue entry type */
  940. tor_free(q->u.fixed.cell);
  941. q->u.fixed.cell = NULL;
  942. q->type = CELL_QUEUE_PACKED + 1;
  943. /* Let it drain and check that the bad entry is discarded */
  944. test_chan_accept_cells = 1;
  945. channel_change_state(ch, CHANNEL_STATE_OPEN);
  946. test_assert(test_cells_written == old_count);
  947. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  948. done:
  949. tor_free(ch);
  950. free_cell_pool();
  951. /*
  952. * Doing that meant that we couldn't correctly adjust the queue size
  953. * for the var cell, so manually reset the global queue size estimate
  954. * so the next test doesn't break if we run with --no-fork.
  955. */
  956. estimated_total_queue_size = global_queue_estimate;
  957. return;
  958. }
  959. static void
  960. test_channel_queue_size(void *arg)
  961. {
  962. channel_t *ch = NULL;
  963. cell_t *cell = NULL;
  964. int n, old_count;
  965. uint64_t global_queue_estimate;
  966. (void)arg;
  967. ch = new_fake_channel();
  968. test_assert(ch);
  969. /* Initial queue size update */
  970. channel_update_xmit_queue_size(ch);
  971. test_eq(ch->bytes_queued_for_xmit, 0);
  972. global_queue_estimate = channel_get_global_queue_estimate();
  973. test_eq(global_queue_estimate, 0);
  974. /* Test the call-through to our fake lower layer */
  975. n = channel_num_cells_writeable(ch);
  976. /* chan_test_num_cells_writeable() always returns 32 */
  977. test_eq(n, 32);
  978. /*
  979. * Now we queue some cells and check that channel_num_cells_writeable()
  980. * adjusts properly
  981. */
  982. /* tell it not to accept cells */
  983. test_chan_accept_cells = 0;
  984. /* ...and keep it from trying to flush the queue */
  985. ch->state = CHANNEL_STATE_MAINT;
  986. /* Get a fresh cell */
  987. cell = tor_malloc_zero(sizeof(cell_t));
  988. make_fake_cell(cell);
  989. old_count = test_cells_written;
  990. channel_write_cell(ch, cell);
  991. /* Assert that it got queued, not written through, correctly */
  992. test_eq(test_cells_written, old_count);
  993. /* Now check chan_test_num_cells_writeable() again */
  994. n = channel_num_cells_writeable(ch);
  995. test_eq(n, 0); /* Should return 0 since we're in CHANNEL_STATE_MAINT */
  996. /* Update queue size estimates */
  997. channel_update_xmit_queue_size(ch);
  998. /* One cell, times an overhead factor of 1.0 */
  999. test_eq(ch->bytes_queued_for_xmit, 512);
  1000. /* Try a different overhead factor */
  1001. test_overhead_estimate = 0.5f;
  1002. /* This one should be ignored since it's below 1.0 */
  1003. channel_update_xmit_queue_size(ch);
  1004. test_eq(ch->bytes_queued_for_xmit, 512);
  1005. /* Now try a larger one */
  1006. test_overhead_estimate = 2.0f;
  1007. channel_update_xmit_queue_size(ch);
  1008. test_eq(ch->bytes_queued_for_xmit, 1024);
  1009. /* Go back to 1.0 */
  1010. test_overhead_estimate = 1.0f;
  1011. channel_update_xmit_queue_size(ch);
  1012. test_eq(ch->bytes_queued_for_xmit, 512);
  1013. /* Check the global estimate too */
  1014. global_queue_estimate = channel_get_global_queue_estimate();
  1015. test_eq(global_queue_estimate, 512);
  1016. /* Go to open */
  1017. old_count = test_cells_written;
  1018. channel_change_state(ch, CHANNEL_STATE_OPEN);
  1019. /*
  1020. * It should try to write, but we aren't accepting cells right now, so
  1021. * it'll requeue
  1022. */
  1023. test_eq(test_cells_written, old_count);
  1024. /* Check the queue size again */
  1025. channel_update_xmit_queue_size(ch);
  1026. test_eq(ch->bytes_queued_for_xmit, 512);
  1027. global_queue_estimate = channel_get_global_queue_estimate();
  1028. test_eq(global_queue_estimate, 512);
  1029. /*
  1030. * Now the cell is in the queue, and we're open, so we should get 31
  1031. * writeable cells.
  1032. */
  1033. n = channel_num_cells_writeable(ch);
  1034. test_eq(n, 31);
  1035. /* Accept cells again */
  1036. test_chan_accept_cells = 1;
  1037. /* ...and re-process the queue */
  1038. old_count = test_cells_written;
  1039. channel_flush_cells(ch);
  1040. test_eq(test_cells_written, old_count + 1);
  1041. /* Should have 32 writeable now */
  1042. n = channel_num_cells_writeable(ch);
  1043. test_eq(n, 32);
  1044. /* Should have queue size estimate of zero */
  1045. channel_update_xmit_queue_size(ch);
  1046. test_eq(ch->bytes_queued_for_xmit, 0);
  1047. global_queue_estimate = channel_get_global_queue_estimate();
  1048. test_eq(global_queue_estimate, 0);
  1049. /* Okay, now we're done with this one */
  1050. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  1051. channel_mark_for_close(ch);
  1052. UNMOCK(scheduler_release_channel);
  1053. done:
  1054. tor_free(ch);
  1055. return;
  1056. }
  1057. static void
  1058. test_channel_write(void *arg)
  1059. {
  1060. channel_t *ch = NULL;
  1061. cell_t *cell = tor_malloc_zero(sizeof(cell_t));
  1062. packed_cell_t *packed_cell = NULL;
  1063. var_cell_t *var_cell =
  1064. tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  1065. int old_count;
  1066. (void)arg;
  1067. init_cell_pool();
  1068. packed_cell = packed_cell_new();
  1069. test_assert(packed_cell);
  1070. ch = new_fake_channel();
  1071. test_assert(ch);
  1072. make_fake_cell(cell);
  1073. make_fake_var_cell(var_cell);
  1074. /* Tell it to accept cells */
  1075. test_chan_accept_cells = 1;
  1076. old_count = test_cells_written;
  1077. channel_write_cell(ch, cell);
  1078. test_assert(test_cells_written == old_count + 1);
  1079. channel_write_var_cell(ch, var_cell);
  1080. test_assert(test_cells_written == old_count + 2);
  1081. channel_write_packed_cell(ch, packed_cell);
  1082. test_assert(test_cells_written == old_count + 3);
  1083. /* Now we test queueing; tell it not to accept cells */
  1084. test_chan_accept_cells = 0;
  1085. /* ...and keep it from trying to flush the queue */
  1086. ch->state = CHANNEL_STATE_MAINT;
  1087. /* Get a fresh cell */
  1088. cell = tor_malloc_zero(sizeof(cell_t));
  1089. make_fake_cell(cell);
  1090. old_count = test_cells_written;
  1091. channel_write_cell(ch, cell);
  1092. test_assert(test_cells_written == old_count);
  1093. /*
  1094. * Now change back to open with channel_change_state() and assert that it
  1095. * gets drained from the queue.
  1096. */
  1097. test_chan_accept_cells = 1;
  1098. channel_change_state(ch, CHANNEL_STATE_OPEN);
  1099. test_assert(test_cells_written == old_count + 1);
  1100. /*
  1101. * Check the note destroy case
  1102. */
  1103. cell = tor_malloc_zero(sizeof(cell_t));
  1104. make_fake_cell(cell);
  1105. cell->command = CELL_DESTROY;
  1106. /* Set up the mock */
  1107. MOCK(channel_note_destroy_not_pending,
  1108. channel_note_destroy_not_pending_mock);
  1109. old_count = test_destroy_not_pending_calls;
  1110. channel_write_cell(ch, cell);
  1111. test_assert(test_destroy_not_pending_calls == old_count + 1);
  1112. /* Now send a non-destroy and check we don't call it */
  1113. cell = tor_malloc_zero(sizeof(cell_t));
  1114. make_fake_cell(cell);
  1115. channel_write_cell(ch, cell);
  1116. test_assert(test_destroy_not_pending_calls == old_count + 1);
  1117. UNMOCK(channel_note_destroy_not_pending);
  1118. /*
  1119. * Now switch it to CLOSING so we can test the discard-cells case
  1120. * in the channel_write_*() functions.
  1121. */
  1122. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  1123. channel_mark_for_close(ch);
  1124. UNMOCK(scheduler_release_channel);
  1125. /* Send cells that will drop in the closing state */
  1126. old_count = test_cells_written;
  1127. cell = tor_malloc_zero(sizeof(cell_t));
  1128. make_fake_cell(cell);
  1129. channel_write_cell(ch, cell);
  1130. test_assert(test_cells_written == old_count);
  1131. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  1132. make_fake_var_cell(var_cell);
  1133. channel_write_var_cell(ch, var_cell);
  1134. test_assert(test_cells_written == old_count);
  1135. packed_cell = packed_cell_new();
  1136. channel_write_packed_cell(ch, packed_cell);
  1137. test_assert(test_cells_written == old_count);
  1138. free_cell_pool();
  1139. done:
  1140. tor_free(ch);
  1141. return;
  1142. }
/*
 * Test-case table for channel.c; each entry is run in its own forked
 * subprocess (TT_FORK) so global state (queue estimates, mocks, the cell
 * pool) cannot leak between tests.
 */
struct testcase_t channel_tests[] = {
  { "flush", test_channel_flush, TT_FORK, NULL, NULL },
  { "flushmux", test_channel_flushmux, TT_FORK, NULL, NULL },
  { "incoming", test_channel_incoming, TT_FORK, NULL, NULL },
  { "lifecycle", test_channel_lifecycle, TT_FORK, NULL, NULL },
  { "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
  { "multi", test_channel_multi, TT_FORK, NULL, NULL },
  { "queue_impossible", test_channel_queue_impossible, TT_FORK, NULL, NULL },
  { "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
  { "write", test_channel_write, TT_FORK, NULL, NULL },
  END_OF_TESTCASES
};