test_channel.c

/* Copyright (c) 2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

#define TOR_CHANNEL_INTERNAL_
#include "or.h"
#include "channel.h"
/* For channel_note_destroy_not_pending */
#include "circuitlist.h"
/* For var_cell_free */
#include "connection_or.h"
/* For packed_cell stuff */
#define RELAY_PRIVATE
#include "relay.h"
/* For init/free stuff */
#include "scheduler.h"

/* Test suite stuff */
#include "test.h"
#include "fakechans.h"

static int test_chan_accept_cells = 0;
static int test_cells_written = 0;
static int test_destroy_not_pending_calls = 0;
static int test_doesnt_want_writes_count = 0;
static double test_overhead_estimate = 1.0f;
static int test_releases_count = 0;

static void channel_note_destroy_not_pending_mock(channel_t *ch,
                                                  circid_t circid);
static void chan_test_close(channel_t *ch);
static size_t chan_test_num_bytes_queued(channel_t *ch);
static int chan_test_num_cells_writeable(channel_t *ch);
static int chan_test_write_cell(channel_t *ch, cell_t *cell);
static int chan_test_write_packed_cell(channel_t *ch,
                                       packed_cell_t *packed_cell);
static int chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell);
static void scheduler_channel_doesnt_want_writes_mock(channel_t *ch);

static void test_channel_flush(void *arg);
static void test_channel_lifecycle(void *arg);
static void test_channel_multi(void *arg);
static void test_channel_queue_size(void *arg);
static void test_channel_write(void *arg);
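
/*
 * Mock implementations used by the test cases below: each one checks its
 * arguments and bumps one of the counters above, so the tests can assert
 * on how (and how often) the channel layer called it.
 */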

static void
channel_note_destroy_not_pending_mock(channel_t *ch,
                                      circid_t circid)
{
  (void)ch;
  (void)circid;

  ++test_destroy_not_pending_calls;
}

static void
chan_test_close(channel_t *ch)
{
  test_assert(ch);

 done:
  return;
}

static double
chan_test_get_overhead_estimate(channel_t *ch)
{
  test_assert(ch);

 done:
  return test_overhead_estimate;
}

static size_t
chan_test_num_bytes_queued(channel_t *ch)
{
  test_assert(ch);

 done:
  return 0;
}

static int
chan_test_num_cells_writeable(channel_t *ch)
{
  test_assert(ch);

 done:
  return 32;
}

static int
chan_test_write_cell(channel_t *ch, cell_t *cell)
{
  int rv = 0;

  test_assert(ch);
  test_assert(cell);

  if (test_chan_accept_cells) {
    /* Free the cell and bump the counter */
    tor_free(cell);
    ++test_cells_written;
    rv = 1;
  }
  /* else return 0, we didn't accept it */

 done:
  return rv;
}

static int
chan_test_write_packed_cell(channel_t *ch,
                            packed_cell_t *packed_cell)
{
  int rv = 0;

  test_assert(ch);
  test_assert(packed_cell);

  if (test_chan_accept_cells) {
    /* Free the cell and bump the counter */
    packed_cell_free(packed_cell);
    ++test_cells_written;
    rv = 1;
  }
  /* else return 0, we didn't accept it */

 done:
  return rv;
}

static int
chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell)
{
  int rv = 0;

  test_assert(ch);
  test_assert(var_cell);

  if (test_chan_accept_cells) {
    /* Free the cell and bump the counter */
    var_cell_free(var_cell);
    ++test_cells_written;
    rv = 1;
  }
  /* else return 0, we didn't accept it */

 done:
  return rv;
}

/**
 * Fill out c with a new fake cell for test suite use
 */
void
make_fake_cell(cell_t *c)
{
  test_assert(c != NULL);

  c->circ_id = 1;
  c->command = CELL_RELAY;
  memset(c->payload, 0, CELL_PAYLOAD_SIZE);

 done:
  return;
}

/**
 * Fill out c with a new fake var_cell for test suite use
 */
void
make_fake_var_cell(var_cell_t *c)
{
  test_assert(c != NULL);

  c->circ_id = 1;
  c->command = CELL_VERSIONS;
  c->payload_len = CELL_PAYLOAD_SIZE / 2;
  memset(c->payload, 0, c->payload_len);

 done:
  return;
}

/**
 * Set up a new fake channel for the test suite
 */
channel_t *
new_fake_channel(void)
{
  channel_t *chan = tor_malloc_zero(sizeof(channel_t));
  channel_init(chan);

  chan->close = chan_test_close;
  chan->get_overhead_estimate = chan_test_get_overhead_estimate;
  chan->num_bytes_queued = chan_test_num_bytes_queued;
  chan->num_cells_writeable = chan_test_num_cells_writeable;
  chan->write_cell = chan_test_write_cell;
  chan->write_packed_cell = chan_test_write_packed_cell;
  chan->write_var_cell = chan_test_write_var_cell;
  chan->state = CHANNEL_STATE_OPEN;

  return chan;
}
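
/*
 * Mocks for the scheduler entry points the channel layer calls; they only
 * count invocations, so the tests can verify when the channel layer
 * notifies the scheduler.
 */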

static void
scheduler_channel_doesnt_want_writes_mock(channel_t *ch)
{
  (void)ch;

  /* Increment counter */
  ++test_doesnt_want_writes_count;

  return;
}

void
scheduler_release_channel_mock(channel_t *ch)
{
  (void)ch;

  /* Increment counter */
  ++test_releases_count;

  return;
}
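
/*
 * Exercise channel_flush_cells(): queue one cell of each kind while the
 * fake lower layer refuses writes, then let it accept again and check
 * that all three cells get flushed through.
 */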

static void
test_channel_flush(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  packed_cell_t *p_cell = NULL;
  var_cell_t *v_cell = NULL;
  int init_count;

  (void)arg;

  init_cell_pool();

  ch = new_fake_channel();
  test_assert(ch);

  /* Cache the original count */
  init_count = test_cells_written;

  /* Stop accepting so we can queue some */
  test_chan_accept_cells = 0;

  /* Queue a regular cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Queue a var cell */
  v_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(v_cell);
  channel_write_var_cell(ch, v_cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Try a packed cell now */
  p_cell = packed_cell_new();
  test_assert(p_cell);
  channel_write_packed_cell(ch, p_cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Now allow writes through again */
  test_chan_accept_cells = 1;

  /* ...and flush */
  channel_flush_cells(ch);

  /* All three should have gone through */
  test_eq(test_cells_written, init_count + 3);

 done:
  tor_free(ch);
  free_cell_pool();

  return;
}
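
/*
 * Walk two fake channels through registration and the OPENING -> OPEN ->
 * MAINT -> CLOSING transitions, checking that the mocked scheduler
 * callbacks fire the expected number of times along the way.
 */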

static void
test_channel_lifecycle(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  cell_t *cell = NULL;
  int old_count, init_doesnt_want_writes_count;
  int init_releases_count;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Cache some initial counter values */
  init_doesnt_want_writes_count = test_doesnt_want_writes_count;
  init_releases_count = test_releases_count;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;

  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch1 = new_fake_channel();
  test_assert(ch1);
  /* Start it off in OPENING */
  ch1->state = CHANNEL_STATE_OPENING;

  /* Try to register it */
  channel_register(ch1);
  test_assert(ch1->registered);

  /* Try to write a cell through (should queue) */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_cells_written;
  channel_write_cell(ch1, cell);
  test_eq(old_count, test_cells_written);

  /* Move it to OPEN and flush */
  channel_change_state(ch1, CHANNEL_STATE_OPEN);

  /* Queue should drain */
  test_eq(old_count + 1, test_cells_written);

  /* Get another one */
  ch2 = new_fake_channel();
  test_assert(ch2);
  ch2->state = CHANNEL_STATE_OPENING;

  /* Register */
  channel_register(ch2);
  test_assert(ch2->registered);

  /* Check counters */
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch1 to MAINT */
  channel_change_state(ch1, CHANNEL_STATE_MAINT);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch2 to OPEN */
  channel_change_state(ch2, CHANNEL_STATE_OPEN);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch1 back to OPEN */
  channel_change_state(ch1, CHANNEL_STATE_OPEN);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Mark ch2 for close */
  channel_mark_for_close(ch2);
  test_eq(ch2->state, CHANNEL_STATE_CLOSING);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count + 1);

  /* Shut down channels */
  channel_free_all();
  ch1 = ch2 = NULL;
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  /* channel_free() calls scheduler_release_channel() */
  test_eq(test_releases_count, init_releases_count + 4);

 done:
  tor_free(ch1);
  tor_free(ch2);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
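
/*
 * Run two fake channels side by side and check that
 * channel_update_xmit_queue_size() and the global queue estimate track
 * queued cells correctly as writes are blocked, flushed, and the channels
 * are eventually closed.
 */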

static void
test_channel_multi(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  uint64_t global_queue_estimate;
  cell_t *cell = NULL;

  (void)arg;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;

  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch1 = new_fake_channel();
  test_assert(ch1);
  ch2 = new_fake_channel();
  test_assert(ch2);

  /* Initial queue size update */
  channel_update_xmit_queue_size(ch1);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Queue some cells, check queue estimates */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Stop accepting cells at lower layer */
  test_chan_accept_cells = 0;

  /* Queue some cells and check queue estimates */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);
  channel_update_xmit_queue_size(ch1);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch2->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 1024);

  /* Allow cells through again */
  test_chan_accept_cells = 1;

  /* Flush chan 2 */
  channel_flush_cells(ch2);

  /* Update and check queue sizes */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /* Flush chan 1 */
  channel_flush_cells(ch1);

  /* Update and check queue sizes */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Now block again */
  test_chan_accept_cells = 0;

  /* Queue some cells */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  /* Check the estimates */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  test_eq(ch2->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 1024);

  /* Now close channel 2; it should be subtracted from the global queue */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch2);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /*
   * Since the fake channels aren't registered, channel_free_all() can't
   * see them properly.
   */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch1);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Now free everything */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_free_all();
  UNMOCK(scheduler_release_channel);

 done:
  tor_free(ch1);
  tor_free(ch2);

  return;
}
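
/*
 * Check channel_num_cells_writeable() and the per-channel and global
 * queue size estimates as a single channel queues a cell, has its
 * overhead estimate varied, and is then flushed and closed.
 */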

static void
test_channel_queue_size(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  int n, old_count;
  uint64_t global_queue_estimate;

  (void)arg;

  ch = new_fake_channel();
  test_assert(ch);

  /* Initial queue size update */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Test the call-through to our fake lower layer */
  n = channel_num_cells_writeable(ch);
  /* chan_test_num_cells_writeable() always returns 32 */
  test_eq(n, 32);

  /*
   * Now we queue some cells and check that channel_num_cells_writeable()
   * adjusts properly
   */

  /* tell it not to accept cells */
  test_chan_accept_cells = 0;
  /* ...and keep it from trying to flush the queue */
  ch->state = CHANNEL_STATE_MAINT;

  /* Get a fresh cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);

  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  /* Assert that it got queued, not written through, correctly */
  test_eq(test_cells_written, old_count);

  /* Now check chan_test_num_cells_writeable() again */
  n = channel_num_cells_writeable(ch);
  test_eq(n, 0); /* Should return 0 since we're in CHANNEL_STATE_MAINT */

  /* Update queue size estimates */
  channel_update_xmit_queue_size(ch);
  /* One cell, times an overhead factor of 1.0 */
  test_eq(ch->bytes_queued_for_xmit, 512);

  /* Try a different overhead factor */
  test_overhead_estimate = 0.5f;
  /* This one should be ignored since it's below 1.0 */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 512);

  /* Now try a larger one */
  test_overhead_estimate = 2.0f;
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 1024);

  /* Go back to 1.0 */
  test_overhead_estimate = 1.0f;
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 512);

  /* Check the global estimate too */
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /* Go to open */
  old_count = test_cells_written;
  channel_change_state(ch, CHANNEL_STATE_OPEN);

  /*
   * It should try to write, but we aren't accepting cells right now, so
   * it'll requeue
   */
  test_eq(test_cells_written, old_count);

  /* Check the queue size again */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /*
   * Now the cell is in the queue, and we're open, so we should get 31
   * writeable cells.
   */
  n = channel_num_cells_writeable(ch);
  test_eq(n, 31);

  /* Accept cells again */
  test_chan_accept_cells = 1;
  /* ...and re-process the queue */
  old_count = test_cells_written;
  channel_flush_cells(ch);
  test_eq(test_cells_written, old_count + 1);

  /* Should have 32 writeable now */
  n = channel_num_cells_writeable(ch);
  test_eq(n, 32);

  /* Should have queue size estimate of zero */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Okay, now we're done with this one */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch);
  UNMOCK(scheduler_release_channel);

 done:
  tor_free(ch);

  return;
}
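
/*
 * Exercise the channel_write_*() entry points directly: immediate writes,
 * queueing in MAINT, draining on the transition back to OPEN, the
 * CELL_DESTROY bookkeeping hook, and the cell-dropping behavior once the
 * channel is CLOSING.
 */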

static void
test_channel_write(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = tor_malloc_zero(sizeof(cell_t));
  packed_cell_t *packed_cell = NULL;
  var_cell_t *var_cell =
    tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  int old_count;

  (void)arg;

  init_cell_pool();

  packed_cell = packed_cell_new();
  test_assert(packed_cell);

  ch = new_fake_channel();
  test_assert(ch);
  make_fake_cell(cell);
  make_fake_var_cell(var_cell);

  /* Tell it to accept cells */
  test_chan_accept_cells = 1;

  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  test_assert(test_cells_written == old_count + 1);

  channel_write_var_cell(ch, var_cell);
  test_assert(test_cells_written == old_count + 2);

  channel_write_packed_cell(ch, packed_cell);
  test_assert(test_cells_written == old_count + 3);

  /* Now we test queueing; tell it not to accept cells */
  test_chan_accept_cells = 0;
  /* ...and keep it from trying to flush the queue */
  ch->state = CHANNEL_STATE_MAINT;

  /* Get a fresh cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);

  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  test_assert(test_cells_written == old_count);

  /*
   * Now change back to open with channel_change_state() and assert that it
   * gets drained from the queue.
   */
  test_chan_accept_cells = 1;
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_assert(test_cells_written == old_count + 1);

  /*
   * Check the note destroy case
   */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  cell->command = CELL_DESTROY;

  /* Set up the mock */
  MOCK(channel_note_destroy_not_pending,
       channel_note_destroy_not_pending_mock);

  old_count = test_destroy_not_pending_calls;
  channel_write_cell(ch, cell);
  test_assert(test_destroy_not_pending_calls == old_count + 1);

  /* Now send a non-destroy and check we don't call it */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  test_assert(test_destroy_not_pending_calls == old_count + 1);

  UNMOCK(channel_note_destroy_not_pending);

  /*
   * Now switch it to CLOSING so we can test the discard-cells case
   * in the channel_write_*() functions.
   */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch);
  UNMOCK(scheduler_release_channel);

  /* Send cells that will drop in the closing state */
  old_count = test_cells_written;

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  test_assert(test_cells_written == old_count);

  var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(var_cell);
  channel_write_var_cell(ch, var_cell);
  test_assert(test_cells_written == old_count);

  packed_cell = packed_cell_new();
  channel_write_packed_cell(ch, packed_cell);
  test_assert(test_cells_written == old_count);

  free_cell_pool();

 done:
  tor_free(ch);

  return;
}

struct testcase_t channel_tests[] = {
  { "flush", test_channel_flush, TT_FORK, NULL, NULL },
  { "lifecycle", test_channel_lifecycle, TT_FORK, NULL, NULL },
  { "multi", test_channel_multi, TT_FORK, NULL, NULL },
  { "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
  { "write", test_channel_write, TT_FORK, NULL, NULL },
  END_OF_TESTCASES
};