test_channel.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154
  1. /* Copyright (c) 2013, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. #define TOR_CHANNEL_INTERNAL_
  4. #define CHANNEL_PRIVATE_
  5. #include "or.h"
  6. #include "channel.h"
  7. /* For channel_note_destroy_not_pending */
  8. #include "circuitlist.h"
  9. #include "circuitmux.h"
  10. /* For var_cell_free */
  11. #include "connection_or.h"
  12. /* For packed_cell stuff */
  13. #define RELAY_PRIVATE
  14. #include "relay.h"
  15. /* For init/free stuff */
  16. #include "scheduler.h"
  17. /* Test suite stuff */
  18. #include "test.h"
  19. #include "fakechans.h"
  20. /* This comes from channel.c */
  21. extern uint64_t estimated_total_queue_size;
/* Flag: when nonzero, the fake lower-layer write functions accept (and
 * count) cells; when zero they refuse them, forcing the channel layer to
 * queue. */
static int test_chan_accept_cells = 0;
/* Running count of cells the fake lower layer has accepted. */
static int test_cells_written = 0;
/* Number of calls to channel_note_destroy_not_pending_mock(). */
static int test_destroy_not_pending_calls = 0;
/* Number of calls to scheduler_channel_doesnt_want_writes_mock(). */
static int test_doesnt_want_writes_count = 0;
/* Number of calls to scheduler_channel_has_waiting_cells_mock(). */
static int test_has_waiting_cells_count = 0;
/* Overhead factor returned by chan_test_get_overhead_estimate(); tests
 * vary this to exercise queue-size scaling. */
static double test_overhead_estimate = 1.0f;
/* Number of calls to scheduler_release_channel_mock(). */
static int test_releases_count = 0;
  29. static void channel_note_destroy_not_pending_mock(channel_t *ch,
  30. circid_t circid);
  31. static void chan_test_close(channel_t *ch);
  32. static void chan_test_error(channel_t *ch);
  33. static void chan_test_finish_close(channel_t *ch);
  34. static size_t chan_test_num_bytes_queued(channel_t *ch);
  35. static int chan_test_num_cells_writeable(channel_t *ch);
  36. static int chan_test_write_cell(channel_t *ch, cell_t *cell);
  37. static int chan_test_write_packed_cell(channel_t *ch,
  38. packed_cell_t *packed_cell);
  39. static int chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell);
  40. static void scheduler_channel_doesnt_want_writes_mock(channel_t *ch);
  41. static void test_channel_flush(void *arg);
  42. static void test_channel_lifecycle(void *arg);
  43. static void test_channel_multi(void *arg);
  44. static void test_channel_queue_size(void *arg);
  45. static void test_channel_write(void *arg);
  46. static void
  47. channel_note_destroy_not_pending_mock(channel_t *ch,
  48. circid_t circid)
  49. {
  50. (void)ch;
  51. (void)circid;
  52. ++test_destroy_not_pending_calls;
  53. }
/*
 * Lower-layer close method for the fake channel: just sanity-checks the
 * argument.  The "done" label is the jump target test_assert() uses on
 * failure.
 */
static void
chan_test_close(channel_t *ch)
{
  test_assert(ch);

 done:
  return;
}
  61. /*
  62. * Close a channel through the error path
  63. */
  64. static void
  65. chan_test_error(channel_t *ch)
  66. {
  67. test_assert(ch);
  68. test_assert(!(ch->state == CHANNEL_STATE_CLOSING ||
  69. ch->state == CHANNEL_STATE_ERROR ||
  70. ch->state == CHANNEL_STATE_CLOSED));
  71. channel_close_for_error(ch);
  72. done:
  73. return;
  74. }
  75. /*
  76. * Finish closing a channel from CHANNEL_STATE_CLOSING
  77. */
  78. static void
  79. chan_test_finish_close(channel_t *ch)
  80. {
  81. test_assert(ch);
  82. test_assert(ch->state == CHANNEL_STATE_CLOSING);
  83. channel_closed(ch);
  84. done:
  85. return;
  86. }
/*
 * Lower-layer overhead query: returns whatever the current test has
 * stashed in test_overhead_estimate, so tests can vary the factor.
 */
static double
chan_test_get_overhead_estimate(channel_t *ch)
{
  test_assert(ch);

 done:
  return test_overhead_estimate;
}
/*
 * Lower-layer queued-bytes query: the fake channel never holds bytes of
 * its own, so always report zero.
 */
static size_t
chan_test_num_bytes_queued(channel_t *ch)
{
  test_assert(ch);

 done:
  return 0;
}
/*
 * Lower-layer writeable-cells query: always claim room for 32 cells; the
 * tests (e.g. test_channel_queue_size) rely on this fixed value.
 */
static int
chan_test_num_cells_writeable(channel_t *ch)
{
  test_assert(ch);

 done:
  return 32;
}
  108. static int
  109. chan_test_write_cell(channel_t *ch, cell_t *cell)
  110. {
  111. int rv = 0;
  112. test_assert(ch);
  113. test_assert(cell);
  114. if (test_chan_accept_cells) {
  115. /* Free the cell and bump the counter */
  116. tor_free(cell);
  117. ++test_cells_written;
  118. rv = 1;
  119. }
  120. /* else return 0, we didn't accept it */
  121. done:
  122. return rv;
  123. }
  124. static int
  125. chan_test_write_packed_cell(channel_t *ch,
  126. packed_cell_t *packed_cell)
  127. {
  128. int rv = 0;
  129. test_assert(ch);
  130. test_assert(packed_cell);
  131. if (test_chan_accept_cells) {
  132. /* Free the cell and bump the counter */
  133. packed_cell_free(packed_cell);
  134. ++test_cells_written;
  135. rv = 1;
  136. }
  137. /* else return 0, we didn't accept it */
  138. done:
  139. return rv;
  140. }
  141. static int
  142. chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell)
  143. {
  144. int rv = 0;
  145. test_assert(ch);
  146. test_assert(var_cell);
  147. if (test_chan_accept_cells) {
  148. /* Free the cell and bump the counter */
  149. var_cell_free(var_cell);
  150. ++test_cells_written;
  151. rv = 1;
  152. }
  153. /* else return 0, we didn't accept it */
  154. done:
  155. return rv;
  156. }
  157. /**
  158. * Fill out c with a new fake cell for test suite use
  159. */
  160. void
  161. make_fake_cell(cell_t *c)
  162. {
  163. test_assert(c != NULL);
  164. c->circ_id = 1;
  165. c->command = CELL_RELAY;
  166. memset(c->payload, 0, CELL_PAYLOAD_SIZE);
  167. done:
  168. return;
  169. }
  170. /**
  171. * Fill out c with a new fake var_cell for test suite use
  172. */
  173. void
  174. make_fake_var_cell(var_cell_t *c)
  175. {
  176. test_assert(c != NULL);
  177. c->circ_id = 1;
  178. c->command = CELL_VERSIONS;
  179. c->payload_len = CELL_PAYLOAD_SIZE / 2;
  180. memset(c->payload, 0, c->payload_len);
  181. done:
  182. return;
  183. }
  184. /**
  185. * Set up a new fake channel for the test suite
  186. */
  187. channel_t *
  188. new_fake_channel(void)
  189. {
  190. channel_t *chan = tor_malloc_zero(sizeof(channel_t));
  191. channel_init(chan);
  192. chan->close = chan_test_close;
  193. chan->get_overhead_estimate = chan_test_get_overhead_estimate;
  194. chan->num_bytes_queued = chan_test_num_bytes_queued;
  195. chan->num_cells_writeable = chan_test_num_cells_writeable;
  196. chan->write_cell = chan_test_write_cell;
  197. chan->write_packed_cell = chan_test_write_packed_cell;
  198. chan->write_var_cell = chan_test_write_var_cell;
  199. chan->state = CHANNEL_STATE_OPEN;
  200. return chan;
  201. }
/**
 * Counter query for scheduler_channel_has_waiting_cells_mock(): how many
 * times has the mock been called so far?
 */
int
get_mock_scheduler_has_waiting_cells_count(void)
{
  return test_has_waiting_cells_count;
}
  210. /**
  211. * Mock for scheduler_channel_has_waiting_cells()
  212. */
  213. void
  214. scheduler_channel_has_waiting_cells_mock(channel_t *ch)
  215. {
  216. (void)ch;
  217. /* Increment counter */
  218. ++test_has_waiting_cells_count;
  219. return;
  220. }
  221. static void
  222. scheduler_channel_doesnt_want_writes_mock(channel_t *ch)
  223. {
  224. (void)ch;
  225. /* Increment counter */
  226. ++test_doesnt_want_writes_count;
  227. return;
  228. }
/**
 * Counter query for scheduler_release_channel_mock(): how many times has
 * the mock been called so far?
 */
int
get_mock_scheduler_release_channel_count(void)
{
  return test_releases_count;
}
  237. /**
  238. * Mock for scheduler_release_channel()
  239. */
  240. void
  241. scheduler_release_channel_mock(channel_t *ch)
  242. {
  243. (void)ch;
  244. /* Increment counter */
  245. ++test_releases_count;
  246. return;
  247. }
/**
 * Test queueing and flushing: with the lower layer refusing cells, write
 * one fixed cell, one var cell and one packed cell (all should queue),
 * then re-enable writes and verify channel_flush_cells() delivers all
 * three to the lower layer.
 */
static void
test_channel_flush(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  packed_cell_t *p_cell = NULL;
  var_cell_t *v_cell = NULL;
  int init_count;

  (void)arg;

  init_cell_pool();

  ch = new_fake_channel();
  test_assert(ch);

  /* Cache the original count */
  init_count = test_cells_written;

  /* Stop accepting so we can queue some */
  test_chan_accept_cells = 0;

  /* Queue a regular cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Queue a var cell */
  v_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(v_cell);
  channel_write_var_cell(ch, v_cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Try a packed cell now */
  p_cell = packed_cell_new();
  test_assert(p_cell);
  channel_write_packed_cell(ch, p_cell);
  /* It should be queued, so assert that we didn't write it */
  test_eq(test_cells_written, init_count);

  /* Now allow writes through again */
  test_chan_accept_cells = 1;

  /* ...and flush */
  channel_flush_cells(ch);

  /* All three should have gone through */
  test_eq(test_cells_written, init_count + 3);

 done:
  /* NOTE(review): tor_free() frees only the channel struct; this assumes
   * the queue drained above -- on an assertion failure any still-queued
   * cells would leak.  Verify against channel.c cleanup helpers. */
  tor_free(ch);
  free_cell_pool();

  return;
}
/**
 * Normal channel lifecycle test:
 *
 * OPENING->OPEN->MAINT->OPEN->CLOSING->CLOSED
 *
 * Walks two fake channels through the normal state sequence and checks,
 * after each transition, how many times the mocked scheduler saw
 * "doesn't want writes" and "release channel" notifications.
 */
static void
test_channel_lifecycle(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  cell_t *cell = NULL;
  int old_count, init_doesnt_want_writes_count;
  int init_releases_count;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Cache some initial counter values */
  init_doesnt_want_writes_count = test_doesnt_want_writes_count;
  init_releases_count = test_releases_count;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch1 = new_fake_channel();
  test_assert(ch1);
  /* Start it off in OPENING */
  ch1->state = CHANNEL_STATE_OPENING;
  /* We'll need a cmux */
  ch1->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch1);
  test_assert(ch1->registered);

  /* Try to write a cell through (should queue) */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_cells_written;
  channel_write_cell(ch1, cell);
  test_eq(old_count, test_cells_written);

  /* Move it to OPEN and flush */
  channel_change_state(ch1, CHANNEL_STATE_OPEN);

  /* Queue should drain */
  test_eq(old_count + 1, test_cells_written);

  /* Get another one */
  ch2 = new_fake_channel();
  test_assert(ch2);
  ch2->state = CHANNEL_STATE_OPENING;
  ch2->cmux = circuitmux_alloc();

  /* Register */
  channel_register(ch2);
  test_assert(ch2->registered);

  /* Check counters */
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch1 to MAINT; entering MAINT should suppress writes */
  channel_change_state(ch1, CHANNEL_STATE_MAINT);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch2 to OPEN */
  channel_change_state(ch2, CHANNEL_STATE_OPEN);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Move ch1 back to OPEN */
  channel_change_state(ch1, CHANNEL_STATE_OPEN);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count);

  /* Mark ch2 for close; closing should release it from the scheduler */
  channel_mark_for_close(ch2);
  test_eq(ch2->state, CHANNEL_STATE_CLOSING);
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  test_eq(test_releases_count, init_releases_count + 1);

  /* Shut down channels */
  channel_free_all();
  ch1 = ch2 = NULL;
  test_eq(test_doesnt_want_writes_count, init_doesnt_want_writes_count + 1);
  /* channel_free() calls scheduler_release_channel(); presumably the
   * remaining three releases come from closing and freeing ch1 plus
   * freeing ch2 -- TODO confirm against channel_free_all() in channel.c */
  test_eq(test_releases_count, init_releases_count + 4);

 done:
  tor_free(ch1);
  tor_free(ch2);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Weird channel lifecycle test; exercises the less-common state paths:
 *
 * OPENING->CLOSING->CLOSED
 * OPENING->OPEN->CLOSING->ERROR
 * OPENING->OPEN->MAINT->CLOSING->CLOSED
 * OPENING->OPEN->MAINT->CLOSING->ERROR
 *
 * Each subtest builds a fresh fake channel, drives it through one path,
 * then lets channel_run_cleanup() dispose of it.
 */
static void
test_channel_lifecycle_2(void *arg)
{
  channel_t *ch = NULL;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch = new_fake_channel();
  test_assert(ch);
  /* Start it off in OPENING */
  ch->state = CHANNEL_STATE_OPENING;
  /* The full lifecycle test needs a cmux */
  ch->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch);
  test_assert(ch->registered);

  /* Try to close it (OPENING->CLOSING->CLOSED) */
  channel_mark_for_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish closing it */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* Now try OPENING->OPEN->CLOSING->ERROR */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);

  /* Error exit from lower layer */
  chan_test_error(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* OPENING->OPEN->MAINT->CLOSING->CLOSED close from maintenance state */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  test_eq(ch->state, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_mark_for_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /*
   * OPENING->OPEN->MAINT->CLOSING->CLOSED lower-layer close during
   * maintenance state
   */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  test_eq(ch->state, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_close_from_lower_layer(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* OPENING->OPEN->MAINT->CLOSING->ERROR */
  ch = new_fake_channel();
  test_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  test_assert(ch->registered);

  /* Finish opening it */
  channel_change_state(ch, CHANNEL_STATE_OPEN);
  test_eq(ch->state, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  test_eq(ch->state, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  chan_test_error(ch);
  test_eq(ch->state, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  test_eq(ch->state, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* Shut down channels */
  channel_free_all();

 done:
  tor_free(ch);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Exercise queue-size bookkeeping across two channels at once: check that
 * per-channel bytes_queued_for_xmit and the global queue estimate track
 * cells as they are queued, flushed, and as the channels are closed.
 */
static void
test_channel_multi(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  uint64_t global_queue_estimate;
  cell_t *cell = NULL;

  (void)arg;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0f;

  ch1 = new_fake_channel();
  test_assert(ch1);
  ch2 = new_fake_channel();
  test_assert(ch2);

  /* Initial queue size update; both channels start empty */
  channel_update_xmit_queue_size(ch1);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Queue some cells, check queue estimates; cells pass straight
   * through since the lower layer is accepting, so estimates stay 0 */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Stop accepting cells at lower layer */
  test_chan_accept_cells = 0;

  /* Queue some cells and check queue estimates (one 512-byte cell each) */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);
  channel_update_xmit_queue_size(ch1);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch2->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 1024);

  /* Allow cells through again */
  test_chan_accept_cells = 1;

  /* Flush chan 2 */
  channel_flush_cells(ch2);

  /* Update and check queue sizes */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /* Flush chan 1 */
  channel_flush_cells(ch1);

  /* Update and check queue sizes */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 0);
  test_eq(ch2->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Now block again */
  test_chan_accept_cells = 0;

  /* Queue some cells */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  /* Check the estimates */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  test_eq(ch1->bytes_queued_for_xmit, 512);
  test_eq(ch2->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 1024);

  /* Now close channel 2; it should be subtracted from the global queue */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch2);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /*
   * Since the fake channels aren't registered, channel_free_all() can't
   * see them properly.
   */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch1);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Now free everything */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_free_all();
  UNMOCK(scheduler_release_channel);

 done:
  tor_free(ch1);
  tor_free(ch2);

  return;
}
  619. /**
  620. * Check some hopefully-impossible edge cases in the channel queue we
  621. * can only trigger by doing evil things to the queue directly.
  622. */
  623. static void
  624. test_channel_queue_impossible(void *arg)
  625. {
  626. channel_t *ch = NULL;
  627. cell_t *cell = NULL;
  628. packed_cell_t *packed_cell = NULL;
  629. var_cell_t *var_cell = NULL;
  630. int old_count;
  631. cell_queue_entry_t *q = NULL;
  632. uint64_t global_queue_estimate;
  633. /* Cache the global queue size (see below) */
  634. global_queue_estimate = channel_get_global_queue_estimate();
  635. (void)arg;
  636. init_cell_pool();
  637. packed_cell = packed_cell_new();
  638. test_assert(packed_cell);
  639. ch = new_fake_channel();
  640. test_assert(ch);
  641. /* We test queueing here; tell it not to accept cells */
  642. test_chan_accept_cells = 0;
  643. /* ...and keep it from trying to flush the queue */
  644. ch->state = CHANNEL_STATE_MAINT;
  645. /* Cache the cell written count */
  646. old_count = test_cells_written;
  647. /* Assert that the queue is initially empty */
  648. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  649. /* Get a fresh cell and write it to the channel*/
  650. cell = tor_malloc_zero(sizeof(cell_t));
  651. make_fake_cell(cell);
  652. channel_write_cell(ch, cell);
  653. /* Now it should be queued */
  654. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)),1);
  655. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  656. test_assert(q);
  657. if (q) {
  658. test_eq(q->type, CELL_QUEUE_FIXED);
  659. test_eq(q->u.fixed.cell, cell);
  660. }
  661. /* Do perverse things to it */
  662. tor_free(q->u.fixed.cell);
  663. q->u.fixed.cell = NULL;
  664. /*
  665. * Now change back to open with channel_change_state() and assert that it
  666. * gets thrown away properly.
  667. */
  668. test_chan_accept_cells = 1;
  669. channel_change_state(ch, CHANNEL_STATE_OPEN);
  670. test_assert(test_cells_written == old_count);
  671. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  672. /* Same thing but for a var_cell */
  673. test_chan_accept_cells = 0;
  674. ch->state = CHANNEL_STATE_MAINT;
  675. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  676. make_fake_var_cell(var_cell);
  677. channel_write_var_cell(ch, var_cell);
  678. /* Check that it's queued */
  679. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 1);
  680. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  681. test_assert(q);
  682. if (q) {
  683. test_eq(q->type, CELL_QUEUE_VAR);
  684. test_eq(q->u.var.var_cell, var_cell);
  685. }
  686. /* Remove the cell from the queue entry */
  687. tor_free(q->u.var.var_cell);
  688. q->u.var.var_cell = NULL;
  689. /* Let it drain and check that the bad entry is discarded */
  690. test_chan_accept_cells = 1;
  691. channel_change_state(ch, CHANNEL_STATE_OPEN);
  692. test_assert(test_cells_written == old_count);
  693. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  694. /* Same thing with a packed_cell */
  695. test_chan_accept_cells = 0;
  696. ch->state = CHANNEL_STATE_MAINT;
  697. packed_cell = packed_cell_new();
  698. test_assert(packed_cell);
  699. channel_write_packed_cell(ch, packed_cell);
  700. /* Check that it's queued */
  701. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 1);
  702. q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  703. test_assert(q);
  704. if (q) {
  705. test_eq(q->type, CELL_QUEUE_PACKED);
  706. test_eq(q->u.packed.packed_cell, packed_cell);
  707. }
  708. /* Remove the cell from the queue entry */
  709. packed_cell_free(q->u.packed.packed_cell);
  710. q->u.packed.packed_cell = NULL;
  711. /* Let it drain and check that the bad entry is discarded */
  712. test_chan_accept_cells = 1;
  713. channel_change_state(ch, CHANNEL_STATE_OPEN);
  714. test_assert(test_cells_written == old_count);
  715. test_eq(chan_cell_queue_len(&(ch->outgoing_queue)), 0);
  716. done:
  717. tor_free(ch);
  718. free_cell_pool();
  719. /*
  720. * Doing that meant that we couldn't correctly adjust the queue size
  721. * for the var cell, so manually reset the global queue size estimate
  722. * so the next test doesn't break if we run with --no-fork.
  723. */
  724. estimated_total_queue_size = global_queue_estimate;
  725. return;
  726. }
/**
 * Check channel_num_cells_writeable() and the xmit queue size estimates
 * on a single channel as the overhead estimate changes and cells are
 * queued and flushed.  Relies on chan_test_num_cells_writeable() always
 * reporting 32 and on cells being 512 bytes.
 */
static void
test_channel_queue_size(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  int n, old_count;
  uint64_t global_queue_estimate;

  (void)arg;

  ch = new_fake_channel();
  test_assert(ch);

  /* Initial queue size update */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Test the call-through to our fake lower layer */
  n = channel_num_cells_writeable(ch);
  /* chan_test_num_cells_writeable() always returns 32 */
  test_eq(n, 32);

  /*
   * Now we queue some cells and check that channel_num_cells_writeable()
   * adjusts properly
   */

  /* tell it not to accept cells */
  test_chan_accept_cells = 0;
  /* ...and keep it from trying to flush the queue */
  ch->state = CHANNEL_STATE_MAINT;

  /* Get a fresh cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);

  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  /* Assert that it got queued, not written through, correctly */
  test_eq(test_cells_written, old_count);

  /* Now check chan_test_num_cells_writeable() again */
  n = channel_num_cells_writeable(ch);
  test_eq(n, 0); /* Should return 0 since we're in CHANNEL_STATE_MAINT */

  /* Update queue size estimates */
  channel_update_xmit_queue_size(ch);
  /* One cell, times an overhead factor of 1.0 */
  test_eq(ch->bytes_queued_for_xmit, 512);

  /* Try a different overhead factor */
  test_overhead_estimate = 0.5f;
  /* This one should be ignored since it's below 1.0 */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 512);

  /* Now try a larger one */
  test_overhead_estimate = 2.0f;
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 1024);

  /* Go back to 1.0 */
  test_overhead_estimate = 1.0f;
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 512);

  /* Check the global estimate too */
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /* Go to open */
  old_count = test_cells_written;
  channel_change_state(ch, CHANNEL_STATE_OPEN);

  /*
   * It should try to write, but we aren't accepting cells right now, so
   * it'll requeue
   */
  test_eq(test_cells_written, old_count);

  /* Check the queue size again */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 512);

  /*
   * Now the cell is in the queue, and we're open, so we should get 31
   * writeable cells.
   */
  n = channel_num_cells_writeable(ch);
  test_eq(n, 31);

  /* Accept cells again */
  test_chan_accept_cells = 1;
  /* ...and re-process the queue */
  old_count = test_cells_written;
  channel_flush_cells(ch);
  test_eq(test_cells_written, old_count + 1);

  /* Should have 32 writeable now */
  n = channel_num_cells_writeable(ch);
  test_eq(n, 32);

  /* Should have queue size estimate of zero */
  channel_update_xmit_queue_size(ch);
  test_eq(ch->bytes_queued_for_xmit, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  test_eq(global_queue_estimate, 0);

  /* Okay, now we're done with this one */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch);
  UNMOCK(scheduler_release_channel);

 done:
  tor_free(ch);

  return;
}
  825. static void
  826. test_channel_write(void *arg)
  827. {
  828. channel_t *ch = NULL;
  829. cell_t *cell = tor_malloc_zero(sizeof(cell_t));
  830. packed_cell_t *packed_cell = NULL;
  831. var_cell_t *var_cell =
  832. tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  833. int old_count;
  834. (void)arg;
  835. init_cell_pool();
  836. packed_cell = packed_cell_new();
  837. test_assert(packed_cell);
  838. ch = new_fake_channel();
  839. test_assert(ch);
  840. make_fake_cell(cell);
  841. make_fake_var_cell(var_cell);
  842. /* Tell it to accept cells */
  843. test_chan_accept_cells = 1;
  844. old_count = test_cells_written;
  845. channel_write_cell(ch, cell);
  846. test_assert(test_cells_written == old_count + 1);
  847. channel_write_var_cell(ch, var_cell);
  848. test_assert(test_cells_written == old_count + 2);
  849. channel_write_packed_cell(ch, packed_cell);
  850. test_assert(test_cells_written == old_count + 3);
  851. /* Now we test queueing; tell it not to accept cells */
  852. test_chan_accept_cells = 0;
  853. /* ...and keep it from trying to flush the queue */
  854. ch->state = CHANNEL_STATE_MAINT;
  855. /* Get a fresh cell */
  856. cell = tor_malloc_zero(sizeof(cell_t));
  857. make_fake_cell(cell);
  858. old_count = test_cells_written;
  859. channel_write_cell(ch, cell);
  860. test_assert(test_cells_written == old_count);
  861. /*
  862. * Now change back to open with channel_change_state() and assert that it
  863. * gets drained from the queue.
  864. */
  865. test_chan_accept_cells = 1;
  866. channel_change_state(ch, CHANNEL_STATE_OPEN);
  867. test_assert(test_cells_written == old_count + 1);
  868. /*
  869. * Check the note destroy case
  870. */
  871. cell = tor_malloc_zero(sizeof(cell_t));
  872. make_fake_cell(cell);
  873. cell->command = CELL_DESTROY;
  874. /* Set up the mock */
  875. MOCK(channel_note_destroy_not_pending,
  876. channel_note_destroy_not_pending_mock);
  877. old_count = test_destroy_not_pending_calls;
  878. channel_write_cell(ch, cell);
  879. test_assert(test_destroy_not_pending_calls == old_count + 1);
  880. /* Now send a non-destroy and check we don't call it */
  881. cell = tor_malloc_zero(sizeof(cell_t));
  882. make_fake_cell(cell);
  883. channel_write_cell(ch, cell);
  884. test_assert(test_destroy_not_pending_calls == old_count + 1);
  885. UNMOCK(channel_note_destroy_not_pending);
  886. /*
  887. * Now switch it to CLOSING so we can test the discard-cells case
  888. * in the channel_write_*() functions.
  889. */
  890. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  891. channel_mark_for_close(ch);
  892. UNMOCK(scheduler_release_channel);
  893. /* Send cells that will drop in the closing state */
  894. old_count = test_cells_written;
  895. cell = tor_malloc_zero(sizeof(cell_t));
  896. make_fake_cell(cell);
  897. channel_write_cell(ch, cell);
  898. test_assert(test_cells_written == old_count);
  899. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  900. make_fake_var_cell(var_cell);
  901. channel_write_var_cell(ch, var_cell);
  902. test_assert(test_cells_written == old_count);
  903. packed_cell = packed_cell_new();
  904. channel_write_packed_cell(ch, packed_cell);
  905. test_assert(test_cells_written == old_count);
  906. free_cell_pool();
  907. done:
  908. tor_free(ch);
  909. return;
  910. }
  911. struct testcase_t channel_tests[] = {
  912. { "flush", test_channel_flush, TT_FORK, NULL, NULL },
  913. { "lifecycle", test_channel_lifecycle, TT_FORK, NULL, NULL },
  914. { "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
  915. { "multi", test_channel_multi, TT_FORK, NULL, NULL },
  916. { "queue_impossible", test_channel_queue_impossible, TT_FORK, NULL, NULL },
  917. { "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
  918. { "write", test_channel_write, TT_FORK, NULL, NULL },
  919. END_OF_TESTCASES
  920. };