/* test_channel.c -- unit tests for channel.c */
  1. /* Copyright (c) 2013-2017, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. #define TOR_CHANNEL_INTERNAL_
  4. #define CHANNEL_PRIVATE_
  5. #include "or.h"
  6. #include "channel.h"
  7. /* For channel_note_destroy_not_pending */
  8. #include "circuitlist.h"
  9. #include "circuitmux.h"
  10. /* For var_cell_free */
  11. #include "connection_or.h"
  12. /* For packed_cell stuff */
  13. #define RELAY_PRIVATE
  14. #include "relay.h"
  15. /* For init/free stuff */
  16. #include "scheduler.h"
  17. /* Test suite stuff */
  18. #include "test.h"
  19. #include "fakechans.h"
/* When nonzero, the fake lower-layer write methods accept (and free)
 * cells handed to them; when zero they reject them. */
static int test_chan_accept_cells = 0;
/* Counters and last-seen pointers updated by the incoming cell handlers. */
static int test_chan_fixed_cells_recved = 0;
static cell_t * test_chan_last_seen_fixed_cell_ptr = NULL;
static int test_chan_var_cells_recved = 0;
static var_cell_t * test_chan_last_seen_var_cell_ptr = NULL;
/* Number of cells the fake lower layer has accepted via the write methods. */
static int test_cells_written = 0;
/* Call counters for the various mocks below. */
static int test_destroy_not_pending_calls = 0;
static int test_doesnt_want_writes_count = 0;
static int test_dumpstats_calls = 0;
static int test_has_waiting_cells_count = 0;
/* Value returned by chan_test_get_overhead_estimate(). */
static double test_overhead_estimate = 1.0;
static int test_releases_count = 0;
/* Channel watched for by the channel_dump_statistics() mock, and the
 * number of times the mock saw it. */
static channel_t *dump_statistics_mock_target = NULL;
static int dump_statistics_mock_matches = 0;

/* Forward declarations: mocks, fake lower-layer methods, and test cases. */
static void chan_test_channel_dump_statistics_mock(
    channel_t *chan, int severity);
static void channel_note_destroy_not_pending_mock(channel_t *ch,
                                                  circid_t circid);
static void chan_test_cell_handler(channel_t *ch,
                                   cell_t *cell);
static const char * chan_test_describe_transport(channel_t *ch);
static void chan_test_dumpstats(channel_t *ch, int severity);
static void chan_test_var_cell_handler(channel_t *ch,
                                       var_cell_t *var_cell);
static void chan_test_close(channel_t *ch);
static void chan_test_error(channel_t *ch);
static void chan_test_finish_close(channel_t *ch);
static const char * chan_test_get_remote_descr(channel_t *ch, int flags);
static int chan_test_is_canonical(channel_t *ch, int req);
static size_t chan_test_num_bytes_queued(channel_t *ch);
static int chan_test_num_cells_writeable(channel_t *ch);
static int chan_test_write_cell(channel_t *ch, cell_t *cell);
static int chan_test_write_packed_cell(channel_t *ch,
                                       packed_cell_t *packed_cell);
static int chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell);
static void scheduler_channel_doesnt_want_writes_mock(channel_t *ch);

static void test_channel_dumpstats(void *arg);
static void test_channel_incoming(void *arg);
static void test_channel_lifecycle(void *arg);
static void test_channel_multi(void *arg);
static void test_channel_queue_size(void *arg);
static void test_channel_write(void *arg);
  62. static void
  63. channel_note_destroy_not_pending_mock(channel_t *ch,
  64. circid_t circid)
  65. {
  66. (void)ch;
  67. (void)circid;
  68. ++test_destroy_not_pending_calls;
  69. }
/*
 * describe_transport method for the fake channel; sanity-checks its
 * argument and always returns the same constant string.
 */
static const char *
chan_test_describe_transport(channel_t *ch)
{
  tt_ptr_op(ch, OP_NE, NULL);

 done:
  return "Fake channel for unit tests";
}
  77. /**
  78. * Mock for channel_dump_statistics(); if the channel matches the
  79. * target, bump a counter - otherwise ignore.
  80. */
  81. static void
  82. chan_test_channel_dump_statistics_mock(channel_t *chan, int severity)
  83. {
  84. tt_ptr_op(chan, OP_NE, NULL);
  85. (void)severity;
  86. if (chan != NULL && chan == dump_statistics_mock_target) {
  87. ++dump_statistics_mock_matches;
  88. }
  89. done:
  90. return;
  91. }
  92. /*
  93. * Handle an incoming fixed-size cell for unit tests
  94. */
  95. static void
  96. chan_test_cell_handler(channel_t *ch,
  97. cell_t *cell)
  98. {
  99. tt_assert(ch);
  100. tt_assert(cell);
  101. test_chan_last_seen_fixed_cell_ptr = cell;
  102. ++test_chan_fixed_cells_recved;
  103. done:
  104. return;
  105. }
  106. /*
  107. * Fake transport-specific stats call
  108. */
  109. static void
  110. chan_test_dumpstats(channel_t *ch, int severity)
  111. {
  112. tt_ptr_op(ch, OP_NE, NULL);
  113. (void)severity;
  114. ++test_dumpstats_calls;
  115. done:
  116. return;
  117. }
  118. /*
  119. * Handle an incoming variable-size cell for unit tests
  120. */
  121. static void
  122. chan_test_var_cell_handler(channel_t *ch,
  123. var_cell_t *var_cell)
  124. {
  125. tt_assert(ch);
  126. tt_assert(var_cell);
  127. test_chan_last_seen_var_cell_ptr = var_cell;
  128. ++test_chan_var_cells_recved;
  129. done:
  130. return;
  131. }
/*
 * Lower-layer close method for the fake channel; it only sanity-checks
 * its argument.  The tests drive the rest of the close sequence
 * explicitly via chan_test_finish_close().
 */
static void
chan_test_close(channel_t *ch)
{
  tt_assert(ch);

 done:
  return;
}
/*
 * Close a channel through the error path; the channel must not already
 * be closing, closed, or in the error state.
 */
static void
chan_test_error(channel_t *ch)
{
  tt_assert(ch);
  /* Must not already be in a closing or terminal state */
  tt_assert(!(ch->state == CHANNEL_STATE_CLOSING ||
              ch->state == CHANNEL_STATE_ERROR ||
              ch->state == CHANNEL_STATE_CLOSED));

  channel_close_for_error(ch);

 done:
  return;
}
/*
 * Finish closing a channel from CHANNEL_STATE_CLOSING, as the lower
 * layer would once its own teardown is complete.
 */
static void
chan_test_finish_close(channel_t *ch)
{
  tt_assert(ch);
  tt_assert(ch->state == CHANNEL_STATE_CLOSING);

  channel_closed(ch);

 done:
  return;
}
/*
 * get_remote_descr method for the fake channel; checks that only the
 * known GRD_FLAG_* bits are set, then returns a fixed description.
 */
static const char *
chan_test_get_remote_descr(channel_t *ch, int flags)
{
  tt_assert(ch);
  tt_int_op(flags & ~(GRD_FLAG_ORIGINAL | GRD_FLAG_ADDR_ONLY), OP_EQ, 0);

 done:
  return "Fake channel for unit tests; no real endpoint";
}
/*
 * get_overhead_estimate method for the fake channel; returns whatever
 * the current test has stored in test_overhead_estimate.
 */
static double
chan_test_get_overhead_estimate(channel_t *ch)
{
  tt_assert(ch);

 done:
  return test_overhead_estimate;
}
/*
 * is_canonical method for the fake channel; the req flag is checked for
 * validity but has no effect on the result.
 */
static int
chan_test_is_canonical(channel_t *ch, int req)
{
  tt_ptr_op(ch, OP_NE, NULL);
  tt_assert(req == 0 || req == 1);

 done:
  /* Fake channels are always canonical */
  return 1;
}
/*
 * num_bytes_queued method for the fake channel; the fake lower layer
 * never queues anything, so this is always zero.
 */
static size_t
chan_test_num_bytes_queued(channel_t *ch)
{
  tt_assert(ch);

 done:
  return 0;
}
/*
 * num_cells_writeable method for the fake channel; claims there is
 * always room for 32 more cells.
 */
static int
chan_test_num_cells_writeable(channel_t *ch)
{
  tt_assert(ch);

 done:
  return 32;
}
  203. static int
  204. chan_test_write_cell(channel_t *ch, cell_t *cell)
  205. {
  206. int rv = 0;
  207. tt_assert(ch);
  208. tt_assert(cell);
  209. if (test_chan_accept_cells) {
  210. /* Free the cell and bump the counter */
  211. tor_free(cell);
  212. ++test_cells_written;
  213. rv = 1;
  214. }
  215. /* else return 0, we didn't accept it */
  216. done:
  217. return rv;
  218. }
  219. static int
  220. chan_test_write_packed_cell(channel_t *ch,
  221. packed_cell_t *packed_cell)
  222. {
  223. int rv = 0;
  224. tt_assert(ch);
  225. tt_assert(packed_cell);
  226. if (test_chan_accept_cells) {
  227. /* Free the cell and bump the counter */
  228. packed_cell_free(packed_cell);
  229. ++test_cells_written;
  230. rv = 1;
  231. }
  232. /* else return 0, we didn't accept it */
  233. done:
  234. return rv;
  235. }
  236. static int
  237. chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell)
  238. {
  239. int rv = 0;
  240. tt_assert(ch);
  241. tt_assert(var_cell);
  242. if (test_chan_accept_cells) {
  243. /* Free the cell and bump the counter */
  244. var_cell_free(var_cell);
  245. ++test_cells_written;
  246. rv = 1;
  247. }
  248. /* else return 0, we didn't accept it */
  249. done:
  250. return rv;
  251. }
  252. /**
  253. * Fill out c with a new fake cell for test suite use
  254. */
  255. void
  256. make_fake_cell(cell_t *c)
  257. {
  258. tt_ptr_op(c, OP_NE, NULL);
  259. c->circ_id = 1;
  260. c->command = CELL_RELAY;
  261. memset(c->payload, 0, CELL_PAYLOAD_SIZE);
  262. done:
  263. return;
  264. }
  265. /**
  266. * Fill out c with a new fake var_cell for test suite use
  267. */
  268. void
  269. make_fake_var_cell(var_cell_t *c)
  270. {
  271. tt_ptr_op(c, OP_NE, NULL);
  272. c->circ_id = 1;
  273. c->command = CELL_VERSIONS;
  274. c->payload_len = CELL_PAYLOAD_SIZE / 2;
  275. memset(c->payload, 0, c->payload_len);
  276. done:
  277. return;
  278. }
/**
 * Set up a new fake channel for the test suite.  The caller owns the
 * result and should dispose of it with free_fake_channel() (or let
 * channel_run_cleanup()/channel_free_all() reap it if registered).
 */
channel_t *
new_fake_channel(void)
{
  channel_t *chan = tor_malloc_zero(sizeof(channel_t));
  channel_init(chan);

  /* Wire up the fake lower-layer methods defined above */
  chan->close = chan_test_close;
  chan->get_overhead_estimate = chan_test_get_overhead_estimate;
  chan->get_remote_descr = chan_test_get_remote_descr;
  chan->num_bytes_queued = chan_test_num_bytes_queued;
  chan->num_cells_writeable = chan_test_num_cells_writeable;
  chan->write_cell = chan_test_write_cell;
  chan->write_packed_cell = chan_test_write_packed_cell;
  chan->write_var_cell = chan_test_write_var_cell;

  /* Fake channels start out already open */
  chan->state = CHANNEL_STATE_OPEN;

  return chan;
}
  298. void
  299. free_fake_channel(channel_t *chan)
  300. {
  301. if (! chan)
  302. return;
  303. if (chan->cmux)
  304. circuitmux_free(chan->cmux);
  305. tor_free(chan);
  306. }
/**
 * Counter query for scheduler_channel_has_waiting_cells_mock(); returns
 * how many times the mock has fired.
 */
int
get_mock_scheduler_has_waiting_cells_count(void)
{
  return test_has_waiting_cells_count;
}
  315. /**
  316. * Mock for scheduler_channel_has_waiting_cells()
  317. */
  318. void
  319. scheduler_channel_has_waiting_cells_mock(channel_t *ch)
  320. {
  321. (void)ch;
  322. /* Increment counter */
  323. ++test_has_waiting_cells_count;
  324. return;
  325. }
  326. static void
  327. scheduler_channel_doesnt_want_writes_mock(channel_t *ch)
  328. {
  329. (void)ch;
  330. /* Increment counter */
  331. ++test_doesnt_want_writes_count;
  332. return;
  333. }
/**
 * Counter query for scheduler_release_channel_mock(); returns how many
 * times the mock has fired.
 */
int
get_mock_scheduler_release_channel_count(void)
{
  return test_releases_count;
}
  342. /**
  343. * Mock for scheduler_release_channel()
  344. */
  345. void
  346. scheduler_release_channel_mock(channel_t *ch)
  347. {
  348. (void)ch;
  349. /* Increment counter */
  350. ++test_releases_count;
  351. return;
  352. }
  353. /**
  354. * Test for channel_dumpstats() and limited test for
  355. * channel_dump_statistics()
  356. */
  357. static void
  358. test_channel_dumpstats(void *arg)
  359. {
  360. channel_t *ch = NULL;
  361. cell_t *cell = NULL;
  362. int old_count;
  363. (void)arg;
  364. /* Mock these for duration of the test */
  365. MOCK(scheduler_channel_doesnt_want_writes,
  366. scheduler_channel_doesnt_want_writes_mock);
  367. MOCK(scheduler_release_channel,
  368. scheduler_release_channel_mock);
  369. /* Set up a new fake channel */
  370. ch = new_fake_channel();
  371. tt_assert(ch);
  372. ch->cmux = circuitmux_alloc();
  373. /* Try to register it */
  374. channel_register(ch);
  375. tt_assert(ch->registered);
  376. /* Set up mock */
  377. dump_statistics_mock_target = ch;
  378. dump_statistics_mock_matches = 0;
  379. MOCK(channel_dump_statistics,
  380. chan_test_channel_dump_statistics_mock);
  381. /* Call channel_dumpstats() */
  382. channel_dumpstats(LOG_DEBUG);
  383. /* Assert that we hit the mock */
  384. tt_int_op(dump_statistics_mock_matches, OP_EQ, 1);
  385. /* Close the channel */
  386. channel_mark_for_close(ch);
  387. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  388. chan_test_finish_close(ch);
  389. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  390. /* Try again and hit the finished channel */
  391. channel_dumpstats(LOG_DEBUG);
  392. tt_int_op(dump_statistics_mock_matches, OP_EQ, 2);
  393. channel_run_cleanup();
  394. ch = NULL;
  395. /* Now we should hit nothing */
  396. channel_dumpstats(LOG_DEBUG);
  397. tt_int_op(dump_statistics_mock_matches, OP_EQ, 2);
  398. /* Unmock */
  399. UNMOCK(channel_dump_statistics);
  400. dump_statistics_mock_target = NULL;
  401. dump_statistics_mock_matches = 0;
  402. /* Now make another channel */
  403. ch = new_fake_channel();
  404. tt_assert(ch);
  405. ch->cmux = circuitmux_alloc();
  406. channel_register(ch);
  407. tt_assert(ch->registered);
  408. /* Lie about its age so dumpstats gets coverage for rate calculations */
  409. ch->timestamp_created = time(NULL) - 30;
  410. tt_assert(ch->timestamp_created > 0);
  411. tt_assert(time(NULL) > ch->timestamp_created);
  412. /* Put cells through it both ways to make the counters non-zero */
  413. cell = tor_malloc_zero(sizeof(*cell));
  414. make_fake_cell(cell);
  415. test_chan_accept_cells = 1;
  416. old_count = test_cells_written;
  417. channel_write_cell(ch, cell);
  418. cell = NULL;
  419. tt_int_op(test_cells_written, OP_EQ, old_count + 1);
  420. tt_assert(ch->n_bytes_xmitted > 0);
  421. tt_assert(ch->n_cells_xmitted > 0);
  422. /* Receive path */
  423. channel_set_cell_handlers(ch,
  424. chan_test_cell_handler,
  425. chan_test_var_cell_handler);
  426. tt_ptr_op(channel_get_cell_handler(ch), OP_EQ, chan_test_cell_handler);
  427. tt_ptr_op(channel_get_var_cell_handler(ch), OP_EQ,
  428. chan_test_var_cell_handler);
  429. cell = tor_malloc_zero(sizeof(cell_t));
  430. make_fake_cell(cell);
  431. old_count = test_chan_fixed_cells_recved;
  432. tor_free(cell);
  433. tt_int_op(test_chan_fixed_cells_recved, OP_EQ, old_count + 1);
  434. tt_assert(ch->n_bytes_recved > 0);
  435. tt_assert(ch->n_cells_recved > 0);
  436. /* Test channel_dump_statistics */
  437. ch->describe_transport = chan_test_describe_transport;
  438. ch->dumpstats = chan_test_dumpstats;
  439. ch->is_canonical = chan_test_is_canonical;
  440. old_count = test_dumpstats_calls;
  441. channel_dump_statistics(ch, LOG_DEBUG);
  442. tt_int_op(test_dumpstats_calls, OP_EQ, old_count + 1);
  443. /* Close the channel */
  444. channel_mark_for_close(ch);
  445. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  446. chan_test_finish_close(ch);
  447. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  448. channel_run_cleanup();
  449. ch = NULL;
  450. done:
  451. tor_free(cell);
  452. free_fake_channel(ch);
  453. UNMOCK(scheduler_channel_doesnt_want_writes);
  454. UNMOCK(scheduler_release_channel);
  455. return;
  456. }
  457. static void
  458. test_channel_incoming(void *arg)
  459. {
  460. channel_t *ch = NULL;
  461. cell_t *cell = NULL;
  462. var_cell_t *var_cell = NULL;
  463. int old_count;
  464. (void)arg;
  465. /* Mock these for duration of the test */
  466. MOCK(scheduler_channel_doesnt_want_writes,
  467. scheduler_channel_doesnt_want_writes_mock);
  468. MOCK(scheduler_release_channel,
  469. scheduler_release_channel_mock);
  470. /* Accept cells to lower layer */
  471. test_chan_accept_cells = 1;
  472. /* Use default overhead factor */
  473. test_overhead_estimate = 1.0;
  474. ch = new_fake_channel();
  475. tt_assert(ch);
  476. /* Start it off in OPENING */
  477. ch->state = CHANNEL_STATE_OPENING;
  478. /* We'll need a cmux */
  479. ch->cmux = circuitmux_alloc();
  480. /* Install incoming cell handlers */
  481. channel_set_cell_handlers(ch,
  482. chan_test_cell_handler,
  483. chan_test_var_cell_handler);
  484. /* Test cell handler getters */
  485. tt_ptr_op(channel_get_cell_handler(ch), OP_EQ, chan_test_cell_handler);
  486. tt_ptr_op(channel_get_var_cell_handler(ch), OP_EQ,
  487. chan_test_var_cell_handler);
  488. /* Try to register it */
  489. channel_register(ch);
  490. tt_assert(ch->registered);
  491. /* Open it */
  492. channel_change_state_open(ch);
  493. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);
  494. /* Receive a fixed cell */
  495. cell = tor_malloc_zero(sizeof(cell_t));
  496. make_fake_cell(cell);
  497. old_count = test_chan_fixed_cells_recved;
  498. tor_free(cell);
  499. tt_int_op(test_chan_fixed_cells_recved, OP_EQ, old_count + 1);
  500. /* Receive a variable-size cell */
  501. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  502. make_fake_var_cell(var_cell);
  503. old_count = test_chan_var_cells_recved;
  504. tor_free(cell);
  505. tt_int_op(test_chan_var_cells_recved, OP_EQ, old_count + 1);
  506. /* Close it */
  507. channel_mark_for_close(ch);
  508. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  509. chan_test_finish_close(ch);
  510. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  511. channel_run_cleanup();
  512. ch = NULL;
  513. done:
  514. free_fake_channel(ch);
  515. tor_free(cell);
  516. tor_free(var_cell);
  517. UNMOCK(scheduler_channel_doesnt_want_writes);
  518. UNMOCK(scheduler_release_channel);
  519. return;
  520. }
/**
 * Normal channel lifecycle test:
 *
 * OPENING->OPEN->MAINT->OPEN->CLOSING->CLOSED
 */
static void
test_channel_lifecycle(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  cell_t *cell = NULL;
  int old_count, init_doesnt_want_writes_count;
  int init_releases_count;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Cache some initial counter values */
  init_doesnt_want_writes_count = test_doesnt_want_writes_count;
  init_releases_count = test_releases_count;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0;

  ch1 = new_fake_channel();
  tt_assert(ch1);
  /* Start it off in OPENING */
  ch1->state = CHANNEL_STATE_OPENING;
  /* We'll need a cmux */
  ch1->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch1);
  tt_assert(ch1->registered);

  /* Try to write a cell through (should queue) */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_cells_written;
  channel_write_cell(ch1, cell);
  /* Still in OPENING, so the lower layer must not have seen it yet */
  tt_int_op(old_count, OP_EQ, test_cells_written);

  /* Move it to OPEN and flush */
  channel_change_state_open(ch1);

  /* Queue should drain */
  tt_int_op(old_count + 1, OP_EQ, test_cells_written);

  /* Get another one */
  ch2 = new_fake_channel();
  tt_assert(ch2);
  ch2->state = CHANNEL_STATE_OPENING;
  ch2->cmux = circuitmux_alloc();

  /* Register */
  channel_register(ch2);
  tt_assert(ch2->registered);

  /* Check counters */
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Move ch1 to MAINT */
  channel_change_state(ch1, CHANNEL_STATE_MAINT);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Move ch2 to OPEN */
  channel_change_state_open(ch2);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Move ch1 back to OPEN */
  channel_change_state_open(ch1);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Mark ch2 for close */
  channel_mark_for_close(ch2);
  tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_CLOSING);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count + 1);

  /* Shut down channels */
  channel_free_all();
  ch1 = ch2 = NULL;
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  /* channel_free() calls scheduler_release_channel() */
  tt_int_op(test_releases_count, OP_EQ, init_releases_count + 4);

 done:
  free_fake_channel(ch1);
  free_fake_channel(ch2);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Weird channel lifecycle test:
 *
 * OPENING->CLOSING->CLOSED
 * OPENING->OPEN->CLOSING->ERROR
 * OPENING->OPEN->MAINT->CLOSING->CLOSED
 * OPENING->OPEN->MAINT->CLOSING->ERROR
 */
static void
test_channel_lifecycle_2(void *arg)
{
  channel_t *ch = NULL;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0;

  ch = new_fake_channel();
  tt_assert(ch);
  /* Start it off in OPENING */
  ch->state = CHANNEL_STATE_OPENING;
  /* The full lifecycle test needs a cmux */
  ch->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch);
  tt_assert(ch->registered);

  /* Try to close it (OPENING->CLOSING, never reaching OPEN) */
  channel_mark_for_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish closing it */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* Now try OPENING->OPEN->CLOSING->ERROR */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);

  /* Error exit from lower layer */
  chan_test_error(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* OPENING->OPEN->MAINT->CLOSING->CLOSED close from maintenance state */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_mark_for_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /*
   * OPENING->OPEN->MAINT->CLOSING->CLOSED lower-layer close during
   * maintenance state
   */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_close_from_lower_layer(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* OPENING->OPEN->MAINT->CLOSING->ERROR */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  chan_test_error(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* Shut down channels */
  channel_free_all();

 done:
  /* ch is NULL on success; tor_free() is a no-op on NULL */
  tor_free(ch);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
  741. static void
  742. test_channel_multi(void *arg)
  743. {
  744. channel_t *ch1 = NULL, *ch2 = NULL;
  745. uint64_t global_queue_estimate;
  746. cell_t *cell = NULL;
  747. (void)arg;
  748. /* Accept cells to lower layer */
  749. test_chan_accept_cells = 1;
  750. /* Use default overhead factor */
  751. test_overhead_estimate = 1.0;
  752. ch1 = new_fake_channel();
  753. tt_assert(ch1);
  754. ch2 = new_fake_channel();
  755. tt_assert(ch2);
  756. /* Initial queue size update */
  757. channel_update_xmit_queue_size(ch1);
  758. tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 0);
  759. channel_update_xmit_queue_size(ch2);
  760. tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  761. global_queue_estimate = channel_get_global_queue_estimate();
  762. tt_u64_op(global_queue_estimate, OP_EQ, 0);
  763. /* Queue some cells, check queue estimates */
  764. cell = tor_malloc_zero(sizeof(cell_t));
  765. make_fake_cell(cell);
  766. channel_write_cell(ch1, cell);
  767. cell = tor_malloc_zero(sizeof(cell_t));
  768. make_fake_cell(cell);
  769. channel_write_cell(ch2, cell);
  770. channel_update_xmit_queue_size(ch1);
  771. channel_update_xmit_queue_size(ch2);
  772. tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 0);
  773. tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  774. global_queue_estimate = channel_get_global_queue_estimate();
  775. tt_u64_op(global_queue_estimate, OP_EQ, 0);
  776. /* Stop accepting cells at lower layer */
  777. test_chan_accept_cells = 0;
  778. /* Queue some cells and check queue estimates */
  779. cell = tor_malloc_zero(sizeof(cell_t));
  780. make_fake_cell(cell);
  781. channel_write_cell(ch1, cell);
  782. channel_update_xmit_queue_size(ch1);
  783. tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 512);
  784. global_queue_estimate = channel_get_global_queue_estimate();
  785. tt_u64_op(global_queue_estimate, OP_EQ, 512);
  786. cell = tor_malloc_zero(sizeof(cell_t));
  787. make_fake_cell(cell);
  788. channel_write_cell(ch2, cell);
  789. channel_update_xmit_queue_size(ch2);
  790. tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 512);
  791. global_queue_estimate = channel_get_global_queue_estimate();
  792. tt_u64_op(global_queue_estimate, OP_EQ, 1024);
  793. /* Allow cells through again */
  794. test_chan_accept_cells = 1;
  795. /* Update and check queue sizes */
  796. channel_update_xmit_queue_size(ch1);
  797. channel_update_xmit_queue_size(ch2);
  798. tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 512);
  799. tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  800. global_queue_estimate = channel_get_global_queue_estimate();
  801. tt_u64_op(global_queue_estimate, OP_EQ, 512);
  802. /* Update and check queue sizes */
  803. channel_update_xmit_queue_size(ch1);
  804. channel_update_xmit_queue_size(ch2);
  805. tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 0);
  806. tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  807. global_queue_estimate = channel_get_global_queue_estimate();
  808. tt_u64_op(global_queue_estimate, OP_EQ, 0);
  809. /* Now block again */
  810. test_chan_accept_cells = 0;
  811. /* Queue some cells */
  812. cell = tor_malloc_zero(sizeof(cell_t));
  813. make_fake_cell(cell);
  814. channel_write_cell(ch1, cell);
  815. cell = tor_malloc_zero(sizeof(cell_t));
  816. make_fake_cell(cell);
  817. channel_write_cell(ch2, cell);
  818. cell = NULL;
  819. /* Check the estimates */
  820. channel_update_xmit_queue_size(ch1);
  821. channel_update_xmit_queue_size(ch2);
  822. tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 512);
  823. tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 512);
  824. global_queue_estimate = channel_get_global_queue_estimate();
  825. tt_u64_op(global_queue_estimate, OP_EQ, 1024);
  826. /* Now close channel 2; it should be subtracted from the global queue */
  827. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  828. channel_mark_for_close(ch2);
  829. UNMOCK(scheduler_release_channel);
  830. global_queue_estimate = channel_get_global_queue_estimate();
  831. tt_u64_op(global_queue_estimate, OP_EQ, 512);
  832. /*
  833. * Since the fake channels aren't registered, channel_free_all() can't
  834. * see them properly.
  835. */
  836. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  837. channel_mark_for_close(ch1);
  838. UNMOCK(scheduler_release_channel);
  839. global_queue_estimate = channel_get_global_queue_estimate();
  840. tt_u64_op(global_queue_estimate, OP_EQ, 0);
  841. /* Now free everything */
  842. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  843. channel_free_all();
  844. UNMOCK(scheduler_release_channel);
  845. done:
  846. free_fake_channel(ch1);
  847. free_fake_channel(ch2);
  848. return;
  849. }
  850. static void
  851. test_channel_queue_size(void *arg)
  852. {
  853. channel_t *ch = NULL;
  854. cell_t *cell = NULL;
  855. int n, old_count;
  856. uint64_t global_queue_estimate;
  857. (void)arg;
  858. ch = new_fake_channel();
  859. tt_assert(ch);
  860. /* Initial queue size update */
  861. channel_update_xmit_queue_size(ch);
  862. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 0);
  863. global_queue_estimate = channel_get_global_queue_estimate();
  864. tt_u64_op(global_queue_estimate, OP_EQ, 0);
  865. /* Test the call-through to our fake lower layer */
  866. n = channel_num_cells_writeable(ch);
  867. /* chan_test_num_cells_writeable() always returns 32 */
  868. tt_int_op(n, OP_EQ, 32);
  869. /*
  870. * Now we queue some cells and check that channel_num_cells_writeable()
  871. * adjusts properly
  872. */
  873. /* tell it not to accept cells */
  874. test_chan_accept_cells = 0;
  875. /* ...and keep it from trying to flush the queue */
  876. ch->state = CHANNEL_STATE_MAINT;
  877. /* Get a fresh cell */
  878. cell = tor_malloc_zero(sizeof(cell_t));
  879. make_fake_cell(cell);
  880. old_count = test_cells_written;
  881. channel_write_cell(ch, cell);
  882. /* Assert that it got queued, not written through, correctly */
  883. tt_int_op(test_cells_written, OP_EQ, old_count);
  884. /* Now check chan_test_num_cells_writeable() again */
  885. n = channel_num_cells_writeable(ch);
  886. /* Should return 0 since we're in CHANNEL_STATE_MAINT */
  887. tt_int_op(n, OP_EQ, 0);
  888. /* Update queue size estimates */
  889. channel_update_xmit_queue_size(ch);
  890. /* One cell, times an overhead factor of 1.0 */
  891. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  892. /* Try a different overhead factor */
  893. test_overhead_estimate = 0.5;
  894. /* This one should be ignored since it's below 1.0 */
  895. channel_update_xmit_queue_size(ch);
  896. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  897. /* Now try a larger one */
  898. test_overhead_estimate = 2.0;
  899. channel_update_xmit_queue_size(ch);
  900. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 1024);
  901. /* Go back to 1.0 */
  902. test_overhead_estimate = 1.0;
  903. channel_update_xmit_queue_size(ch);
  904. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  905. /* Check the global estimate too */
  906. global_queue_estimate = channel_get_global_queue_estimate();
  907. tt_u64_op(global_queue_estimate, OP_EQ, 512);
  908. /* Go to open */
  909. old_count = test_cells_written;
  910. channel_change_state_open(ch);
  911. /*
  912. * It should try to write, but we aren't accepting cells right now, so
  913. * it'll requeue
  914. */
  915. tt_int_op(test_cells_written, OP_EQ, old_count);
  916. /* Check the queue size again */
  917. channel_update_xmit_queue_size(ch);
  918. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  919. global_queue_estimate = channel_get_global_queue_estimate();
  920. tt_u64_op(global_queue_estimate, OP_EQ, 512);
  921. /*
  922. * Now the cell is in the queue, and we're open, so we should get 31
  923. * writeable cells.
  924. */
  925. n = channel_num_cells_writeable(ch);
  926. tt_int_op(n, OP_EQ, 31);
  927. /* Accept cells again */
  928. test_chan_accept_cells = 1;
  929. /* ...and re-process the queue */
  930. old_count = test_cells_written;
  931. tt_int_op(test_cells_written, OP_EQ, old_count + 1);
  932. /* Should have 32 writeable now */
  933. n = channel_num_cells_writeable(ch);
  934. tt_int_op(n, OP_EQ, 32);
  935. /* Should have queue size estimate of zero */
  936. channel_update_xmit_queue_size(ch);
  937. tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 0);
  938. global_queue_estimate = channel_get_global_queue_estimate();
  939. tt_u64_op(global_queue_estimate, OP_EQ, 0);
  940. /* Okay, now we're done with this one */
  941. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  942. channel_mark_for_close(ch);
  943. UNMOCK(scheduler_release_channel);
  944. done:
  945. free_fake_channel(ch);
  946. return;
  947. }
/**
 * Unit test for the channel_write_*() family.
 *
 * Checks three paths: (1) direct write-through when the fake lower layer
 * accepts cells, (2) queueing in CHANNEL_STATE_MAINT followed by a drain
 * when the channel transitions to open, and (3) the discard path once the
 * channel is closing.  Also verifies that a CELL_DESTROY triggers
 * channel_note_destroy_not_pending() (via mock) while ordinary cells do not.
 *
 * NOTE(review): cells handed to channel_write_*() are owned by the channel
 * afterwards; the local pointers are NULLed (or reassigned) so the cleanup
 * at done: only frees cells the channel never took — confirm against the
 * fake-channel helpers defined earlier in this file.
 */
static void
test_channel_write(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = tor_malloc_zero(sizeof(cell_t));
  packed_cell_t *packed_cell = NULL;
  var_cell_t *var_cell =
    tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  int old_count;

  (void)arg;

  packed_cell = packed_cell_new();
  tt_assert(packed_cell);

  ch = new_fake_channel();
  tt_assert(ch);
  make_fake_cell(cell);
  make_fake_var_cell(var_cell);

  /* Tell it to accept cells */
  test_chan_accept_cells = 1;

  /* Each of the three write variants should pass straight through to the
   * lower layer, bumping test_cells_written once per call. */
  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  cell = NULL;
  tt_assert(test_cells_written == old_count + 1);

  channel_write_var_cell(ch, var_cell);
  var_cell = NULL;
  tt_assert(test_cells_written == old_count + 2);

  channel_write_packed_cell(ch, packed_cell);
  packed_cell = NULL;
  tt_assert(test_cells_written == old_count + 3);

  /* Now we test queueing; tell it not to accept cells */
  test_chan_accept_cells = 0;
  /* ...and keep it from trying to flush the queue */
  ch->state = CHANNEL_STATE_MAINT;

  /* Get a fresh cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);

  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  /* Queued, not written through: the counter must not move */
  tt_assert(test_cells_written == old_count);

  /*
   * Now change back to open with channel_change_state() and assert that it
   * gets drained from the queue.
   */
  test_chan_accept_cells = 1;
  channel_change_state_open(ch);
  tt_assert(test_cells_written == old_count + 1);

  /*
   * Check the note destroy case
   */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  cell->command = CELL_DESTROY;

  /* Set up the mock */
  MOCK(channel_note_destroy_not_pending,
       channel_note_destroy_not_pending_mock);

  old_count = test_destroy_not_pending_calls;
  channel_write_cell(ch, cell);
  /* A destroy cell must notify the destroy-cell bookkeeping exactly once */
  tt_assert(test_destroy_not_pending_calls == old_count + 1);

  /* Now send a non-destroy and check we don't call it */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  tt_assert(test_destroy_not_pending_calls == old_count + 1);

  UNMOCK(channel_note_destroy_not_pending);

  /*
   * Now switch it to CLOSING so we can test the discard-cells case
   * in the channel_write_*() functions.
   */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch);
  UNMOCK(scheduler_release_channel);

  /* Send cells that will drop in the closing state; none of the three
   * variants may reach the lower layer. */
  old_count = test_cells_written;

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch, cell);
  cell = NULL;
  tt_assert(test_cells_written == old_count);

  var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(var_cell);
  channel_write_var_cell(ch, var_cell);
  var_cell = NULL;
  tt_assert(test_cells_written == old_count);

  packed_cell = packed_cell_new();
  channel_write_packed_cell(ch, packed_cell);
  packed_cell = NULL;
  tt_assert(test_cells_written == old_count);

 done:
  free_fake_channel(ch);
  /* tor_free(NULL)/packed_cell_free(NULL) are no-ops, so these are safe
   * whichever assertion jumped here. */
  tor_free(var_cell);
  tor_free(cell);
  packed_cell_free(packed_cell);

  return;
}
  1041. static void
  1042. test_channel_id_map(void *arg)
  1043. {
  1044. (void)arg;
  1045. #define N_CHAN 6
  1046. char rsa_id[N_CHAN][DIGEST_LEN];
  1047. ed25519_public_key_t *ed_id[N_CHAN];
  1048. channel_t *chan[N_CHAN];
  1049. int i;
  1050. ed25519_public_key_t ed_zero;
  1051. memset(&ed_zero, 0, sizeof(ed_zero));
  1052. tt_int_op(DIGEST_LEN, OP_EQ, sizeof(rsa_id[0])); // Do I remember C?
  1053. for (i = 0; i < N_CHAN; ++i) {
  1054. crypto_rand(rsa_id[i], DIGEST_LEN);
  1055. ed_id[i] = tor_malloc_zero(sizeof(*ed_id[i]));
  1056. crypto_rand((char*)ed_id[i]->pubkey, sizeof(ed_id[i]->pubkey));
  1057. }
  1058. /* For channel 3, have no Ed identity. */
  1059. tor_free(ed_id[3]);
  1060. /* Channel 2 and 4 have same ROSA identity */
  1061. memcpy(rsa_id[4], rsa_id[2], DIGEST_LEN);
  1062. /* Channel 2 and 4 and 5 have same RSA identity */
  1063. memcpy(rsa_id[4], rsa_id[2], DIGEST_LEN);
  1064. memcpy(rsa_id[5], rsa_id[2], DIGEST_LEN);
  1065. /* Channels 2 and 5 have same Ed25519 identity */
  1066. memcpy(ed_id[5], ed_id[2], sizeof(*ed_id[2]));
  1067. for (i = 0; i < N_CHAN; ++i) {
  1068. chan[i] = new_fake_channel();
  1069. channel_register(chan[i]);
  1070. channel_set_identity_digest(chan[i], rsa_id[i], ed_id[i]);
  1071. }
  1072. /* Lookup by RSA id only */
  1073. tt_ptr_op(chan[0], OP_EQ,
  1074. channel_find_by_remote_identity(rsa_id[0], NULL));
  1075. tt_ptr_op(chan[1], OP_EQ,
  1076. channel_find_by_remote_identity(rsa_id[1], NULL));
  1077. tt_ptr_op(chan[3], OP_EQ,
  1078. channel_find_by_remote_identity(rsa_id[3], NULL));
  1079. channel_t *ch;
  1080. ch = channel_find_by_remote_identity(rsa_id[2], NULL);
  1081. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1082. ch = channel_next_with_rsa_identity(ch);
  1083. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1084. ch = channel_next_with_rsa_identity(ch);
  1085. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1086. ch = channel_next_with_rsa_identity(ch);
  1087. tt_ptr_op(ch, OP_EQ, NULL);
  1088. /* As above, but with zero Ed25519 ID (meaning "any ID") */
  1089. tt_ptr_op(chan[0], OP_EQ,
  1090. channel_find_by_remote_identity(rsa_id[0], &ed_zero));
  1091. tt_ptr_op(chan[1], OP_EQ,
  1092. channel_find_by_remote_identity(rsa_id[1], &ed_zero));
  1093. tt_ptr_op(chan[3], OP_EQ,
  1094. channel_find_by_remote_identity(rsa_id[3], &ed_zero));
  1095. ch = channel_find_by_remote_identity(rsa_id[2], &ed_zero);
  1096. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1097. ch = channel_next_with_rsa_identity(ch);
  1098. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1099. ch = channel_next_with_rsa_identity(ch);
  1100. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1101. ch = channel_next_with_rsa_identity(ch);
  1102. tt_ptr_op(ch, OP_EQ, NULL);
  1103. /* Lookup nonexistent RSA identity */
  1104. tt_ptr_op(NULL, OP_EQ,
  1105. channel_find_by_remote_identity("!!!!!!!!!!!!!!!!!!!!", NULL));
  1106. /* Look up by full identity pair */
  1107. tt_ptr_op(chan[0], OP_EQ,
  1108. channel_find_by_remote_identity(rsa_id[0], ed_id[0]));
  1109. tt_ptr_op(chan[1], OP_EQ,
  1110. channel_find_by_remote_identity(rsa_id[1], ed_id[1]));
  1111. tt_ptr_op(chan[3], OP_EQ,
  1112. channel_find_by_remote_identity(rsa_id[3], ed_id[3] /*NULL*/));
  1113. tt_ptr_op(chan[4], OP_EQ,
  1114. channel_find_by_remote_identity(rsa_id[4], ed_id[4]));
  1115. ch = channel_find_by_remote_identity(rsa_id[2], ed_id[2]);
  1116. tt_assert(ch == chan[2] || ch == chan[5]);
  1117. /* Look up RSA identity with wrong ed25519 identity */
  1118. tt_ptr_op(NULL, OP_EQ,
  1119. channel_find_by_remote_identity(rsa_id[4], ed_id[0]));
  1120. tt_ptr_op(NULL, OP_EQ,
  1121. channel_find_by_remote_identity(rsa_id[2], ed_id[1]));
  1122. tt_ptr_op(NULL, OP_EQ,
  1123. channel_find_by_remote_identity(rsa_id[3], ed_id[1]));
  1124. done:
  1125. for (i = 0; i < N_CHAN; ++i) {
  1126. channel_clear_identity_digest(chan[i]);
  1127. channel_unregister(chan[i]);
  1128. free_fake_channel(chan[i]);
  1129. tor_free(ed_id[i]);
  1130. }
  1131. #undef N_CHAN
  1132. }
/** Registration table for the channel unit tests; each case runs in a
 * forked subprocess (TT_FORK) so mocked/global channel state cannot leak
 * between tests. */
struct testcase_t channel_tests[] = {
  { "dumpstats", test_channel_dumpstats, TT_FORK, NULL, NULL },
  { "incoming", test_channel_incoming, TT_FORK, NULL, NULL },
  { "lifecycle", test_channel_lifecycle, TT_FORK, NULL, NULL },
  { "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
  { "multi", test_channel_multi, TT_FORK, NULL, NULL },
  { "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
  { "write", test_channel_write, TT_FORK, NULL, NULL },
  { "id_map", test_channel_id_map, TT_FORK, NULL, NULL },
  END_OF_TESTCASES
};