/* test_channel.c — unit tests for Tor's channel abstraction. */
  1. /* Copyright (c) 2013-2017, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. #define TOR_CHANNEL_INTERNAL_
  4. #define CHANNEL_PRIVATE_
  5. #include "or.h"
  6. #include "channel.h"
  7. /* For channel_note_destroy_not_pending */
  8. #include "circuitlist.h"
  9. #include "circuitmux.h"
  10. /* For var_cell_free */
  11. #include "connection_or.h"
  12. /* For packed_cell stuff */
  13. #define RELAY_PRIVATE
  14. #include "relay.h"
  15. /* For init/free stuff */
  16. #include "scheduler.h"
  17. /* Test suite stuff */
  18. #include "test.h"
  19. #include "fakechans.h"
/* --- Test-global state shared between the fake channel methods, the
 * scheduler/cmux mocks, and the test cases below. --- */

/* When nonzero, the fake lower layer accepts (and frees) written cells;
 * when zero, channel_write_*() calls queue the cells instead. */
static int test_chan_accept_cells = 0;
/* Count of fixed-size cells seen by chan_test_cell_handler(). */
static int test_chan_fixed_cells_recved = 0;
/* Last fixed-size cell pointer passed to chan_test_cell_handler(). */
static cell_t * test_chan_last_seen_fixed_cell_ptr = NULL;
/* Count of variable-size cells seen by chan_test_var_cell_handler(). */
static int test_chan_var_cells_recved = 0;
/* Last var_cell pointer passed to chan_test_var_cell_handler(). */
static var_cell_t * test_chan_last_seen_var_cell_ptr = NULL;
/* Cells accepted (and freed) by the chan_test_write_*() methods. */
static int test_cells_written = 0;
/* Invocations of channel_note_destroy_not_pending_mock(). */
static int test_destroy_not_pending_calls = 0;
/* Invocations of scheduler_channel_doesnt_want_writes_mock(). */
static int test_doesnt_want_writes_count = 0;
/* Invocations of chan_test_dumpstats(). */
static int test_dumpstats_calls = 0;
/* Invocations of scheduler_channel_has_waiting_cells_mock(). */
static int test_has_waiting_cells_count = 0;
/* Value returned by chan_test_get_overhead_estimate(). */
static double test_overhead_estimate = 1.0;
/* Invocations of scheduler_release_channel_mock(). */
static int test_releases_count = 0;
/* If non-NULL, the cmux the flush/num_cells mocks should lie about. */
static circuitmux_t *test_target_cmux = NULL;
/* Fake pending-cell count reported for test_target_cmux. */
static unsigned int test_cmux_cells = 0;
/* Channel that chan_test_channel_dump_statistics_mock() should match. */
static channel_t *dump_statistics_mock_target = NULL;
/* Number of matches seen by chan_test_channel_dump_statistics_mock(). */
static int dump_statistics_mock_matches = 0;
/* --- Mock replacements installed via MOCK() during tests --- */
static void chan_test_channel_dump_statistics_mock(
    channel_t *chan, int severity);
static int chan_test_channel_flush_from_first_active_circuit_mock(
    channel_t *chan, int max);
static unsigned int chan_test_circuitmux_num_cells_mock(circuitmux_t *cmux);
static void channel_note_destroy_not_pending_mock(channel_t *ch,
                                                  circid_t circid);
/* --- Fake lower-layer channel methods and cell handlers --- */
static void chan_test_cell_handler(channel_t *ch,
                                   cell_t *cell);
static const char * chan_test_describe_transport(channel_t *ch);
static void chan_test_dumpstats(channel_t *ch, int severity);
static void chan_test_var_cell_handler(channel_t *ch,
                                       var_cell_t *var_cell);
static void chan_test_close(channel_t *ch);
static void chan_test_error(channel_t *ch);
static void chan_test_finish_close(channel_t *ch);
static const char * chan_test_get_remote_descr(channel_t *ch, int flags);
static int chan_test_is_canonical(channel_t *ch, int req);
static size_t chan_test_num_bytes_queued(channel_t *ch);
static int chan_test_num_cells_writeable(channel_t *ch);
static int chan_test_write_cell(channel_t *ch, cell_t *cell);
static int chan_test_write_packed_cell(channel_t *ch,
                                       packed_cell_t *packed_cell);
static int chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell);
static void scheduler_channel_doesnt_want_writes_mock(channel_t *ch);
/* --- Test case entry points --- */
static void test_channel_dumpstats(void *arg);
static void test_channel_flush(void *arg);
static void test_channel_flushmux(void *arg);
static void test_channel_incoming(void *arg);
static void test_channel_lifecycle(void *arg);
static void test_channel_multi(void *arg);
static void test_channel_queue_incoming(void *arg);
static void test_channel_queue_size(void *arg);
static void test_channel_write(void *arg);
  70. static void
  71. channel_note_destroy_not_pending_mock(channel_t *ch,
  72. circid_t circid)
  73. {
  74. (void)ch;
  75. (void)circid;
  76. ++test_destroy_not_pending_calls;
  77. }
/**
 * Fake describe_transport method; returns a fixed descriptive string.
 * tt_ptr_op() jumps to done: on failure, hence the label.
 */
static const char *
chan_test_describe_transport(channel_t *ch)
{
  tt_ptr_op(ch, OP_NE, NULL);

 done:
  return "Fake channel for unit tests";
}
  85. /**
  86. * Mock for channel_dump_statistics(); if the channel matches the
  87. * target, bump a counter - otherwise ignore.
  88. */
  89. static void
  90. chan_test_channel_dump_statistics_mock(channel_t *chan, int severity)
  91. {
  92. tt_ptr_op(chan, OP_NE, NULL);
  93. (void)severity;
  94. if (chan != NULL && chan == dump_statistics_mock_target) {
  95. ++dump_statistics_mock_matches;
  96. }
  97. done:
  98. return;
  99. }
  100. /**
  101. * If the target cmux is the cmux for chan, make fake cells up to the
  102. * target number of cells and write them to chan. Otherwise, invoke
  103. * the real channel_flush_from_first_active_circuit().
  104. */
  105. static int
  106. chan_test_channel_flush_from_first_active_circuit_mock(channel_t *chan,
  107. int max)
  108. {
  109. int result = 0, c = 0;
  110. packed_cell_t *cell = NULL;
  111. tt_ptr_op(chan, OP_NE, NULL);
  112. if (test_target_cmux != NULL &&
  113. test_target_cmux == chan->cmux) {
  114. while (c <= max && test_cmux_cells > 0) {
  115. cell = packed_cell_new();
  116. channel_write_packed_cell(chan, cell);
  117. ++c;
  118. --test_cmux_cells;
  119. }
  120. result = c;
  121. } else {
  122. result = channel_flush_from_first_active_circuit__real(chan, max);
  123. }
  124. done:
  125. return result;
  126. }
  127. /**
  128. * If we have a target cmux set and this matches it, lie about how
  129. * many cells we have according to the number indicated; otherwise
  130. * pass to the real circuitmux_num_cells().
  131. */
  132. static unsigned int
  133. chan_test_circuitmux_num_cells_mock(circuitmux_t *cmux)
  134. {
  135. unsigned int result = 0;
  136. tt_ptr_op(cmux, OP_NE, NULL);
  137. if (cmux != NULL) {
  138. if (cmux == test_target_cmux) {
  139. result = test_cmux_cells;
  140. } else {
  141. result = circuitmux_num_cells__real(cmux);
  142. }
  143. }
  144. done:
  145. return result;
  146. }
/*
 * Handle an incoming fixed-size cell for unit tests: record the cell
 * pointer and bump the received counter.  Does NOT take ownership of
 * the cell; the caller still frees it.
 */
static void
chan_test_cell_handler(channel_t *ch,
                       cell_t *cell)
{
  tt_assert(ch);
  tt_assert(cell);

  test_chan_last_seen_fixed_cell_ptr = cell;
  ++test_chan_fixed_cells_recved;

 done:
  return;
}
/*
 * Fake transport-specific stats call: just count invocations so
 * channel_dump_statistics() coverage can be asserted.
 */
static void
chan_test_dumpstats(channel_t *ch, int severity)
{
  tt_ptr_op(ch, OP_NE, NULL);
  (void)severity;

  ++test_dumpstats_calls;

 done:
  return;
}
/*
 * Handle an incoming variable-size cell for unit tests: record the
 * pointer and bump the received counter.  Does NOT take ownership of
 * the var_cell; the caller still frees it.
 */
static void
chan_test_var_cell_handler(channel_t *ch,
                           var_cell_t *var_cell)
{
  tt_assert(ch);
  tt_assert(var_cell);

  test_chan_last_seen_var_cell_ptr = var_cell;
  ++test_chan_var_cells_recved;

 done:
  return;
}
/* Fake close method: the lower layer has nothing to tear down. */
static void
chan_test_close(channel_t *ch)
{
  tt_assert(ch);

 done:
  return;
}
/*
 * Close a channel through the error path
 */
static void
chan_test_error(channel_t *ch)
{
  tt_assert(ch);
  /* Must not already be closing, closed, or errored */
  tt_assert(!(ch->state == CHANNEL_STATE_CLOSING ||
              ch->state == CHANNEL_STATE_ERROR ||
              ch->state == CHANNEL_STATE_CLOSED));

  channel_close_for_error(ch);

 done:
  return;
}
/*
 * Finish closing a channel from CHANNEL_STATE_CLOSING by notifying the
 * channel layer that the lower-layer close completed.
 */
static void
chan_test_finish_close(channel_t *ch)
{
  tt_assert(ch);
  tt_assert(ch->state == CHANNEL_STATE_CLOSING);

  channel_closed(ch);

 done:
  return;
}
/* Fake get_remote_descr method; only the GRD_FLAG_* bits are valid. */
static const char *
chan_test_get_remote_descr(channel_t *ch, int flags)
{
  tt_assert(ch);
  /* Reject any flag bits outside the known set */
  tt_int_op(flags & ~(GRD_FLAG_ORIGINAL | GRD_FLAG_ADDR_ONLY), OP_EQ, 0);

 done:
  return "Fake channel for unit tests; no real endpoint";
}
/* Fake overhead-estimate method; returns whatever the test configured
 * in test_overhead_estimate. */
static double
chan_test_get_overhead_estimate(channel_t *ch)
{
  tt_assert(ch);

 done:
  return test_overhead_estimate;
}
/* Fake is_canonical method; req must be 0 or 1. */
static int
chan_test_is_canonical(channel_t *ch, int req)
{
  tt_ptr_op(ch, OP_NE, NULL);
  tt_assert(req == 0 || req == 1);

 done:
  /* Fake channels are always canonical */
  return 1;
}
/* Fake num_bytes_queued method; the fake lower layer never buffers. */
static size_t
chan_test_num_bytes_queued(channel_t *ch)
{
  tt_assert(ch);

 done:
  return 0;
}
/* Fake num_cells_writeable method; always claims room for 32 cells. */
static int
chan_test_num_cells_writeable(channel_t *ch)
{
  tt_assert(ch);

 done:
  return 32;
}
  258. static int
  259. chan_test_write_cell(channel_t *ch, cell_t *cell)
  260. {
  261. int rv = 0;
  262. tt_assert(ch);
  263. tt_assert(cell);
  264. if (test_chan_accept_cells) {
  265. /* Free the cell and bump the counter */
  266. tor_free(cell);
  267. ++test_cells_written;
  268. rv = 1;
  269. }
  270. /* else return 0, we didn't accept it */
  271. done:
  272. return rv;
  273. }
  274. static int
  275. chan_test_write_packed_cell(channel_t *ch,
  276. packed_cell_t *packed_cell)
  277. {
  278. int rv = 0;
  279. tt_assert(ch);
  280. tt_assert(packed_cell);
  281. if (test_chan_accept_cells) {
  282. /* Free the cell and bump the counter */
  283. packed_cell_free(packed_cell);
  284. ++test_cells_written;
  285. rv = 1;
  286. }
  287. /* else return 0, we didn't accept it */
  288. done:
  289. return rv;
  290. }
  291. static int
  292. chan_test_write_var_cell(channel_t *ch, var_cell_t *var_cell)
  293. {
  294. int rv = 0;
  295. tt_assert(ch);
  296. tt_assert(var_cell);
  297. if (test_chan_accept_cells) {
  298. /* Free the cell and bump the counter */
  299. var_cell_free(var_cell);
  300. ++test_cells_written;
  301. rv = 1;
  302. }
  303. /* else return 0, we didn't accept it */
  304. done:
  305. return rv;
  306. }
  307. /**
  308. * Fill out c with a new fake cell for test suite use
  309. */
  310. void
  311. make_fake_cell(cell_t *c)
  312. {
  313. tt_ptr_op(c, OP_NE, NULL);
  314. c->circ_id = 1;
  315. c->command = CELL_RELAY;
  316. memset(c->payload, 0, CELL_PAYLOAD_SIZE);
  317. done:
  318. return;
  319. }
  320. /**
  321. * Fill out c with a new fake var_cell for test suite use
  322. */
  323. void
  324. make_fake_var_cell(var_cell_t *c)
  325. {
  326. tt_ptr_op(c, OP_NE, NULL);
  327. c->circ_id = 1;
  328. c->command = CELL_VERSIONS;
  329. c->payload_len = CELL_PAYLOAD_SIZE / 2;
  330. memset(c->payload, 0, c->payload_len);
  331. done:
  332. return;
  333. }
  334. /**
  335. * Set up a new fake channel for the test suite
  336. */
  337. channel_t *
  338. new_fake_channel(void)
  339. {
  340. channel_t *chan = tor_malloc_zero(sizeof(channel_t));
  341. channel_init(chan);
  342. chan->close = chan_test_close;
  343. chan->get_overhead_estimate = chan_test_get_overhead_estimate;
  344. chan->get_remote_descr = chan_test_get_remote_descr;
  345. chan->num_bytes_queued = chan_test_num_bytes_queued;
  346. chan->num_cells_writeable = chan_test_num_cells_writeable;
  347. chan->write_cell = chan_test_write_cell;
  348. chan->write_packed_cell = chan_test_write_packed_cell;
  349. chan->write_var_cell = chan_test_write_var_cell;
  350. chan->state = CHANNEL_STATE_OPEN;
  351. return chan;
  352. }
/**
 * Free a fake channel made with new_fake_channel(), including its
 * cmux (if any) and any cells still sitting in its queues.
 * Safe to call with NULL.
 */
void
free_fake_channel(channel_t *chan)
{
  cell_queue_entry_t *cell, *cell_tmp;

  if (! chan)
    return;

  if (chan->cmux)
    circuitmux_free(chan->cmux);

  /* Drain both queues; the 0 arg means don't touch handed_off cells
   * -- NOTE(review): confirm the second argument's semantics against
   * cell_queue_entry_xfree()'s definition. */
  TOR_SIMPLEQ_FOREACH_SAFE(cell, &chan->incoming_queue, next, cell_tmp) {
    cell_queue_entry_xfree(cell, 0);
  }
  TOR_SIMPLEQ_FOREACH_SAFE(cell, &chan->outgoing_queue, next, cell_tmp) {
    cell_queue_entry_xfree(cell, 0);
  }

  tor_free(chan);
}
/**
 * Counter query for scheduler_channel_has_waiting_cells_mock()
 */
int
get_mock_scheduler_has_waiting_cells_count(void)
{
  return test_has_waiting_cells_count;
}
  377. /**
  378. * Mock for scheduler_channel_has_waiting_cells()
  379. */
  380. void
  381. scheduler_channel_has_waiting_cells_mock(channel_t *ch)
  382. {
  383. (void)ch;
  384. /* Increment counter */
  385. ++test_has_waiting_cells_count;
  386. return;
  387. }
  388. static void
  389. scheduler_channel_doesnt_want_writes_mock(channel_t *ch)
  390. {
  391. (void)ch;
  392. /* Increment counter */
  393. ++test_doesnt_want_writes_count;
  394. return;
  395. }
/**
 * Counter query for scheduler_release_channel_mock()
 */
int
get_mock_scheduler_release_channel_count(void)
{
  return test_releases_count;
}
  404. /**
  405. * Mock for scheduler_release_channel()
  406. */
  407. void
  408. scheduler_release_channel_mock(channel_t *ch)
  409. {
  410. (void)ch;
  411. /* Increment counter */
  412. ++test_releases_count;
  413. return;
  414. }
/**
 * Test for channel_dumpstats() and limited test for
 * channel_dump_statistics().  Walks a registered channel through the
 * dump path in three states (open, closed, cleaned up), then exercises
 * channel_dump_statistics() on a second channel with non-zero traffic
 * counters.
 */
static void
test_channel_dumpstats(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  int old_count;

  (void)arg;

  /* Mock these for duration of the test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Set up a new fake channel */
  ch = new_fake_channel();
  tt_assert(ch);
  /* Registration and cleanup paths need a cmux */
  ch->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch);
  tt_assert(ch->registered);

  /* Set up mock */
  dump_statistics_mock_target = ch;
  dump_statistics_mock_matches = 0;
  MOCK(channel_dump_statistics,
       chan_test_channel_dump_statistics_mock);

  /* Call channel_dumpstats() */
  channel_dumpstats(LOG_DEBUG);

  /* Assert that we hit the mock */
  tt_int_op(dump_statistics_mock_matches, OP_EQ, 1);

  /* Close the channel */
  channel_mark_for_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);

  /* Try again and hit the finished channel */
  channel_dumpstats(LOG_DEBUG);
  tt_int_op(dump_statistics_mock_matches, OP_EQ, 2);

  channel_run_cleanup();
  ch = NULL;

  /* Now we should hit nothing */
  channel_dumpstats(LOG_DEBUG);
  tt_int_op(dump_statistics_mock_matches, OP_EQ, 2);

  /* Unmock */
  UNMOCK(channel_dump_statistics);
  dump_statistics_mock_target = NULL;
  dump_statistics_mock_matches = 0;

  /* Now make another channel */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);
  /* Lie about its age so dumpstats gets coverage for rate calculations */
  ch->timestamp_created = time(NULL) - 30;
  tt_assert(ch->timestamp_created > 0);
  tt_assert(time(NULL) > ch->timestamp_created);

  /* Put cells through it both ways to make the counters non-zero */
  cell = tor_malloc_zero(sizeof(*cell));
  make_fake_cell(cell);
  test_chan_accept_cells = 1;
  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  /* Ownership of cell passed to the channel; don't free it here */
  cell = NULL;
  tt_int_op(test_cells_written, OP_EQ, old_count + 1);
  tt_assert(ch->n_bytes_xmitted > 0);
  tt_assert(ch->n_cells_xmitted > 0);

  /* Receive path */
  channel_set_cell_handlers(ch,
                            chan_test_cell_handler,
                            chan_test_var_cell_handler);
  tt_ptr_op(channel_get_cell_handler(ch), OP_EQ, chan_test_cell_handler);
  tt_ptr_op(channel_get_var_cell_handler(ch), OP_EQ,
            chan_test_var_cell_handler);
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_chan_fixed_cells_recved;
  channel_queue_cell(ch, cell);
  /* tor_free() NULLs its argument, so the done: tor_free() is a no-op */
  tor_free(cell);
  tt_int_op(test_chan_fixed_cells_recved, OP_EQ, old_count + 1);
  tt_assert(ch->n_bytes_recved > 0);
  tt_assert(ch->n_cells_recved > 0);

  /* Test channel_dump_statistics */
  ch->describe_transport = chan_test_describe_transport;
  ch->dumpstats = chan_test_dumpstats;
  ch->is_canonical = chan_test_is_canonical;
  old_count = test_dumpstats_calls;
  channel_dump_statistics(ch, LOG_DEBUG);
  tt_int_op(test_dumpstats_calls, OP_EQ, old_count + 1);

  /* Close the channel */
  channel_mark_for_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

 done:
  tor_free(cell);
  free_fake_channel(ch);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
  520. static void
  521. test_channel_flush(void *arg)
  522. {
  523. channel_t *ch = NULL;
  524. cell_t *cell = NULL;
  525. packed_cell_t *p_cell = NULL;
  526. var_cell_t *v_cell = NULL;
  527. int init_count;
  528. (void)arg;
  529. ch = new_fake_channel();
  530. tt_assert(ch);
  531. /* Cache the original count */
  532. init_count = test_cells_written;
  533. /* Stop accepting so we can queue some */
  534. test_chan_accept_cells = 0;
  535. /* Queue a regular cell */
  536. cell = tor_malloc_zero(sizeof(cell_t));
  537. make_fake_cell(cell);
  538. channel_write_cell(ch, cell);
  539. /* It should be queued, so assert that we didn't write it */
  540. tt_int_op(test_cells_written, OP_EQ, init_count);
  541. /* Queue a var cell */
  542. v_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  543. make_fake_var_cell(v_cell);
  544. channel_write_var_cell(ch, v_cell);
  545. /* It should be queued, so assert that we didn't write it */
  546. tt_int_op(test_cells_written, OP_EQ, init_count);
  547. /* Try a packed cell now */
  548. p_cell = packed_cell_new();
  549. tt_assert(p_cell);
  550. channel_write_packed_cell(ch, p_cell);
  551. /* It should be queued, so assert that we didn't write it */
  552. tt_int_op(test_cells_written, OP_EQ, init_count);
  553. /* Now allow writes through again */
  554. test_chan_accept_cells = 1;
  555. /* ...and flush */
  556. channel_flush_cells(ch);
  557. /* All three should have gone through */
  558. tt_int_op(test_cells_written, OP_EQ, init_count + 3);
  559. done:
  560. tor_free(ch);
  561. return;
  562. }
/**
 * Channel flush tests that require cmux mocking: verify that
 * channel_flush_some_cells() pulls fake cells out of the mocked cmux,
 * both when the lower layer accepts them and when they must queue.
 */
static void
test_channel_flushmux(void *arg)
{
  channel_t *ch = NULL;
  int old_count, q_len_before, q_len_after;
  ssize_t result;

  (void)arg;

  /* Install mocks we need for this test */
  MOCK(channel_flush_from_first_active_circuit,
       chan_test_channel_flush_from_first_active_circuit_mock);
  MOCK(circuitmux_num_cells,
       chan_test_circuitmux_num_cells_mock);

  ch = new_fake_channel();
  tt_assert(ch);
  ch->cmux = circuitmux_alloc();

  old_count = test_cells_written;

  /* Point the mocks at this channel's cmux and pretend it has 1 cell */
  test_target_cmux = ch->cmux;
  test_cmux_cells = 1;

  /* Enable cell acceptance */
  test_chan_accept_cells = 1;

  result = channel_flush_some_cells(ch, 1);

  tt_int_op(result, OP_EQ, 1);
  tt_int_op(test_cells_written, OP_EQ, old_count + 1);
  tt_int_op(test_cmux_cells, OP_EQ, 0);

  /* Now try it without accepting to force them into the queue */
  test_chan_accept_cells = 0;
  test_cmux_cells = 1;
  q_len_before = chan_cell_queue_len(&(ch->outgoing_queue));

  result = channel_flush_some_cells(ch, 1);

  /* We should not have actually flushed any */
  tt_int_op(result, OP_EQ, 0);
  tt_int_op(test_cells_written, OP_EQ, old_count + 1);
  /* But we should have gotten to the fake cellgen loop */
  tt_int_op(test_cmux_cells, OP_EQ, 0);
  /* ...and we should have a queued cell */
  q_len_after = chan_cell_queue_len(&(ch->outgoing_queue));
  tt_int_op(q_len_after, OP_EQ, q_len_before + 1);

  /* Now accept cells again and drain the queue */
  test_chan_accept_cells = 1;
  channel_flush_cells(ch);
  tt_int_op(test_cells_written, OP_EQ, old_count + 2);
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);

  /* Reset the mock targets so later tests hit the real functions */
  test_target_cmux = NULL;
  test_cmux_cells = 0;

 done:
  if (ch)
    circuitmux_free(ch->cmux);
  tor_free(ch);

  UNMOCK(channel_flush_from_first_active_circuit);
  UNMOCK(circuitmux_num_cells);

  test_chan_accept_cells = 0;

  return;
}
  619. static void
  620. test_channel_incoming(void *arg)
  621. {
  622. channel_t *ch = NULL;
  623. cell_t *cell = NULL;
  624. var_cell_t *var_cell = NULL;
  625. int old_count;
  626. (void)arg;
  627. /* Mock these for duration of the test */
  628. MOCK(scheduler_channel_doesnt_want_writes,
  629. scheduler_channel_doesnt_want_writes_mock);
  630. MOCK(scheduler_release_channel,
  631. scheduler_release_channel_mock);
  632. /* Accept cells to lower layer */
  633. test_chan_accept_cells = 1;
  634. /* Use default overhead factor */
  635. test_overhead_estimate = 1.0;
  636. ch = new_fake_channel();
  637. tt_assert(ch);
  638. /* Start it off in OPENING */
  639. ch->state = CHANNEL_STATE_OPENING;
  640. /* We'll need a cmux */
  641. ch->cmux = circuitmux_alloc();
  642. /* Install incoming cell handlers */
  643. channel_set_cell_handlers(ch,
  644. chan_test_cell_handler,
  645. chan_test_var_cell_handler);
  646. /* Test cell handler getters */
  647. tt_ptr_op(channel_get_cell_handler(ch), OP_EQ, chan_test_cell_handler);
  648. tt_ptr_op(channel_get_var_cell_handler(ch), OP_EQ,
  649. chan_test_var_cell_handler);
  650. /* Try to register it */
  651. channel_register(ch);
  652. tt_assert(ch->registered);
  653. /* Open it */
  654. channel_change_state_open(ch);
  655. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);
  656. /* Receive a fixed cell */
  657. cell = tor_malloc_zero(sizeof(cell_t));
  658. make_fake_cell(cell);
  659. old_count = test_chan_fixed_cells_recved;
  660. channel_queue_cell(ch, cell);
  661. tor_free(cell);
  662. tt_int_op(test_chan_fixed_cells_recved, OP_EQ, old_count + 1);
  663. /* Receive a variable-size cell */
  664. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  665. make_fake_var_cell(var_cell);
  666. old_count = test_chan_var_cells_recved;
  667. channel_queue_var_cell(ch, var_cell);
  668. tor_free(cell);
  669. tt_int_op(test_chan_var_cells_recved, OP_EQ, old_count + 1);
  670. /* Close it */
  671. channel_mark_for_close(ch);
  672. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  673. chan_test_finish_close(ch);
  674. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  675. channel_run_cleanup();
  676. ch = NULL;
  677. done:
  678. free_fake_channel(ch);
  679. tor_free(cell);
  680. tor_free(var_cell);
  681. UNMOCK(scheduler_channel_doesnt_want_writes);
  682. UNMOCK(scheduler_release_channel);
  683. return;
  684. }
/**
 * Normal channel lifecycle test:
 *
 * OPENING->OPEN->MAINT->OPEN->CLOSING->CLOSED
 *
 * Uses two channels so state changes on one can be checked against a
 * quiescent peer, and verifies the scheduler mock counters at each
 * transition.
 */
static void
test_channel_lifecycle(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  cell_t *cell = NULL;
  int old_count, init_doesnt_want_writes_count;
  int init_releases_count;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Cache some initial counter values */
  init_doesnt_want_writes_count = test_doesnt_want_writes_count;
  init_releases_count = test_releases_count;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0;

  ch1 = new_fake_channel();
  tt_assert(ch1);
  /* Start it off in OPENING */
  ch1->state = CHANNEL_STATE_OPENING;
  /* We'll need a cmux */
  ch1->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch1);
  tt_assert(ch1->registered);

  /* Try to write a cell through (should queue) */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  old_count = test_cells_written;
  channel_write_cell(ch1, cell);
  tt_int_op(old_count, OP_EQ, test_cells_written);

  /* Move it to OPEN and flush */
  channel_change_state_open(ch1);

  /* Queue should drain */
  tt_int_op(old_count + 1, OP_EQ, test_cells_written);

  /* Get another one */
  ch2 = new_fake_channel();
  tt_assert(ch2);
  ch2->state = CHANNEL_STATE_OPENING;
  ch2->cmux = circuitmux_alloc();

  /* Register */
  channel_register(ch2);
  tt_assert(ch2->registered);

  /* Check counters */
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Move ch1 to MAINT; this should tell the scheduler not to write */
  channel_change_state(ch1, CHANNEL_STATE_MAINT);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Move ch2 to OPEN; no counter changes expected */
  channel_change_state_open(ch2);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Move ch1 back to OPEN; no further counter changes expected */
  channel_change_state_open(ch1);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count);

  /* Mark ch2 for close; this should release it from the scheduler */
  channel_mark_for_close(ch2);
  tt_int_op(ch2->state, OP_EQ, CHANNEL_STATE_CLOSING);
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  tt_int_op(test_releases_count, OP_EQ, init_releases_count + 1);

  /* Shut down channels */
  channel_free_all();
  ch1 = ch2 = NULL;
  tt_int_op(test_doesnt_want_writes_count, OP_EQ,
            init_doesnt_want_writes_count + 1);
  /* channel_free() calls scheduler_release_channel() */
  tt_int_op(test_releases_count, OP_EQ, init_releases_count + 4);

 done:
  free_fake_channel(ch1);
  free_fake_channel(ch2);

  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Weird channel lifecycle test.
 *
 * Walks fake channels through the less-common state sequences:
 *
 * OPENING->CLOSING->CLOSED              (closed before ever reaching OPEN)
 * OPENING->OPEN->CLOSING->ERROR         (lower-layer error exit)
 * OPENING->OPEN->MAINT->CLOSING->CLOSED (channel_mark_for_close() in MAINT)
 * OPENING->OPEN->MAINT->CLOSING->CLOSED (lower-layer close in MAINT)
 * OPENING->OPEN->MAINT->CLOSING->ERROR  (lower-layer error in MAINT)
 */
static void
test_channel_lifecycle_2(void *arg)
{
  channel_t *ch = NULL;

  (void)arg;

  /* Mock these for the whole lifecycle test */
  MOCK(scheduler_channel_doesnt_want_writes,
       scheduler_channel_doesnt_want_writes_mock);
  MOCK(scheduler_release_channel,
       scheduler_release_channel_mock);

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0;

  /* Case 1: OPENING->CLOSING->CLOSED */
  ch = new_fake_channel();
  tt_assert(ch);
  /* Start it off in OPENING */
  ch->state = CHANNEL_STATE_OPENING;
  /* The full lifecycle test needs a cmux */
  ch->cmux = circuitmux_alloc();

  /* Try to register it */
  channel_register(ch);
  tt_assert(ch->registered);

  /* Try to close it (without ever opening it) */
  channel_mark_for_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish closing it */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* Case 2: OPENING->OPEN->CLOSING->ERROR */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);

  /* Error exit from lower layer */
  chan_test_error(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  chan_test_finish_close(ch);
  /* An error-path close must end in ERROR, not CLOSED */
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* Case 3: OPENING->OPEN->MAINT->CLOSING->CLOSED,
   * close from maintenance state */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_MAINT);

  /* Mark for close while in MAINT */
  channel_mark_for_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /*
   * Case 4: OPENING->OPEN->MAINT->CLOSING->CLOSED, lower-layer close during
   * maintenance state
   */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_MAINT);

  /* Lower layer close */
  channel_close_from_lower_layer(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  channel_run_cleanup();
  ch = NULL;

  /* Case 5: OPENING->OPEN->MAINT->CLOSING->ERROR */
  ch = new_fake_channel();
  tt_assert(ch);
  ch->state = CHANNEL_STATE_OPENING;
  ch->cmux = circuitmux_alloc();
  channel_register(ch);
  tt_assert(ch->registered);

  /* Finish opening it */
  channel_change_state_open(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);

  /* Go to maintenance state */
  channel_change_state(ch, CHANNEL_STATE_MAINT);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_MAINT);

  /* Lower layer error while in MAINT */
  chan_test_error(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);

  /* Finish */
  chan_test_finish_close(ch);
  tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_ERROR);
  channel_run_cleanup();
  ch = NULL;

  /* Shut down channels */
  channel_free_all();

 done:
  /* ch is NULL on the success path; only non-NULL if a tt_* check failed */
  tor_free(ch);
  UNMOCK(scheduler_channel_doesnt_want_writes);
  UNMOCK(scheduler_release_channel);

  return;
}
/**
 * Test per-channel and global transmit-queue size estimates across two
 * channels: queue cells with the lower layer blocked, flush them one
 * channel at a time, and check that closing a channel subtracts its
 * queued bytes from the global estimate.
 */
static void
test_channel_multi(void *arg)
{
  channel_t *ch1 = NULL, *ch2 = NULL;
  uint64_t global_queue_estimate;
  cell_t *cell = NULL;

  (void)arg;

  /* Accept cells to lower layer */
  test_chan_accept_cells = 1;
  /* Use default overhead factor */
  test_overhead_estimate = 1.0;

  ch1 = new_fake_channel();
  tt_assert(ch1);
  ch2 = new_fake_channel();
  tt_assert(ch2);

  /* Initial queue size update: everything should be empty */
  channel_update_xmit_queue_size(ch1);
  tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 0);
  channel_update_xmit_queue_size(ch2);
  tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 0);

  /* Queue some cells, check queue estimates; since the lower layer is
   * accepting cells, nothing should stay queued */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 0);
  tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 0);

  /* Stop accepting cells at lower layer */
  test_chan_accept_cells = 0;

  /* Queue some cells and check queue estimates; one 512-byte cell per
   * channel should now stay queued */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  channel_update_xmit_queue_size(ch1);
  tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 512);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);

  channel_update_xmit_queue_size(ch2);
  tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 1024);

  /* Allow cells through again */
  test_chan_accept_cells = 1;

  /* Flush chan 2 */
  channel_flush_cells(ch2);

  /* Update and check queue sizes: only ch1's cell remains */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 512);
  tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 512);

  /* Flush chan 1 */
  channel_flush_cells(ch1);

  /* Update and check queue sizes: everything drained */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 0);
  tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 0);

  /* Now block again */
  test_chan_accept_cells = 0;

  /* Queue some cells */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch1, cell);

  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  channel_write_cell(ch2, cell);
  cell = NULL;

  /* Check the estimates */
  channel_update_xmit_queue_size(ch1);
  channel_update_xmit_queue_size(ch2);
  tt_u64_op(ch1->bytes_queued_for_xmit, OP_EQ, 512);
  tt_u64_op(ch2->bytes_queued_for_xmit, OP_EQ, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 1024);

  /* Now close channel 2; it should be subtracted from the global queue */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch2);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 512);

  /*
   * Since the fake channels aren't registered, channel_free_all() can't
   * see them properly.
   */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch1);
  UNMOCK(scheduler_release_channel);

  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 0);

  /* Now free everything */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_free_all();
  UNMOCK(scheduler_release_channel);

 done:
  free_fake_channel(ch1);
  free_fake_channel(ch2);

  return;
}
/**
 * Check some hopefully-impossible edge cases in the channel queue we
 * can only trigger by doing evil things to the queue directly: for each
 * queue-entry type (fixed, var, packed), queue a cell, NULL out the cell
 * pointer inside the entry, and verify the bad entry is discarded on
 * flush without writing anything.  Finally, clobber an entry's type tag
 * and verify the flush path logs exactly one BUG warning.
 */
static void
test_channel_queue_impossible(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  packed_cell_t *packed_cell = NULL;
  var_cell_t *var_cell = NULL;
  int old_count;
  cell_queue_entry_t *q = NULL;
  uint64_t global_queue_estimate;
  uintptr_t cellintptr;

  /* Cache the global queue size (see below) */
  global_queue_estimate = channel_get_global_queue_estimate();

  (void)arg;

  ch = new_fake_channel();
  tt_assert(ch);

  /* We test queueing here; tell it not to accept cells */
  test_chan_accept_cells = 0;
  /* ...and keep it from trying to flush the queue */
  ch->state = CHANNEL_STATE_MAINT;

  /* Cache the cell written count */
  old_count = test_cells_written;

  /* Assert that the queue is initially empty */
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);

  /* Get a fresh cell and write it to the channel */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  /* Remember the cell's address so we can verify the queue entry wraps it */
  cellintptr = (uintptr_t)(void*)cell;
  channel_write_cell(ch, cell);

  /* Now it should be queued */
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 1);
  q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  tt_assert(q);
  if (q) {
    tt_int_op(q->type, OP_EQ, CELL_QUEUE_FIXED);
    tt_assert((uintptr_t)q->u.fixed.cell == cellintptr);
  }
  /* Do perverse things to it: free the cell out from under the entry */
  tor_free(q->u.fixed.cell);
  q->u.fixed.cell = NULL;

  /*
   * Now change back to open with channel_change_state() and assert that it
   * gets thrown away properly.
   */
  test_chan_accept_cells = 1;
  channel_change_state_open(ch);
  tt_assert(test_cells_written == old_count);
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);

  /* Same thing but for a var_cell */

  test_chan_accept_cells = 0;
  ch->state = CHANNEL_STATE_MAINT;
  var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  make_fake_var_cell(var_cell);
  cellintptr = (uintptr_t)(void*)var_cell;
  channel_write_var_cell(ch, var_cell);

  /* Check that it's queued */
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 1);
  q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  tt_assert(q);
  if (q) {
    tt_int_op(q->type, OP_EQ, CELL_QUEUE_VAR);
    tt_assert((uintptr_t)q->u.var.var_cell == cellintptr);
  }

  /* Remove the cell from the queue entry */
  tor_free(q->u.var.var_cell);
  q->u.var.var_cell = NULL;

  /* Let it drain and check that the bad entry is discarded */
  test_chan_accept_cells = 1;
  channel_change_state_open(ch);
  tt_assert(test_cells_written == old_count);
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);

  /* Same thing with a packed_cell */

  test_chan_accept_cells = 0;
  ch->state = CHANNEL_STATE_MAINT;
  packed_cell = packed_cell_new();
  tt_assert(packed_cell);
  cellintptr = (uintptr_t)(void*)packed_cell;
  channel_write_packed_cell(ch, packed_cell);

  /* Check that it's queued */
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 1);
  q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  tt_assert(q);
  if (q) {
    tt_int_op(q->type, OP_EQ, CELL_QUEUE_PACKED);
    tt_assert((uintptr_t)q->u.packed.packed_cell == cellintptr);
  }

  /* Remove the cell from the queue entry */
  packed_cell_free(q->u.packed.packed_cell);
  q->u.packed.packed_cell = NULL;

  /* Let it drain and check that the bad entry is discarded */
  test_chan_accept_cells = 1;
  channel_change_state_open(ch);
  tt_assert(test_cells_written == old_count);
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);

  /* Unknown cell type case */
  test_chan_accept_cells = 0;
  ch->state = CHANNEL_STATE_MAINT;
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);
  cellintptr = (uintptr_t)(void*)cell;
  channel_write_cell(ch, cell);

  /* Check that it's queued */
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 1);
  q = TOR_SIMPLEQ_FIRST(&(ch->outgoing_queue));
  tt_assert(q);
  if (q) {
    tt_int_op(q->type, OP_EQ, CELL_QUEUE_FIXED);
    tt_assert((uintptr_t)q->u.fixed.cell == cellintptr);
  }
  /* Clobber it, including the queue entry type */
  tor_free(q->u.fixed.cell);
  q->u.fixed.cell = NULL;
  q->type = CELL_QUEUE_PACKED + 1;

  /* Let it drain and check that the bad entry is discarded; the invalid
   * type tag should trigger exactly one captured BUG warning */
  test_chan_accept_cells = 1;
  tor_capture_bugs_(1);
  channel_change_state_open(ch);
  tt_assert(test_cells_written == old_count);
  tt_int_op(chan_cell_queue_len(&(ch->outgoing_queue)), OP_EQ, 0);
  tt_int_op(smartlist_len(tor_get_captured_bug_log_()), OP_EQ, 1);
  tor_end_capture_bugs_();

 done:
  free_fake_channel(ch);
  /*
   * Doing that meant that we couldn't correctly adjust the queue size
   * for the var cell, so manually reset the global queue size estimate
   * so the next test doesn't break if we run with --no-fork.
   */
  estimated_total_queue_size = global_queue_estimate;

  return;
}
  1153. static void
  1154. test_channel_queue_incoming(void *arg)
  1155. {
  1156. channel_t *ch = NULL;
  1157. cell_t *cell = NULL;
  1158. var_cell_t *var_cell = NULL;
  1159. int old_fixed_count, old_var_count;
  1160. (void)arg;
  1161. /* Mock these for duration of the test */
  1162. MOCK(scheduler_channel_doesnt_want_writes,
  1163. scheduler_channel_doesnt_want_writes_mock);
  1164. MOCK(scheduler_release_channel,
  1165. scheduler_release_channel_mock);
  1166. /* Accept cells to lower layer */
  1167. test_chan_accept_cells = 1;
  1168. /* Use default overhead factor */
  1169. test_overhead_estimate = 1.0;
  1170. ch = new_fake_channel();
  1171. tt_assert(ch);
  1172. /* Start it off in OPENING */
  1173. ch->state = CHANNEL_STATE_OPENING;
  1174. /* We'll need a cmux */
  1175. ch->cmux = circuitmux_alloc();
  1176. /* Test cell handler getters */
  1177. tt_ptr_op(channel_get_cell_handler(ch), OP_EQ, NULL);
  1178. tt_ptr_op(channel_get_var_cell_handler(ch), OP_EQ, NULL);
  1179. /* Try to register it */
  1180. channel_register(ch);
  1181. tt_assert(ch->registered);
  1182. /* Open it */
  1183. channel_change_state_open(ch);
  1184. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_OPEN);
  1185. /* Assert that the incoming queue is empty */
  1186. tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
  1187. /* Queue an incoming fixed-length cell */
  1188. cell = tor_malloc_zero(sizeof(cell_t));
  1189. make_fake_cell(cell);
  1190. channel_queue_cell(ch, cell);
  1191. /* Assert that the incoming queue has one entry */
  1192. tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), OP_EQ, 1);
  1193. /* Queue an incoming var cell */
  1194. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  1195. make_fake_var_cell(var_cell);
  1196. channel_queue_var_cell(ch, var_cell);
  1197. /* Assert that the incoming queue has two entries */
  1198. tt_int_op(chan_cell_queue_len(&(ch->incoming_queue)), OP_EQ, 2);
  1199. /*
  1200. * Install cell handlers; this will drain the queue, so save the old
  1201. * cell counters first
  1202. */
  1203. old_fixed_count = test_chan_fixed_cells_recved;
  1204. old_var_count = test_chan_var_cells_recved;
  1205. channel_set_cell_handlers(ch,
  1206. chan_test_cell_handler,
  1207. chan_test_var_cell_handler);
  1208. tt_ptr_op(channel_get_cell_handler(ch), OP_EQ, chan_test_cell_handler);
  1209. tt_ptr_op(channel_get_var_cell_handler(ch), OP_EQ,
  1210. chan_test_var_cell_handler);
  1211. /* Assert cells were received */
  1212. tt_int_op(test_chan_fixed_cells_recved, OP_EQ, old_fixed_count + 1);
  1213. tt_int_op(test_chan_var_cells_recved, OP_EQ, old_var_count + 1);
  1214. /*
  1215. * Assert that the pointers are different from the cells we allocated;
  1216. * when queueing cells with no incoming cell handlers installed, the
  1217. * channel layer should copy them to a new buffer, and free them after
  1218. * delivery. These pointers will have already been freed by the time
  1219. * we get here, so don't dereference them.
  1220. */
  1221. tt_ptr_op(test_chan_last_seen_fixed_cell_ptr, OP_NE, cell);
  1222. tt_ptr_op(test_chan_last_seen_var_cell_ptr, OP_NE, var_cell);
  1223. /* Assert queue is now empty */
  1224. tt_assert(TOR_SIMPLEQ_EMPTY(&(ch->incoming_queue)));
  1225. /* Close it; this contains an assertion that the incoming queue is empty */
  1226. channel_mark_for_close(ch);
  1227. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSING);
  1228. chan_test_finish_close(ch);
  1229. tt_int_op(ch->state, OP_EQ, CHANNEL_STATE_CLOSED);
  1230. channel_run_cleanup();
  1231. ch = NULL;
  1232. done:
  1233. free_fake_channel(ch);
  1234. tor_free(cell);
  1235. tor_free(var_cell);
  1236. UNMOCK(scheduler_channel_doesnt_want_writes);
  1237. UNMOCK(scheduler_release_channel);
  1238. return;
  1239. }
/**
 * Test transmit-queue size accounting on a single channel, including the
 * overhead-factor handling in channel_update_xmit_queue_size() and the
 * queue-length adjustment in channel_num_cells_writeable().
 */
static void
test_channel_queue_size(void *arg)
{
  channel_t *ch = NULL;
  cell_t *cell = NULL;
  int n, old_count;
  uint64_t global_queue_estimate;

  (void)arg;

  ch = new_fake_channel();
  tt_assert(ch);

  /* Initial queue size update: empty channel, empty global estimate */
  channel_update_xmit_queue_size(ch);
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 0);

  /* Test the call-through to our fake lower layer */
  n = channel_num_cells_writeable(ch);
  /* chan_test_num_cells_writeable() always returns 32 */
  tt_int_op(n, OP_EQ, 32);

  /*
   * Now we queue some cells and check that channel_num_cells_writeable()
   * adjusts properly
   */

  /* tell it not to accept cells */
  test_chan_accept_cells = 0;
  /* ...and keep it from trying to flush the queue */
  ch->state = CHANNEL_STATE_MAINT;

  /* Get a fresh cell */
  cell = tor_malloc_zero(sizeof(cell_t));
  make_fake_cell(cell);

  old_count = test_cells_written;
  channel_write_cell(ch, cell);
  /* Assert that it got queued, not written through, correctly */
  tt_int_op(test_cells_written, OP_EQ, old_count);

  /* Now check chan_test_num_cells_writeable() again */
  n = channel_num_cells_writeable(ch);
  /* Should return 0 since we're in CHANNEL_STATE_MAINT */
  tt_int_op(n, OP_EQ, 0);

  /* Update queue size estimates */
  channel_update_xmit_queue_size(ch);
  /* One cell, times an overhead factor of 1.0 */
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  /* Try a different overhead factor */
  test_overhead_estimate = 0.5;
  /* This one should be ignored since it's below 1.0 */
  channel_update_xmit_queue_size(ch);
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  /* Now try a larger one: 512 bytes * 2.0 overhead */
  test_overhead_estimate = 2.0;
  channel_update_xmit_queue_size(ch);
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 1024);
  /* Go back to 1.0 */
  test_overhead_estimate = 1.0;
  channel_update_xmit_queue_size(ch);
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  /* Check the global estimate too */
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 512);

  /* Go to open */
  old_count = test_cells_written;
  channel_change_state_open(ch);

  /*
   * It should try to write, but we aren't accepting cells right now, so
   * it'll requeue
   */
  tt_int_op(test_cells_written, OP_EQ, old_count);

  /* Check the queue size again */
  channel_update_xmit_queue_size(ch);
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 512);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 512);

  /*
   * Now the cell is in the queue, and we're open, so we should get 31
   * writeable cells.
   */
  n = channel_num_cells_writeable(ch);
  tt_int_op(n, OP_EQ, 31);

  /* Accept cells again */
  test_chan_accept_cells = 1;
  /* ...and re-process the queue */
  old_count = test_cells_written;
  channel_flush_cells(ch);
  tt_int_op(test_cells_written, OP_EQ, old_count + 1);

  /* Should have 32 writeable now */
  n = channel_num_cells_writeable(ch);
  tt_int_op(n, OP_EQ, 32);

  /* Should have queue size estimate of zero */
  channel_update_xmit_queue_size(ch);
  tt_u64_op(ch->bytes_queued_for_xmit, OP_EQ, 0);
  global_queue_estimate = channel_get_global_queue_estimate();
  tt_u64_op(global_queue_estimate, OP_EQ, 0);

  /* Okay, now we're done with this one */
  MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  channel_mark_for_close(ch);
  UNMOCK(scheduler_release_channel);

 done:
  free_fake_channel(ch);

  return;
}
  1339. static void
  1340. test_channel_write(void *arg)
  1341. {
  1342. channel_t *ch = NULL;
  1343. cell_t *cell = tor_malloc_zero(sizeof(cell_t));
  1344. packed_cell_t *packed_cell = NULL;
  1345. var_cell_t *var_cell =
  1346. tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  1347. int old_count;
  1348. (void)arg;
  1349. packed_cell = packed_cell_new();
  1350. tt_assert(packed_cell);
  1351. ch = new_fake_channel();
  1352. tt_assert(ch);
  1353. make_fake_cell(cell);
  1354. make_fake_var_cell(var_cell);
  1355. /* Tell it to accept cells */
  1356. test_chan_accept_cells = 1;
  1357. old_count = test_cells_written;
  1358. channel_write_cell(ch, cell);
  1359. cell = NULL;
  1360. tt_assert(test_cells_written == old_count + 1);
  1361. channel_write_var_cell(ch, var_cell);
  1362. var_cell = NULL;
  1363. tt_assert(test_cells_written == old_count + 2);
  1364. channel_write_packed_cell(ch, packed_cell);
  1365. packed_cell = NULL;
  1366. tt_assert(test_cells_written == old_count + 3);
  1367. /* Now we test queueing; tell it not to accept cells */
  1368. test_chan_accept_cells = 0;
  1369. /* ...and keep it from trying to flush the queue */
  1370. ch->state = CHANNEL_STATE_MAINT;
  1371. /* Get a fresh cell */
  1372. cell = tor_malloc_zero(sizeof(cell_t));
  1373. make_fake_cell(cell);
  1374. old_count = test_cells_written;
  1375. channel_write_cell(ch, cell);
  1376. tt_assert(test_cells_written == old_count);
  1377. /*
  1378. * Now change back to open with channel_change_state() and assert that it
  1379. * gets drained from the queue.
  1380. */
  1381. test_chan_accept_cells = 1;
  1382. channel_change_state_open(ch);
  1383. tt_assert(test_cells_written == old_count + 1);
  1384. /*
  1385. * Check the note destroy case
  1386. */
  1387. cell = tor_malloc_zero(sizeof(cell_t));
  1388. make_fake_cell(cell);
  1389. cell->command = CELL_DESTROY;
  1390. /* Set up the mock */
  1391. MOCK(channel_note_destroy_not_pending,
  1392. channel_note_destroy_not_pending_mock);
  1393. old_count = test_destroy_not_pending_calls;
  1394. channel_write_cell(ch, cell);
  1395. tt_assert(test_destroy_not_pending_calls == old_count + 1);
  1396. /* Now send a non-destroy and check we don't call it */
  1397. cell = tor_malloc_zero(sizeof(cell_t));
  1398. make_fake_cell(cell);
  1399. channel_write_cell(ch, cell);
  1400. tt_assert(test_destroy_not_pending_calls == old_count + 1);
  1401. UNMOCK(channel_note_destroy_not_pending);
  1402. /*
  1403. * Now switch it to CLOSING so we can test the discard-cells case
  1404. * in the channel_write_*() functions.
  1405. */
  1406. MOCK(scheduler_release_channel, scheduler_release_channel_mock);
  1407. channel_mark_for_close(ch);
  1408. UNMOCK(scheduler_release_channel);
  1409. /* Send cells that will drop in the closing state */
  1410. old_count = test_cells_written;
  1411. cell = tor_malloc_zero(sizeof(cell_t));
  1412. make_fake_cell(cell);
  1413. channel_write_cell(ch, cell);
  1414. cell = NULL;
  1415. tt_assert(test_cells_written == old_count);
  1416. var_cell = tor_malloc_zero(sizeof(var_cell_t) + CELL_PAYLOAD_SIZE);
  1417. make_fake_var_cell(var_cell);
  1418. channel_write_var_cell(ch, var_cell);
  1419. var_cell = NULL;
  1420. tt_assert(test_cells_written == old_count);
  1421. packed_cell = packed_cell_new();
  1422. channel_write_packed_cell(ch, packed_cell);
  1423. packed_cell = NULL;
  1424. tt_assert(test_cells_written == old_count);
  1425. done:
  1426. free_fake_channel(ch);
  1427. tor_free(var_cell);
  1428. tor_free(cell);
  1429. packed_cell_free(packed_cell);
  1430. return;
  1431. }
  1432. static void
  1433. test_channel_id_map(void *arg)
  1434. {
  1435. (void)arg;
  1436. #define N_CHAN 6
  1437. char rsa_id[N_CHAN][DIGEST_LEN];
  1438. ed25519_public_key_t *ed_id[N_CHAN];
  1439. channel_t *chan[N_CHAN];
  1440. int i;
  1441. ed25519_public_key_t ed_zero;
  1442. memset(&ed_zero, 0, sizeof(ed_zero));
  1443. tt_int_op(DIGEST_LEN, OP_EQ, sizeof(rsa_id[0])); // Do I remember C?
  1444. for (i = 0; i < N_CHAN; ++i) {
  1445. crypto_rand(rsa_id[i], DIGEST_LEN);
  1446. ed_id[i] = tor_malloc_zero(sizeof(*ed_id[i]));
  1447. crypto_rand((char*)ed_id[i]->pubkey, sizeof(ed_id[i]->pubkey));
  1448. }
  1449. /* For channel 3, have no Ed identity. */
  1450. tor_free(ed_id[3]);
  1451. /* Channel 2 and 4 have same ROSA identity */
  1452. memcpy(rsa_id[4], rsa_id[2], DIGEST_LEN);
  1453. /* Channel 2 and 4 and 5 have same RSA identity */
  1454. memcpy(rsa_id[4], rsa_id[2], DIGEST_LEN);
  1455. memcpy(rsa_id[5], rsa_id[2], DIGEST_LEN);
  1456. /* Channels 2 and 5 have same Ed25519 identity */
  1457. memcpy(ed_id[5], ed_id[2], sizeof(*ed_id[2]));
  1458. for (i = 0; i < N_CHAN; ++i) {
  1459. chan[i] = new_fake_channel();
  1460. channel_register(chan[i]);
  1461. channel_set_identity_digest(chan[i], rsa_id[i], ed_id[i]);
  1462. }
  1463. /* Lookup by RSA id only */
  1464. tt_ptr_op(chan[0], OP_EQ,
  1465. channel_find_by_remote_identity(rsa_id[0], NULL));
  1466. tt_ptr_op(chan[1], OP_EQ,
  1467. channel_find_by_remote_identity(rsa_id[1], NULL));
  1468. tt_ptr_op(chan[3], OP_EQ,
  1469. channel_find_by_remote_identity(rsa_id[3], NULL));
  1470. channel_t *ch;
  1471. ch = channel_find_by_remote_identity(rsa_id[2], NULL);
  1472. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1473. ch = channel_next_with_rsa_identity(ch);
  1474. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1475. ch = channel_next_with_rsa_identity(ch);
  1476. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1477. ch = channel_next_with_rsa_identity(ch);
  1478. tt_ptr_op(ch, OP_EQ, NULL);
  1479. /* As above, but with zero Ed25519 ID (meaning "any ID") */
  1480. tt_ptr_op(chan[0], OP_EQ,
  1481. channel_find_by_remote_identity(rsa_id[0], &ed_zero));
  1482. tt_ptr_op(chan[1], OP_EQ,
  1483. channel_find_by_remote_identity(rsa_id[1], &ed_zero));
  1484. tt_ptr_op(chan[3], OP_EQ,
  1485. channel_find_by_remote_identity(rsa_id[3], &ed_zero));
  1486. ch = channel_find_by_remote_identity(rsa_id[2], &ed_zero);
  1487. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1488. ch = channel_next_with_rsa_identity(ch);
  1489. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1490. ch = channel_next_with_rsa_identity(ch);
  1491. tt_assert(ch == chan[2] || ch == chan[4] || ch == chan[5]);
  1492. ch = channel_next_with_rsa_identity(ch);
  1493. tt_ptr_op(ch, OP_EQ, NULL);
  1494. /* Lookup nonexistent RSA identity */
  1495. tt_ptr_op(NULL, OP_EQ,
  1496. channel_find_by_remote_identity("!!!!!!!!!!!!!!!!!!!!", NULL));
  1497. /* Look up by full identity pair */
  1498. tt_ptr_op(chan[0], OP_EQ,
  1499. channel_find_by_remote_identity(rsa_id[0], ed_id[0]));
  1500. tt_ptr_op(chan[1], OP_EQ,
  1501. channel_find_by_remote_identity(rsa_id[1], ed_id[1]));
  1502. tt_ptr_op(chan[3], OP_EQ,
  1503. channel_find_by_remote_identity(rsa_id[3], ed_id[3] /*NULL*/));
  1504. tt_ptr_op(chan[4], OP_EQ,
  1505. channel_find_by_remote_identity(rsa_id[4], ed_id[4]));
  1506. ch = channel_find_by_remote_identity(rsa_id[2], ed_id[2]);
  1507. tt_assert(ch == chan[2] || ch == chan[5]);
  1508. /* Look up RSA identity with wrong ed25519 identity */
  1509. tt_ptr_op(NULL, OP_EQ,
  1510. channel_find_by_remote_identity(rsa_id[4], ed_id[0]));
  1511. tt_ptr_op(NULL, OP_EQ,
  1512. channel_find_by_remote_identity(rsa_id[2], ed_id[1]));
  1513. tt_ptr_op(NULL, OP_EQ,
  1514. channel_find_by_remote_identity(rsa_id[3], ed_id[1]));
  1515. done:
  1516. for (i = 0; i < N_CHAN; ++i) {
  1517. channel_clear_identity_digest(chan[i]);
  1518. channel_unregister(chan[i]);
  1519. free_fake_channel(chan[i]);
  1520. tor_free(ed_id[i]);
  1521. }
  1522. #undef N_CHAN
  1523. }
/** Test-case registry for channel.c unit tests; each case is run with
 * TT_FORK, which isolates the file-scope mock counters and global channel
 * state between cases. */
struct testcase_t channel_tests[] = {
  { "dumpstats", test_channel_dumpstats, TT_FORK, NULL, NULL },
  { "flush", test_channel_flush, TT_FORK, NULL, NULL },
  { "flushmux", test_channel_flushmux, TT_FORK, NULL, NULL },
  { "incoming", test_channel_incoming, TT_FORK, NULL, NULL },
  { "lifecycle", test_channel_lifecycle, TT_FORK, NULL, NULL },
  { "lifecycle_2", test_channel_lifecycle_2, TT_FORK, NULL, NULL },
  { "multi", test_channel_multi, TT_FORK, NULL, NULL },
  { "queue_impossible", test_channel_queue_impossible, TT_FORK, NULL, NULL },
  { "queue_incoming", test_channel_queue_incoming, TT_FORK, NULL, NULL },
  { "queue_size", test_channel_queue_size, TT_FORK, NULL, NULL },
  { "write", test_channel_write, TT_FORK, NULL, NULL },
  { "id_map", test_channel_id_map, TT_FORK, NULL, NULL },
  END_OF_TESTCASES
};