test_hs_common.c

  1. /* Copyright (c) 2017, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. /**
  4. * \file test_hs_common.c
  5. * \brief Test hidden service common functionalities.
  6. */
  7. #define HS_COMMON_PRIVATE
  8. #define HS_SERVICE_PRIVATE
  9. #define NODELIST_PRIVATE
  10. #include "test.h"
  11. #include "test_helpers.h"
  12. #include "log_test_helpers.h"
  13. #include "hs_test_helpers.h"
  14. #include "connection_edge.h"
  15. #include "hs_common.h"
  16. #include "hs_service.h"
  17. #include "config.h"
  18. #include "networkstatus.h"
  19. #include "directory.h"
  20. #include "nodelist.h"
  21. #include "routerlist.h"
  22. #include "statefile.h"
  23. #include "circuitlist.h"
  24. #include "shared_random.h"
  25. #include "util.h"
  26. /** Test the validation of HS v3 addresses */
  27. static void
  28. test_validate_address(void *arg)
  29. {
  30. int ret;
  31. (void) arg;
  32. /* Address too short and too long. */
  33. setup_full_capture_of_logs(LOG_WARN);
  34. ret = hs_address_is_valid("blah");
  35. tt_int_op(ret, OP_EQ, 0);
  36. expect_log_msg_containing("has an invalid length");
  37. teardown_capture_of_logs();
  38. setup_full_capture_of_logs(LOG_WARN);
  39. ret = hs_address_is_valid(
  40. "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  41. tt_int_op(ret, OP_EQ, 0);
  42. expect_log_msg_containing("has an invalid length");
  43. teardown_capture_of_logs();
  44. /* Invalid checksum (taken from prop224) */
  45. setup_full_capture_of_logs(LOG_WARN);
  46. ret = hs_address_is_valid(
  47. "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  48. tt_int_op(ret, OP_EQ, 0);
  49. expect_log_msg_containing("invalid checksum");
  50. teardown_capture_of_logs();
  51. setup_full_capture_of_logs(LOG_WARN);
  52. ret = hs_address_is_valid(
  53. "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  54. tt_int_op(ret, OP_EQ, 0);
  55. expect_log_msg_containing("invalid checksum");
  56. teardown_capture_of_logs();
  57. /* Non base32 decodable string. */
  58. setup_full_capture_of_logs(LOG_WARN);
  59. ret = hs_address_is_valid(
  60. "????????????????????????????????????????????????????????");
  61. tt_int_op(ret, OP_EQ, 0);
  62. expect_log_msg_containing("can't be decoded");
  63. teardown_capture_of_logs();
  64. /* Valid address. */
  65. ret = hs_address_is_valid(
  66. "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnad");
  67. tt_int_op(ret, OP_EQ, 1);
  68. done:
  69. ;
  70. }
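/* For reference, a sketch of the v3 address layout that the checks above
 * exercise, as described in proposal 224 (hedged here; the spec is the
 * authoritative definition):
 *
 *   onion_address = base32(PUBKEY | CHECKSUM | VERSION)
 *   CHECKSUM      = H(".onion checksum" | PUBKEY | VERSION)[:2]
 *
 * where PUBKEY is the 32 byte ed25519 identity key, VERSION is the single
 * byte 0x03 and H is SHA3-256. The encoded form is therefore always
 * HS_SERVICE_ADDR_LEN_BASE32 (56) characters, which is what the "invalid
 * length" and "invalid checksum" warnings above refer to. */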
  71. static int
  72. mock_write_str_to_file(const char *path, const char *str, int bin)
  73. {
  74. (void)bin;
  75. tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  76. tt_str_op(str, OP_EQ,
  77. "ijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbezhid.onion\n");
  78. done:
  79. return 0;
  80. }
  81. /** Test building HS v3 onion addresses */
  82. static void
  83. test_build_address(void *arg)
  84. {
  85. int ret;
  86. char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  87. ed25519_public_key_t pubkey;
  88. hs_service_t *service = NULL;
  89. (void) arg;
  90. MOCK(write_str_to_file, mock_write_str_to_file);
  91. /* The following has been created with the hs_build_address.py script, which
  92. * follows the proposal 224 specification to build an onion address. */
  93. static const char *test_addr =
  94. "ijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbezhid";
  95. /* Let's try to build the same onion address as the script does. The key is
  96. * a long series of very random \x42 bytes :). */
  97. memset(&pubkey, '\x42', sizeof(pubkey));
  98. hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  99. tt_str_op(test_addr, OP_EQ, onion_addr);
  100. /* Validate that address. */
  101. ret = hs_address_is_valid(onion_addr);
  102. tt_int_op(ret, OP_EQ, 1);
  103. service = tor_malloc_zero(sizeof(hs_service_t));
  104. memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  105. tor_asprintf(&service->config.directory_path, "/double/five");
  106. ret = write_address_to_file(service, "squared");
  107. tt_int_op(ret, OP_EQ, 0);
  108. done:
  109. hs_service_free(service);
  110. }
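/* A short worked note on the expected string above (a sketch, assuming the
 * prop224 layout sketched earlier): the 32 identity key bytes are all \x42,
 * and base32 of a repeating \x42 byte produces the repeating "ijbeeqsc"
 * pattern. Only the tail of the 56 character address differs, because the
 * last bytes encode the two checksum bytes and the version byte rather than
 * key material. */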
  111. /** Test that our HS time period calculation functions work properly */
  112. static void
  113. test_time_period(void *arg)
  114. {
  115. (void) arg;
  116. uint64_t tn;
  117. int retval;
  118. time_t fake_time, correct_time, start_time;
  119. /* Let's do the example in prop224 section [TIME-PERIODS] */
  120. retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
  121. &fake_time);
  122. tt_int_op(retval, OP_EQ, 0);
  123. /* Check that the time period number is right */
  124. tn = hs_get_time_period_num(fake_time);
  125. tt_u64_op(tn, OP_EQ, 16903);
  126. /* Increase current time to 11:59:59 UTC and check that the time period
  127. number is still the same */
  128. fake_time += 3599;
  129. tn = hs_get_time_period_num(fake_time);
  130. tt_u64_op(tn, OP_EQ, 16903);
  131. { /* Check start time of next time period */
  132. retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
  133. &correct_time);
  134. tt_int_op(retval, OP_EQ, 0);
  135. start_time = hs_get_start_time_of_next_time_period(fake_time);
  136. tt_int_op(start_time, OP_EQ, correct_time);
  137. }
  138. /* Now take time to 12:00:00 UTC and check that the time period rotated */
  139. fake_time += 1;
  140. tn = hs_get_time_period_num(fake_time);
  141. tt_u64_op(tn, OP_EQ, 16904);
  142. /* Now also check our hs_get_next_time_period_num() function */
  143. tn = hs_get_next_time_period_num(fake_time);
  144. tt_u64_op(tn, OP_EQ, 16905);
  145. { /* Check start time of next time period again */
  146. retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
  147. &correct_time);
  148. tt_int_op(retval, OP_EQ, 0);
  149. start_time = hs_get_start_time_of_next_time_period(fake_time);
  150. tt_int_op(start_time, OP_EQ, correct_time);
  151. }
  152. /* Now do another sanity check: The time period number at the start of the
  153. * next time period must be the same time period number as the one returned
  154. * from hs_get_next_time_period_num() */
  155. {
  156. time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
  157. tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
  158. hs_get_next_time_period_num(fake_time));
  159. }
  160. done:
  161. ;
  162. }
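/* A worked version of the arithmetic behind the expected values above (a
 * sketch, assuming the prop224 defaults of a 1440 minute time period and a
 * 12 hour rotation offset; not compiled, illustration only). */
#if 0
static uint64_t
example_time_period_num(time_t now)
{
  /* Default period length is 24 hours; periods rotate at 12:00 UTC, so the
   * 12 hour offset is subtracted before dividing. */
  return ((uint64_t)now - (12 * 3600)) / (24 * 3600);
}
/* Wed, 13 Apr 2016 11:00:00 UTC is 1460545200, so
 *   (1460545200 - 43200) / 86400 = 16903 (integer division)
 * and at 12:00:00 UTC (1460548800) the quotient is exactly 16904, which is
 * why the period number rotates at that instant. */
#endif /* 0 */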
  163. /** Test that we can correctly find the start time of the next time period */
  164. static void
  165. test_start_time_of_next_time_period(void *arg)
  166. {
  167. (void) arg;
  168. int retval;
  169. time_t fake_time;
  170. char tbuf[ISO_TIME_LEN + 1];
  171. time_t next_tp_start_time;
  172. /* Do some basic tests */
  173. retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
  174. &fake_time);
  175. tt_int_op(retval, OP_EQ, 0);
  176. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  177. /* Compare it with the correct result */
  178. format_iso_time(tbuf, next_tp_start_time);
  179. tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);
  180. /* Another test with an edge-case time (start of TP) */
  181. retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
  182. &fake_time);
  183. tt_int_op(retval, OP_EQ, 0);
  184. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  185. format_iso_time(tbuf, next_tp_start_time);
  186. tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);
  187. {
  188. /* Now pretend we are on a testing network and alter the voting schedule to
  189. be every 10 seconds. This means that a time period has length 10*24
  190. seconds (4 minutes). It also means that we apply a rotational offset of
  191. 120 seconds to the time period, so that it starts at 00:02:00 instead of
  192. 00:00:00. */
  193. or_options_t *options = get_options_mutable();
  194. options->TestingTorNetwork = 1;
  195. options->V3AuthVotingInterval = 10;
  196. options->TestingV3AuthInitialVotingInterval = 10;
  197. retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
  198. &fake_time);
  199. tt_int_op(retval, OP_EQ, 0);
  200. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  201. /* Compare it with the correct result */
  202. format_iso_time(tbuf, next_tp_start_time);
  203. tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);
  204. retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
  205. &fake_time);
  206. tt_int_op(retval, OP_EQ, 0);
  207. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  208. /* Compare it with the correct result */
  209. format_iso_time(tbuf, next_tp_start_time);
  210. tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  211. }
  212. done:
  213. ;
  214. }
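/* The testing-network expectations above follow from the same formula with
 * scaled-down parameters (based on the comment in the block above): with a
 * 10 second voting interval the time period length becomes 10*24 = 240
 * seconds and the rotation offset becomes 120 seconds, so period boundaries
 * fall at 00:02:00, 00:06:00, 00:10:00, ... which is why the next start
 * times are 00:02:00 (from 00:00:00) and 00:06:00 (from exactly 00:02:00). */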
  215. /* Clean up the global nodelist. It also frees the "md" in the node_t because
  216. * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
  217. static void
  218. cleanup_nodelist(void)
  219. {
  220. smartlist_t *nodelist = nodelist_get_list();
  221. SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
  222. tor_free(node->md);
  223. node->md = NULL;
  224. } SMARTLIST_FOREACH_END(node);
  225. nodelist_free_all();
  226. }
  227. static void
  228. helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
  229. int identity_idx,
  230. const char *nickname,
  231. int is_hsdir)
  232. {
  233. routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  234. routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  235. uint8_t identity[DIGEST_LEN];
  236. tor_addr_t ipv4_addr;
  237. memset(identity, identity_idx, sizeof(identity));
  238. memcpy(rs->identity_digest, identity, DIGEST_LEN);
  239. rs->is_hs_dir = is_hsdir;
  240. rs->supports_v3_hsdir = 1;
  241. strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  242. tor_addr_parse(&ipv4_addr, "1.2.3.4");
  243. ri->addr = tor_addr_to_ipv4h(&ipv4_addr);
  244. rs->addr = tor_addr_to_ipv4h(&ipv4_addr);
  245. ri->nickname = tor_strdup(nickname);
  246. ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  247. memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  248. ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  249. /* Needed for the HSDir index computation. */
  250. memset(&ri->cache_info.signing_key_cert->signing_key,
  251. identity_idx, ED25519_PUBKEY_LEN);
  252. tt_assert(nodelist_set_routerinfo(ri, NULL));
  253. node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  254. tt_assert(node);
  255. node->rs = rs;
  256. /* We need this to exist for node_has_descriptor() to return true. */
  257. node->md = tor_malloc_zero(sizeof(microdesc_t));
  258. /* Do this now: setting the HSDir indexes needs an "rs", which the node
  259. * doesn't have yet when nodelist_set_routerinfo() is called. */
  260. node_set_hsdir_index(node, ns);
  261. node->ri = NULL;
  262. smartlist_add(ns->routerstatus_list, rs);
  263. done:
  264. routerinfo_free(ri);
  265. }
  266. static networkstatus_t *mock_ns = NULL;
  267. static networkstatus_t *
  268. mock_networkstatus_get_latest_consensus(void)
  269. {
  270. time_t now = approx_time();
  271. /* If initialized, return it */
  272. if (mock_ns) {
  273. return mock_ns;
  274. }
  275. /* Initialize fake consensus */
  276. mock_ns = tor_malloc_zero(sizeof(networkstatus_t));
  277. /* This consensus is live */
  278. mock_ns->valid_after = now-1;
  279. mock_ns->fresh_until = now+1;
  280. mock_ns->valid_until = now+2;
  281. /* Create routerstatus list */
  282. mock_ns->routerstatus_list = smartlist_new();
  283. mock_ns->type = NS_TYPE_CONSENSUS;
  284. return mock_ns;
  285. }
  286. /** Test the responsible HSDirs calculation function */
  287. static void
  288. test_responsible_hsdirs(void *arg)
  289. {
  290. time_t now = approx_time();
  291. smartlist_t *responsible_dirs = smartlist_new();
  292. networkstatus_t *ns = NULL;
  293. int retval;
  294. (void) arg;
  295. hs_init();
  296. MOCK(networkstatus_get_latest_consensus,
  297. mock_networkstatus_get_latest_consensus);
  298. ns = networkstatus_get_latest_consensus();
  299. { /* First router: HSdir */
  300. helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  301. }
  302. { /* Second HSDir */
  303. helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  304. }
  305. { /* Third relay but not HSDir */
  306. helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  307. }
  308. ed25519_keypair_t kp;
  309. retval = ed25519_keypair_generate(&kp, 0);
  310. tt_int_op(retval, OP_EQ, 0);
  311. uint64_t time_period_num = hs_get_time_period_num(now);
  312. hs_get_responsible_hsdirs(&kp.pubkey, time_period_num,
  313. 0, 0, responsible_dirs);
  314. /* Make sure that we only found 2 responsible HSDirs.
  315. * The third relay was not an hsdir! */
  316. tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);
  317. /** TODO: Build a bigger network and do more tests here */
  318. done:
  319. SMARTLIST_FOREACH(ns->routerstatus_list,
  320. routerstatus_t *, rs, routerstatus_free(rs));
  321. smartlist_free(responsible_dirs);
  322. smartlist_clear(ns->routerstatus_list);
  323. networkstatus_vote_free(mock_ns);
  324. cleanup_nodelist();
  325. }
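/* Note on the expected count above: the responsible set is capped by the
 * number of HSDir-flagged relays available, which is 2 here. With a fully
 * populated ring, the prop224 defaults (2 replicas times a spread of 3)
 * would yield the 6 responsible HSDirs that the reachability tests further
 * down expect; this is an informal reading of the defaults, not something
 * asserted by this test. */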
  326. static void
  327. mock_directory_initiate_request(directory_request_t *req)
  328. {
  329. (void)req;
  330. return;
  331. }
  332. static int
  333. mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
  334. const ed25519_keypair_t *signing_kp,
  335. char **encoded_out)
  336. {
  337. (void)desc;
  338. (void)signing_kp;
  339. tor_asprintf(encoded_out, "lulu");
  340. return 0;
  341. }
  342. static or_state_t dummy_state;
  343. /* Mock function to get fake or state (used for rev counters) */
  344. static or_state_t *
  345. get_or_state_replacement(void)
  346. {
  347. return &dummy_state;
  348. }
  349. static int
  350. mock_router_have_minimum_dir_info(void)
  351. {
  352. return 1;
  353. }
  354. /** Test that we correctly detect when the HSDir hash ring changes so that we
  355. * reupload our descriptor. */
  356. static void
  357. test_desc_reupload_logic(void *arg)
  358. {
  359. networkstatus_t *ns = NULL;
  360. (void) arg;
  361. hs_init();
  362. MOCK(router_have_minimum_dir_info,
  363. mock_router_have_minimum_dir_info);
  364. MOCK(get_or_state,
  365. get_or_state_replacement);
  366. MOCK(networkstatus_get_latest_consensus,
  367. mock_networkstatus_get_latest_consensus);
  368. MOCK(directory_initiate_request,
  369. mock_directory_initiate_request);
  370. MOCK(hs_desc_encode_descriptor,
  371. mock_hs_desc_encode_descriptor);
  372. ns = networkstatus_get_latest_consensus();
  373. /** Test logic:
  374. * 1) Upload descriptor to HSDirs
  375. * CHECK that previous_hsdirs list was populated.
  376. * 2) Then call router_dir_info_changed() without an HSDir set change.
  377. * CHECK that no reupload occurs.
  378. * 3) Now change the HSDir set, and call router_dir_info_changed() again.
  379. * CHECK that reupload occurs.
  380. * 4) Finally call service_desc_schedule_upload().
  381. * CHECK that previous_hsdirs list was cleared.
  382. **/
  383. /* Let's start by building our descriptor and service */
  384. hs_service_descriptor_t *desc = service_descriptor_new();
  385. hs_service_t *service = NULL;
  386. char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  387. ed25519_public_key_t pubkey;
  388. memset(&pubkey, '\x42', sizeof(pubkey));
  389. hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  390. service = tor_malloc_zero(sizeof(hs_service_t));
  391. memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  392. ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  393. ed25519_public_key_generate(&service->keys.identity_pk,
  394. &service->keys.identity_sk);
  395. service->desc_current = desc;
  396. /* Also add service to service map */
  397. hs_service_ht *service_map = get_hs_service_map();
  398. tt_assert(service_map);
  399. tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  400. register_service(service_map, service);
  401. tt_int_op(hs_service_get_num_services(), OP_EQ, 1);
  402. /* Now let's create our hash ring: */
  403. {
  404. helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
  405. helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
  406. helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
  407. helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
  408. helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
  409. helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  410. }
  411. /* Now let's upload our desc to all hsdirs */
  412. upload_descriptor_to_all(service, desc);
  413. /* Check that previous hsdirs were populated */
  414. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  415. /* Poison next upload time so that we can see if it was changed by
  416. * router_dir_info_changed(). No changes in hash ring so far, so the upload
  417. * time should stay as is. */
  418. desc->next_upload_time = 42;
  419. router_dir_info_changed();
  420. tt_int_op(desc->next_upload_time, OP_EQ, 42);
  421. /* Now change the HSDir hash ring by swapping nora for aaron.
  422. * Start by clearing the hash ring */
  423. {
  424. SMARTLIST_FOREACH(ns->routerstatus_list,
  425. routerstatus_t *, rs, routerstatus_free(rs));
  426. smartlist_clear(ns->routerstatus_list);
  427. cleanup_nodelist();
  428. routerlist_free_all();
  429. }
  430. { /* Now add back all the nodes */
  431. helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
  432. helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
  433. helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
  434. helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
  435. helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  436. helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  437. }
  438. /* Now call service_desc_hsdirs_changed() and see that it detected the hash
  439. ring change */
  440. time_t now = approx_time();
  441. tt_assert(now);
  442. tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  443. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  444. /* Now order another upload and see that we keep having 6 prev hsdirs */
  445. upload_descriptor_to_all(service, desc);
  446. /* Check that previous hsdirs were populated */
  447. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  448. /* Now restore the HSDir hash ring to its original state by swapping back
  449. aaron for nora */
  450. /* First clear up the hash ring */
  451. {
  452. SMARTLIST_FOREACH(ns->routerstatus_list,
  453. routerstatus_t *, rs, routerstatus_free(rs));
  454. smartlist_clear(ns->routerstatus_list);
  455. cleanup_nodelist();
  456. routerlist_free_all();
  457. }
  458. { /* Now populate the hash ring again */
  459. helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
  460. helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
  461. helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
  462. helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
  463. helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
  464. helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  465. }
  466. /* Check that our algorithm catches this change of hsdirs */
  467. tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  468. /* Now pretend that the descriptor changed, and order a reupload to all
  469. HSDirs. Make sure that the set of previous HSDirs was cleared. */
  470. service_desc_schedule_upload(desc, now, 1);
  471. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);
  472. /* Now reupload again: see that the prev hsdir set got populated again. */
  473. upload_descriptor_to_all(service, desc);
  474. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  475. done:
  476. SMARTLIST_FOREACH(ns->routerstatus_list,
  477. routerstatus_t *, rs, routerstatus_free(rs));
  478. smartlist_clear(ns->routerstatus_list);
  479. networkstatus_vote_free(ns);
  480. cleanup_nodelist();
  481. hs_free_all();
  482. }
  483. /** Test disaster SRV computation and caching */
  484. static void
  485. test_disaster_srv(void *arg)
  486. {
  487. uint8_t *cached_disaster_srv_one = NULL;
  488. uint8_t *cached_disaster_srv_two = NULL;
  489. uint8_t srv_one[DIGEST256_LEN] = {0};
  490. uint8_t srv_two[DIGEST256_LEN] = {0};
  491. uint8_t srv_three[DIGEST256_LEN] = {0};
  492. uint8_t srv_four[DIGEST256_LEN] = {0};
  493. uint8_t srv_five[DIGEST256_LEN] = {0};
  494. (void) arg;
  495. /* Get the cached SRVs: we are going to use them later for verification */
  496. cached_disaster_srv_one = get_first_cached_disaster_srv();
  497. cached_disaster_srv_two = get_second_cached_disaster_srv();
  498. /* Compute some srvs */
  499. get_disaster_srv(1, srv_one);
  500. get_disaster_srv(2, srv_two);
  501. /* Check that the cached ones were updated */
  502. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  503. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
  504. /* Ask for an SRV that has already been computed */
  505. get_disaster_srv(2, srv_two);
  506. /* and check that the cache entries have not changed */
  507. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  508. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
  509. /* Ask for a new SRV */
  510. get_disaster_srv(3, srv_three);
  511. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  512. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
  513. /* Ask for another SRV: none of the original SRVs should now be cached */
  514. get_disaster_srv(4, srv_four);
  515. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  516. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
  517. /* Ask for yet another SRV */
  518. get_disaster_srv(5, srv_five);
  519. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  520. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
  521. done:
  522. ;
  523. }
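/* For context, a sketch of where the disaster SRV comes from, per prop224
 * (hedged here; the spec is authoritative): when no shared random value is
 * available from the consensus, the value is computed deterministically as
 *
 *   disaster_srv = SHA3-256("shared-random-disaster" |
 *                           INT_8(period_length) | INT_8(period_num))
 *
 * so it depends only on the time period, which is why get_disaster_srv()
 * can cache the two most recently requested periods as verified above. */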
  524. /** Test our HS descriptor request tracker by making various requests and
  525. * checking whether they get tracked properly. */
  526. static void
  527. test_hid_serv_request_tracker(void *arg)
  528. {
  529. (void) arg;
  530. time_t retval;
  531. routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  532. time_t now = approx_time();
  533. const char *req_key_str_first =
  534. "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  535. const char *req_key_str_second =
  536. "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  537. const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";
  538. /*************************** basic test *******************************/
  539. /* Get request tracker and make sure it's empty */
  540. strmap_t *request_tracker = get_last_hid_serv_requests();
  541. tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
  542. /* Let's register a hid serv request */
  543. hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  544. memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  545. retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
  546. now, 1);
  547. tt_int_op(retval, OP_EQ, now);
  548. tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  549. /* Let's lookup a non-existent hidserv request */
  550. retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
  551. now+1, 0);
  552. tt_int_op(retval, OP_EQ, 0);
  553. tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  554. /* Let's lookup a real hidserv request */
  555. retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
  556. now+2, 0);
  557. tt_int_op(retval, OP_EQ, now); /* we got it */
  558. tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  559. /**********************************************************************/
  560. /* Let's add another request for the same HS but on a different HSDir. */
  561. hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  562. memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  563. retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
  564. now+3, 1);
  565. tt_int_op(retval, OP_EQ, now+3);
  566. tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
  567. /* Check that we can clean the first request based on time */
  568. hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  569. tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  570. /* Check that it doesn't exist anymore */
  571. retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
  572. now+2, 0);
  573. tt_int_op(retval, OP_EQ, 0);
  574. /* Now let's add a smaller req key str */
  575. hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  576. memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  577. retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
  578. now+4, 1);
  579. tt_int_op(retval, OP_EQ, now+4);
  580. tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
  581. /*************************** deleting entries **************************/
  582. /* Add another request with very short key */
  583. retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  584. tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  585. /* Try deleting entries with a dummy key. Check that our previous requests
  586. * are still there */
  587. tor_capture_bugs_(1);
  588. hs_purge_hid_serv_from_last_hid_serv_requests("a");
  589. tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  590. tor_end_capture_bugs_();
  591. /* Try another dummy key. Check that requests are still there */
  592. {
  593. char dummy[2000];
  594. memset(dummy, 'Z', 2000);
  595. dummy[1999] = '\x00';
  596. hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
  597. tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  598. }
  599. /* Another dummy key! */
  600. hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  601. tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  602. /* Now actually delete a request! */
  603. hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  604. tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
  605. /* Purge it all! */
  606. hs_purge_last_hid_serv_requests();
  607. request_tracker = get_last_hid_serv_requests();
  608. tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
  609. done:
  610. tor_free(hsdir);
  611. tor_free(hsdir2);
  612. tor_free(hsdir3);
  613. }
  614. static void
  615. test_parse_extended_hostname(void *arg)
  616. {
  617. (void) arg;
  618. char address1[] = "fooaddress.onion";
  619. char address2[] = "aaaaaaaaaaaaaaaa.onion";
  620. char address3[] = "fooaddress.exit";
  621. char address4[] = "www.torproject.org";
  622. char address5[] = "foo.abcdefghijklmnop.onion";
  623. char address6[] = "foo.bar.abcdefghijklmnop.onion";
  624. char address7[] = ".abcdefghijklmnop.onion";
  625. char address8[] =
  626. "www.p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnad.onion";
  627. tt_assert(BAD_HOSTNAME == parse_extended_hostname(address1));
  628. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address2));
  629. tt_str_op(address2,OP_EQ, "aaaaaaaaaaaaaaaa");
  630. tt_assert(EXIT_HOSTNAME == parse_extended_hostname(address3));
  631. tt_assert(NORMAL_HOSTNAME == parse_extended_hostname(address4));
  632. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address5));
  633. tt_str_op(address5,OP_EQ, "abcdefghijklmnop");
  634. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address6));
  635. tt_str_op(address6,OP_EQ, "abcdefghijklmnop");
  636. tt_assert(BAD_HOSTNAME == parse_extended_hostname(address7));
  637. tt_assert(ONION_V3_HOSTNAME == parse_extended_hostname(address8));
  638. tt_str_op(address8, OP_EQ,
  639. "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnad");
  640. done: ;
  641. }
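/* Note on the checks above: parse_extended_hostname() rewrites its argument
 * in place on success, stripping any leading subdomains and the ".onion"
 * suffix, which is why the tt_str_op() assertions compare the same buffers
 * against the bare onion addresses. */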
  642. static void
  643. test_time_between_tp_and_srv(void *arg)
  644. {
  645. int ret;
  646. networkstatus_t ns;
  647. (void) arg;
  648. /* This function should return true where the "^" marks are:
  649. *
  650. * +------------------------------------------------------------------+
  651. * | |
  652. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  653. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  654. * | |
  655. * | $==========|-----------$===========|-----------$===========| |
  656. * | ^^^^^^^^^^^^ ^^^^^^^^^^^^ |
  657. * | |
  658. * +------------------------------------------------------------------+
  659. */
  660. ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  661. tt_int_op(ret, OP_EQ, 0);
  662. ret = hs_time_between_tp_and_srv(&ns, 0);
  663. tt_int_op(ret, OP_EQ, 0);
  664. ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  665. tt_int_op(ret, OP_EQ, 0);
  666. ret = hs_time_between_tp_and_srv(&ns, 0);
  667. tt_int_op(ret, OP_EQ, 0);
  668. ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  669. tt_int_op(ret, OP_EQ, 0);
  670. ret = hs_time_between_tp_and_srv(&ns, 0);
  671. tt_int_op(ret, OP_EQ, 1);
  672. ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  673. tt_int_op(ret, OP_EQ, 0);
  674. ret = hs_time_between_tp_and_srv(&ns, 0);
  675. tt_int_op(ret, OP_EQ, 1);
  676. ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  677. tt_int_op(ret, OP_EQ, 0);
  678. ret = hs_time_between_tp_and_srv(&ns, 0);
  679. tt_int_op(ret, OP_EQ, 0);
  680. done:
  681. ;
  682. }
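/* Reading the diagram and the assertions together: hs_time_between_tp_and_srv()
 * returns 1 only when the consensus valid_after falls in the window between a
 * time period boundary (12:00) and the creation of the following SRV (00:00),
 * which is exactly the "^" region drawn above. */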
  683. /************ Reachability Test (it is huge) ****************/
  684. /* Simulate different consensuses for the client and the service. Used by the
  685. * reachability tests. The SRVs and responsible HSDir lists are used by all
  686. * reachability tests, so make them common to simplify setup and teardown. */
  687. static networkstatus_t *mock_service_ns = NULL;
  688. static networkstatus_t *mock_client_ns = NULL;
  689. static sr_srv_t current_srv, previous_srv;
  690. static smartlist_t *service_responsible_hsdirs = NULL;
  691. static smartlist_t *client_responsible_hsdirs = NULL;
  692. static networkstatus_t *
  693. mock_networkstatus_get_live_consensus_service(time_t now)
  694. {
  695. (void) now;
  696. if (mock_service_ns) {
  697. return mock_service_ns;
  698. }
  699. mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  700. mock_service_ns->routerstatus_list = smartlist_new();
  701. mock_service_ns->type = NS_TYPE_CONSENSUS;
  702. return mock_service_ns;
  703. }
  704. static networkstatus_t *
  705. mock_networkstatus_get_latest_consensus_service(void)
  706. {
  707. return mock_networkstatus_get_live_consensus_service(0);
  708. }
  709. static networkstatus_t *
  710. mock_networkstatus_get_live_consensus_client(time_t now)
  711. {
  712. (void) now;
  713. if (mock_client_ns) {
  714. return mock_client_ns;
  715. }
  716. mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  717. mock_client_ns->routerstatus_list = smartlist_new();
  718. mock_client_ns->type = NS_TYPE_CONSENSUS;
  719. return mock_client_ns;
  720. }
  721. static networkstatus_t *
  722. mock_networkstatus_get_latest_consensus_client(void)
  723. {
  724. return mock_networkstatus_get_live_consensus_client(0);
  725. }
  726. /* Mock function because we are not trying to test circuit closing, which does
  727. * an awful lot of checks on the circuit object. */
  728. static void
  729. mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
  730. const char *file)
  731. {
  732. (void) circ;
  733. (void) reason;
  734. (void) line;
  735. (void) file;
  736. return;
  737. }
  738. /* Initialize a big HSDir V3 hash ring. */
  739. static void
  740. helper_initialize_big_hash_ring(networkstatus_t *ns)
  741. {
  742. int ret;
  743. /* Generate 250 hsdirs! :) */
  744. for (int counter = 1 ; counter < 251 ; counter++) {
  745. /* Let's generate random nickname for each hsdir... */
  746. char nickname_binary[8];
  747. char nickname_str[13] = {0};
  748. crypto_rand(nickname_binary, sizeof(nickname_binary));
  749. ret = base64_encode(nickname_str, sizeof(nickname_str),
  750. nickname_binary, sizeof(nickname_binary), 0);
  751. tt_int_op(ret, OP_EQ, 12);
  752. helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  753. }
  754. /* Make sure we have 250 hsdirs in our list */
  755. tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);
  756. done:
  757. ;
  758. }
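/* Aside on the nickname buffers above: 8 random bytes base64-encode to
 * exactly 12 characters (4 output characters per 3 input bytes, padded),
 * hence the expected return value of 12 and the 13 byte nickname_str buffer
 * that leaves room for the NUL terminator. */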
  759. /** Initialize service and publish its descriptor as needed. Return the newly
  760. * allocated service object to the caller. */
  761. static hs_service_t *
  762. helper_init_service(time_t now)
  763. {
  764. int retval;
  765. hs_service_t *service = hs_service_new(get_options());
  766. tt_assert(service);
  767. service->config.version = HS_VERSION_THREE;
  768. ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  769. ed25519_public_key_generate(&service->keys.identity_pk,
  770. &service->keys.identity_sk);
  771. /* Register service to global map. */
  772. retval = register_service(get_hs_service_map(), service);
  773. tt_int_op(retval, OP_EQ, 0);
  774. /* Initialize service descriptor */
  775. build_all_descriptors(now);
  776. tt_assert(service->desc_current);
  777. tt_assert(service->desc_next);
  778. done:
  779. return service;
  780. }
  781. /* Helper function to parse the RFC 1123 time string into t. */
  782. static void
  783. set_consensus_times(const char *time, time_t *t)
  784. {
  785. tt_assert(time);
  786. tt_assert(t);
  787. int ret = parse_rfc1123_time(time, t);
  788. tt_int_op(ret, OP_EQ, 0);
  789. done:
  790. return;
  791. }
  792. /* Helper function to cleanup the mock consensus (client and service) */
  793. static void
  794. cleanup_mock_ns(void)
  795. {
  796. if (mock_service_ns) {
  797. SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
  798. routerstatus_t *, rs, routerstatus_free(rs));
  799. smartlist_clear(mock_service_ns->routerstatus_list);
  800. mock_service_ns->sr_info.current_srv = NULL;
  801. mock_service_ns->sr_info.previous_srv = NULL;
  802. networkstatus_vote_free(mock_service_ns);
  803. mock_service_ns = NULL;
  804. }
  805. if (mock_client_ns) {
  806. SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
  807. routerstatus_t *, rs, routerstatus_free(rs));
  808. smartlist_clear(mock_client_ns->routerstatus_list);
  809. mock_client_ns->sr_info.current_srv = NULL;
  810. mock_client_ns->sr_info.previous_srv = NULL;
  811. networkstatus_vote_free(mock_client_ns);
  812. mock_client_ns = NULL;
  813. }
  814. }
  815. /* Helper function to set up a reachability test. Once called,
  816. * cleanup_reachability_test() MUST be called at the end. */
  817. static void
  818. setup_reachability_test(void)
  819. {
  820. MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  821. MOCK(get_or_state, get_or_state_replacement);
  822. hs_init();
  823. /* Baseline to start with. */
  824. memset(&current_srv, 0, sizeof(current_srv));
  825. memset(&previous_srv, 1, sizeof(previous_srv));
  826. /* Initialize the consensuses. */
  827. mock_networkstatus_get_latest_consensus_service();
  828. mock_networkstatus_get_latest_consensus_client();
  829. service_responsible_hsdirs = smartlist_new();
  830. client_responsible_hsdirs = smartlist_new();
  831. }
  832. /* Helper function to clean up a reachability test's initial setup. */
  833. static void
  834. cleanup_reachability_test(void)
  835. {
  836. smartlist_free(service_responsible_hsdirs);
  837. service_responsible_hsdirs = NULL;
  838. smartlist_free(client_responsible_hsdirs);
  839. client_responsible_hsdirs = NULL;
  840. hs_free_all();
  841. cleanup_mock_ns();
  842. UNMOCK(get_or_state);
  843. UNMOCK(circuit_mark_for_close_);
  844. }
  845. /* A reachability test always checks whether the resulting service and client
  846. * responsible HSDir lists for the given parameters are equal.
  847. *
  848. * Return true iff exactly the same nodes are in both lists. */
  849. static int
  850. are_responsible_hsdirs_equal(void)
  851. {
  852. int count = 0;
  853. tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  854. tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 6);
  855. SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
  856. const routerstatus_t *, c_rs) {
  857. SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
  858. const routerstatus_t *, s_rs) {
  859. if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
  860. DIGEST_LEN)) {
  861. count++;
  862. break;
  863. }
  864. } SMARTLIST_FOREACH_END(s_rs);
  865. } SMARTLIST_FOREACH_END(c_rs);
  866. done:
  867. return (count == 6);
  868. }
  869. /* Tor doesn't use such a function to get the previous time period; it is only
  870. * needed inside node_set_hsdir_index(). We need it here so we can test
  871. * reachability scenario 6, which requires the previous time period to compute
  872. * the list of responsible HSDirs because of the client state timing. */
  873. static uint64_t
  874. get_previous_time_period(time_t now)
  875. {
  876. return hs_get_time_period_num(now) - 1;
  877. }
  878. /* Configuration of a reachability test scenario. */
  879. typedef struct reachability_cfg_t {
  880. /* Consensus timings to be set. They have to be compliant with
  881. * RFC 1123 time format. */
  882. const char *service_valid_after;
  883. const char *service_valid_until;
  884. const char *client_valid_after;
  885. const char *client_valid_until;
  886. /* SRVs that the service and client should use. */
  887. sr_srv_t *service_current_srv;
  888. sr_srv_t *service_previous_srv;
  889. sr_srv_t *client_current_srv;
  890. sr_srv_t *client_previous_srv;
  891. /* A time period function for the service to use for this scenario. For a
  892. * successful reachability test, the client always uses the current time
  893. * period, which is why there is no client function. */
  894. uint64_t (*service_time_period_fn)(time_t);
  895. /* Are the client and service expected to be in a new time period? After
  896. * setting the consensus time, the reachability test checks
  897. * hs_time_between_tp_and_srv() and tests the returned value against this. */
  898. unsigned int service_in_new_tp;
  899. unsigned int client_in_new_tp;
  900. /* Some scenarios require a hint that the client, because of its consensus
  901. * time, will request the "next" service descriptor, so this indicates whether
  902. * that is the case or not. */
  903. unsigned int client_fetch_next_desc;
  904. } reachability_cfg_t;
  905. /* Some defines to help with semantics while reading the configurations below. */
  906. #define NOT_IN_NEW_TP 0
  907. #define IN_NEW_TP 1
  908. #define DONT_NEED_NEXT_DESC 0
  909. #define NEED_NEXT_DESC 1
  910. static reachability_cfg_t reachability_scenarios[] = {
  911. /* Scenario 1
  912. *
  913. * +------------------------------------------------------------------+
  914. * | |
  915. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  916. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  917. * | |
  918. * | $==========|-----------$===========|-----------$===========| |
  919. * | ^ ^ |
  920. * | S C |
  921. * +------------------------------------------------------------------+
  922. *
  923. * S: Service, C: Client
  924. *
  925. * Service consensus valid_after time is set to 13:00 and client to 15:00,
  926. * both are after TP#1 thus have access to SRV#1. Service and client should
  927. * be using TP#1.
  928. */
  929. { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
  930. "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
  931. "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
  932. "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
  933. &current_srv, NULL, /* Service current and previous SRV */
  934. &current_srv, NULL, /* Client current and previous SRV */
  935. hs_get_time_period_num, /* Service time period function. */
  936. IN_NEW_TP, /* Is service in new TP? */
  937. IN_NEW_TP, /* Is client in new TP? */
  938. NEED_NEXT_DESC },
  939. /* Scenario 2
  940. *
  941. * +------------------------------------------------------------------+
  942. * | |
  943. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  944. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  945. * | |
  946. * | $==========|-----------$===========|-----------$===========| |
  947. * | ^ ^ |
  948. * | S C |
  949. * +------------------------------------------------------------------+
  950. *
  951. * S: Service, C: Client
  952. *
  953. * Service consensus valid_after time is set to 23:00 and client to 01:00,
  954. * which makes the client after the SRV#2 and the service just before. The
  955. * service should only be using TP#1. The client should be using TP#1.
  956. */
  957. { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
  958. "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
  959. "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
  960. "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
  961. &previous_srv, NULL, /* Service current and previous SRV */
  962. &current_srv, &previous_srv, /* Client current and previous SRV */
  963. hs_get_time_period_num, /* Service time period function. */
  964. IN_NEW_TP, /* Is service in new TP? */
  965. NOT_IN_NEW_TP, /* Is client in new TP? */
  966. NEED_NEXT_DESC },
  967. /* Scenario 3
  968. *
  969. * +------------------------------------------------------------------+
  970. * | |
  971. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  972. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  973. * | |
  974. * | $==========|-----------$===========|----------$===========| |
  975. * | ^ ^ |
  976. * | S C |
  977. * +------------------------------------------------------------------+
  978. *
  979. * S: Service, C: Client
  980. *
  981. * Service consensus valid_after time is set to 03:00 and client to 05:00,
  982. * which makes both after SRV#2. The service should be using TP#1 as its
  983. * current time period. The client should be using TP#1.
  984. */
  985. { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
  986. "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
  987. "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
  988. "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
  989. &current_srv, &previous_srv, /* Service current and previous SRV */
  990. &current_srv, &previous_srv, /* Client current and previous SRV */
  991. hs_get_time_period_num, /* Service time period function. */
  992. NOT_IN_NEW_TP, /* Is service in new TP? */
  993. NOT_IN_NEW_TP, /* Is client in new TP? */
  994. DONT_NEED_NEXT_DESC },
  995. /* Scenario 4
  996. *
  997. * +------------------------------------------------------------------+
  998. * | |
  999. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1000. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1001. * | |
  1002. * | $==========|-----------$===========|-----------$===========| |
  1003. * | ^ ^ |
  1004. * | S C |
  1005. * +------------------------------------------------------------------+
  1006. *
  1007. * S: Service, C: Client
  1008. *
  1009. * Service consensus valid_after time is set to 11:00 and client to 13:00,
  1010. * which makes the service before TP#2 and the client just after. The
  1011. * service should be using TP#1 as its current time period and TP#2 as the
  1012. * next. The client should be using TP#2 time period.
  1013. */
  1014. { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
  1015. "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
  1016. "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
  1017. "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
  1018. &current_srv, &previous_srv, /* Service current and previous SRV */
  1019. &current_srv, &previous_srv, /* Client current and previous SRV */
  1020. hs_get_next_time_period_num, /* Service time period function. */
  1021. NOT_IN_NEW_TP, /* Is service in new TP? */
  1022. IN_NEW_TP, /* Is client in new TP? */
  1023. NEED_NEXT_DESC },
  1024. /* Scenario 5
  1025. *
  1026. * +------------------------------------------------------------------+
  1027. * | |
  1028. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1029. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1030. * | |
  1031. * | $==========|-----------$===========|-----------$===========| |
  1032. * | ^ ^ |
  1033. * | C S |
  1034. * +------------------------------------------------------------------+
  1035. *
  1036. * S: Service, C: Client
  1037. *
  1038. * Service consensus valid_after time is set to 01:00 and client to 23:00,
  1039. * which makes the service after SRV#2 and the client just before. The
  1040. * service should be using TP#1 as its current time period and TP#2 as the
  1041. * next. The client should be using TP#1 time period.
  1042. */
  1043. { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
  1044. "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
  1045. "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
  1046. "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
  1047. &current_srv, &previous_srv, /* Service current and previous SRV */
  1048. &previous_srv, NULL, /* Client current and previous SRV */
  1049. hs_get_time_period_num, /* Service time period function. */
  1050. NOT_IN_NEW_TP, /* Is service in new TP? */
  1051. IN_NEW_TP, /* Is client in new TP? */
  1052. DONT_NEED_NEXT_DESC },
  1053. /* Scenario 6
  1054. *
  1055. * +------------------------------------------------------------------+
  1056. * | |
  1057. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1058. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1059. * | |
  1060. * | $==========|-----------$===========|-----------$===========| |
  1061. * | ^ ^ |
  1062. * | C S |
  1063. * +------------------------------------------------------------------+
  1064. *
  1065. * S: Service, C: Client
  1066. *
  1067. * Service consensus valid_after time is set to 13:00 and client to 11:00,
  1068. * which makes the service just after TP#2 and the client just before.
  1069. * The service should be using TP#1 as its current time period and TP#2 as
  1070. * its next. The client should be using TP#1 time period.
  1071. */
  1072. { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
  1073. "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
  1074. "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
  1075. "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
  1076. &current_srv, &previous_srv, /* Service current and previous SRV */
  1077. &current_srv, &previous_srv, /* Client current and previous SRV */
  1078. get_previous_time_period, /* Service time period function. */
  1079. IN_NEW_TP, /* Is service in new TP? */
  1080. NOT_IN_NEW_TP, /* Is client in new TP? */
  1081. DONT_NEED_NEXT_DESC },
  1082. /* End marker. */
  1083. { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
  1084. };
  1085. /* Run a single reachability scenario. num_scenario is the corresponding
  1086. * scenario number from the documentation. It is logged in case of
  1087. * failure so we know which scenario failed. */
  1088. static int
  1089. run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
  1090. {
  1091. int ret = -1;
  1092. hs_service_t *service;
  1093. uint64_t service_tp, client_tp;
  1094. ed25519_public_key_t service_blinded_pk, client_blinded_pk;
  1095. setup_reachability_test();
  1096. tt_assert(cfg);
  1097. /* Set service consensus time. */
  1098. set_consensus_times(cfg->service_valid_after,
  1099. &mock_service_ns->valid_after);
  1100. set_consensus_times(cfg->service_valid_until,
  1101. &mock_service_ns->valid_until);
  1102. set_consensus_times(cfg->service_valid_until,
  1103. &mock_service_ns->fresh_until);
  1104. /* Set client consensus time. */
  1105. set_consensus_times(cfg->client_valid_after,
  1106. &mock_client_ns->valid_after);
  1107. set_consensus_times(cfg->client_valid_until,
  1108. &mock_client_ns->valid_until);
  1109. set_consensus_times(cfg->client_valid_until,
  1110. &mock_client_ns->fresh_until);
  1111. /* New time period checks for this scenario. */
  1112. tt_int_op(hs_time_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
  1113. cfg->service_in_new_tp);
  1114. tt_int_op(hs_time_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
  1115. cfg->client_in_new_tp);
  1116. /* Set the SRVs for this scenario. */
  1117. mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  1118. mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  1119. mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  1120. mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;
  1121. /* Initialize a service to get keys. */
  1122. service = helper_init_service(time(NULL));
  1123. /*
  1124. * === Client setup ===
  1125. */
  1126. MOCK(networkstatus_get_live_consensus,
  1127. mock_networkstatus_get_live_consensus_client);
  1128. MOCK(networkstatus_get_latest_consensus,
  1129. mock_networkstatus_get_latest_consensus_client);
  1130. /* Make networkstatus_is_live() happy. */
  1131. update_approx_time(mock_client_ns->valid_after);
  1132. /* Initialize a big hashring for this consensus with the hsdir index set. */
  1133. helper_initialize_big_hash_ring(mock_client_ns);
  1134. /* The client ONLY uses the current time period. This is the whole point of
  1135. * these reachability tests: to make sure the client can always reach the
  1136. * service using only its current time period. */
  1137. client_tp = hs_get_time_period_num(0);
  1138. hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
  1139. client_tp, &client_blinded_pk);
  1140. hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
  1141. client_responsible_hsdirs);
  1142. /* Clean up the nodelist so we can let the service compute its own set of
  1143. * nodes with its own hashring. */
  1144. cleanup_nodelist();
  1145. tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  1146. UNMOCK(networkstatus_get_latest_consensus);
  1147. UNMOCK(networkstatus_get_live_consensus);
  1148. /*
  1149. * === Service setup ===
  1150. */
  1151. MOCK(networkstatus_get_live_consensus,
  1152. mock_networkstatus_get_live_consensus_service);
  1153. MOCK(networkstatus_get_latest_consensus,
  1154. mock_networkstatus_get_latest_consensus_service);
  1155. /* Make networkstatus_is_live() happy. */
  1156. update_approx_time(mock_service_ns->valid_after);
  1157. /* Initialize a big hashring for this consensus with the hsdir index set. */
  1158. helper_initialize_big_hash_ring(mock_service_ns);
  1159. service_tp = cfg->service_time_period_fn(0);
  1160. hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
  1161. service_tp, &service_blinded_pk);
  1162. /* A service builds two lists of responsible HSDirs, one for the current and
  1163. * one for the next descriptor. Depending on the scenario, the client timing
  1164. * indicates whether it is fetching the current or the next descriptor, so we
  1165. * use "client_fetch_next_desc" to know which one the client is trying to get
  1166. * and to confirm that the service computes the same hashring for the same
  1167. * blinded key and service time period function. */
  1168. hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
  1169. cfg->client_fetch_next_desc, 0,
  1170. service_responsible_hsdirs);
  1171. cleanup_nodelist();
  1172. tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 6);
  1173. UNMOCK(networkstatus_get_latest_consensus);
  1174. UNMOCK(networkstatus_get_live_consensus);
  1175. /* Some testing of the values we just got from the client and service. */
  1176. tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
  1177. ED25519_PUBKEY_LEN);
  1178. tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);
  1179. /* Everything went well. */
  1180. ret = 0;
  1181. done:
  1182. cleanup_reachability_test();
  1183. if (ret == -1) {
  1184. /* Do this so we can know which scenario failed. */
  1185. char msg[32];
  1186. tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
  1187. tt_fail_msg(msg);
  1188. }
  1189. return ret;
  1190. }
  1191. static void
  1192. test_reachability(void *arg)
  1193. {
  1194. (void) arg;
  1195. /* NOTE: An important axiom to understand here is that SRV#N must only be
  1196. * used with the TP#N value. For example, SRV#2 should NEVER be used together
  1197. * with TP#1. The HSDir index computation is based on this axiom. */
  1198. for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
  1199. int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
  1200. if (ret < 0) {
  1201. return;
  1202. }
  1203. }
  1204. }
  1205. struct testcase_t hs_common_tests[] = {
  1206. { "build_address", test_build_address, TT_FORK,
  1207. NULL, NULL },
  1208. { "validate_address", test_validate_address, TT_FORK,
  1209. NULL, NULL },
  1210. { "time_period", test_time_period, TT_FORK,
  1211. NULL, NULL },
  1212. { "start_time_of_next_time_period", test_start_time_of_next_time_period,
  1213. TT_FORK, NULL, NULL },
  1214. { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
  1215. NULL, NULL },
  1216. { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
  1217. NULL, NULL },
  1218. { "disaster_srv", test_disaster_srv, TT_FORK,
  1219. NULL, NULL },
  1220. { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
  1221. NULL, NULL },
  1222. { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
  1223. NULL, NULL },
  1224. { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
  1225. NULL, NULL },
  1226. { "reachability", test_reachability, TT_FORK,
  1227. NULL, NULL },
  1228. END_OF_TESTCASES
  1229. };