test_hs_common.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828
  1. /* Copyright (c) 2017, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. /**
  4. * \file test_hs_common.c
  5. * \brief Test hidden service common functionalities.
  6. */
  7. #define HS_COMMON_PRIVATE
  8. #define HS_CLIENT_PRIVATE
  9. #define HS_SERVICE_PRIVATE
  10. #define NODELIST_PRIVATE
  11. #include "test.h"
  12. #include "test_helpers.h"
  13. #include "log_test_helpers.h"
  14. #include "hs_test_helpers.h"
  15. #include "connection_edge.h"
  16. #include "crypto_rand.h"
  17. #include "hs_common.h"
  18. #include "hs_client.h"
  19. #include "hs_service.h"
  20. #include "config.h"
  21. #include "networkstatus.h"
  22. #include "directory.h"
  23. #include "dirauth/dirvote.h"
  24. #include "nodelist.h"
  25. #include "routerlist.h"
  26. #include "statefile.h"
  27. #include "circuitlist.h"
  28. #include "dirauth/shared_random.h"
  29. #include "util.h"
  30. #include "voting_schedule.h"
  31. #include "networkstatus_st.h"
  32. #include "node_st.h"
  33. #include "routerstatus_st.h"
  34. /** Test the validation of HS v3 addresses */
  35. static void
  36. test_validate_address(void *arg)
  37. {
  38. int ret;
  39. (void) arg;
  40. /* Address too short and too long. */
  41. setup_full_capture_of_logs(LOG_WARN);
  42. ret = hs_address_is_valid("blah");
  43. tt_int_op(ret, OP_EQ, 0);
  44. expect_log_msg_containing("has an invalid length");
  45. teardown_capture_of_logs();
  46. setup_full_capture_of_logs(LOG_WARN);
  47. ret = hs_address_is_valid(
  48. "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  49. tt_int_op(ret, OP_EQ, 0);
  50. expect_log_msg_containing("has an invalid length");
  51. teardown_capture_of_logs();
  52. /* Invalid checksum (taken from prop224) */
  53. setup_full_capture_of_logs(LOG_WARN);
  54. ret = hs_address_is_valid(
  55. "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  56. tt_int_op(ret, OP_EQ, 0);
  57. expect_log_msg_containing("invalid checksum");
  58. teardown_capture_of_logs();
  59. setup_full_capture_of_logs(LOG_WARN);
  60. ret = hs_address_is_valid(
  61. "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  62. tt_int_op(ret, OP_EQ, 0);
  63. expect_log_msg_containing("invalid checksum");
  64. teardown_capture_of_logs();
  65. /* Non base32 decodable string. */
  66. setup_full_capture_of_logs(LOG_WARN);
  67. ret = hs_address_is_valid(
  68. "????????????????????????????????????????????????????????");
  69. tt_int_op(ret, OP_EQ, 0);
  70. expect_log_msg_containing("can't be decoded");
  71. teardown_capture_of_logs();
  72. /* Valid address. */
  73. ret = hs_address_is_valid(
  74. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  75. tt_int_op(ret, OP_EQ, 1);
  76. done:
  77. ;
  78. }
/* Mock for write_str_to_file(): instead of touching the filesystem, verify
 * that the caller (write_address_to_file() in test_build_address) passes the
 * expected path and the expected onion address string, terminated by
 * ".onion\n". Always reports success. */
static int
mock_write_str_to_file(const char *path, const char *str, int bin)
{
  (void)bin;
  /* Path is the service directory_path joined with the filename argument. */
  tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  tt_str_op(str, OP_EQ,
           "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");

 done:
  return 0;
}
  89. /** Test building HS v3 onion addresses. Uses test vectors from the
  90. * ./hs_build_address.py script. */
  91. static void
  92. test_build_address(void *arg)
  93. {
  94. int ret;
  95. char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  96. ed25519_public_key_t pubkey;
  97. /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  98. char pubkey_hex[] =
  99. "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  100. hs_service_t *service = NULL;
  101. (void) arg;
  102. MOCK(write_str_to_file, mock_write_str_to_file);
  103. /* The following has been created with hs_build_address.py script that
  104. * follows proposal 224 specification to build an onion address. */
  105. static const char *test_addr =
  106. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";
  107. /* Let's try to build the same onion address as the script */
  108. base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
  109. pubkey_hex, strlen(pubkey_hex));
  110. hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  111. tt_str_op(test_addr, OP_EQ, onion_addr);
  112. /* Validate that address. */
  113. ret = hs_address_is_valid(onion_addr);
  114. tt_int_op(ret, OP_EQ, 1);
  115. service = tor_malloc_zero(sizeof(hs_service_t));
  116. memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  117. tor_asprintf(&service->config.directory_path, "/double/five");
  118. ret = write_address_to_file(service, "squared");
  119. tt_int_op(ret, OP_EQ, 0);
  120. done:
  121. hs_service_free(service);
  122. }
/** Test that our HS time period calculation functions work properly.
 * Time periods are 1440 minutes long and, on the live network, rotate at
 * 12:00 UTC (the values below come from the prop224 example). */
static void
test_time_period(void *arg)
{
  (void) arg;
  uint64_t tn;
  int retval;
  time_t fake_time, correct_time, start_time;

  /* Let's do the example in prop224 section [TIME-PERIODS] */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);

  /* Check that the time period number is right */
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  /* Increase current time to 11:59:59 UTC and check that the time period
     number is still the same (we are one second before rotation) */
  fake_time += 3599;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  { /* Check start time of next time period */
    retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now take time to 12:00:00 UTC and check that the time period rotated */
  fake_time += 1;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16904);

  /* Now also check our hs_get_next_time_period_num() function */
  tn = hs_get_next_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16905);

  { /* Check start time of next time period again (one day later, since we
       are now just past a rotation point) */
    retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now do another sanity check: The time period number at the start of the
   * next time period, must be the same time period number as the one returned
   * from hs_get_next_time_period_num() */
  {
    time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
    tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
              hs_get_next_time_period_num(fake_time));
  }

 done:
  ;
}
/** Test that we can correctly find the start time of the next time period,
 * both with the default network parameters and with a fast-voting testing
 * network where the rotation offset shrinks accordingly. */
static void
test_start_time_of_next_time_period(void *arg)
{
  (void) arg;
  int retval;
  time_t fake_time;
  char tbuf[ISO_TIME_LEN + 1];
  time_t next_tp_start_time;

  /* Do some basic tests */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);

  /* Another test with an edge-case time (start of TP): the "next" TP start
   * must be a full period away, not the current instant. */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);

  {
    /* Now pretend we are on a testing network and alter the voting schedule to
       be every 10 seconds. This means that a time period has length 10*24
       seconds (4 minutes). It also means that we apply a rotational offset of
       120 seconds to the time period, so that it starts at 00:02:00 instead of
       00:00:00. */
    or_options_t *options = get_options_mutable();
    options->TestingTorNetwork = 1;
    options->V3AuthVotingInterval = 10;
    options->TestingV3AuthInitialVotingInterval = 10;
    /* NOTE(review): these option changes are never reset — fine if this test
     * runs in its own forked process (TT_FORK), otherwise they leak into
     * later tests; confirm against the test table. */

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  }

 done:
  ;
}
  227. /* Cleanup the global nodelist. It also frees the "md" in the node_t because
  228. * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
  229. static void
  230. cleanup_nodelist(void)
  231. {
  232. smartlist_t *nodelist = nodelist_get_list();
  233. SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
  234. tor_free(node->md);
  235. node->md = NULL;
  236. } SMARTLIST_FOREACH_END(node);
  237. nodelist_free_all();
  238. }
/* Add a fake relay to the consensus <b>ns</b> and to the global nodelist.
 *
 * Both the routerstatus identity digest and the ed25519 signing key are
 * filled with the byte <b>identity_idx</b>, so each index yields a distinct,
 * reproducible relay. <b>is_hsdir</b> sets the HSDir flag on the
 * routerstatus.
 *
 * Ownership after return: the routerstatus_t is owned by the caller through
 * ns->routerstatus_list; the node's "md" is allocated here and must be freed
 * by cleanup_nodelist(); the temporary routerinfo_t is freed before
 * returning (the node's "ri" pointer is cleared first so nothing dangles). */
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  tor_addr_t ipv4_addr;

  /* Derive the whole identity digest from the index byte. */
  memset(identity, identity_idx, sizeof(identity));
  memcpy(rs->identity_digest, identity, DIGEST_LEN);

  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  /* Every fake relay shares the same IPv4 address; only identities differ. */
  tor_addr_parse(&ipv4_addr, "1.2.3.4");
  ri->addr = tor_addr_to_ipv4h(&ipv4_addr);
  rs->addr = tor_addr_to_ipv4h(&ipv4_addr);
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now the nodelist_set_routerinfo() function needs a "rs" to set
   * the indexes which it doesn't have when it is called. */
  node_set_hsdir_index(node, ns);
  /* Detach "ri" from the node before freeing it below. */
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  routerinfo_free(ri);
}
  279. static networkstatus_t *mock_ns = NULL;
  280. static networkstatus_t *
  281. mock_networkstatus_get_latest_consensus(void)
  282. {
  283. time_t now = approx_time();
  284. /* If initialized, return it */
  285. if (mock_ns) {
  286. return mock_ns;
  287. }
  288. /* Initialize fake consensus */
  289. mock_ns = tor_malloc_zero(sizeof(networkstatus_t));
  290. /* This consensus is live */
  291. mock_ns->valid_after = now-1;
  292. mock_ns->fresh_until = now+1;
  293. mock_ns->valid_until = now+2;
  294. /* Create routerstatus list */
  295. mock_ns->routerstatus_list = smartlist_new();
  296. mock_ns->type = NS_TYPE_CONSENSUS;
  297. return mock_ns;
  298. }
/* Mock replacement for networkstatus_get_live_consensus(): ignore the time
 * argument and hand back the cached mock_ns, which must already have been
 * created (via mock_networkstatus_get_latest_consensus()). */
static networkstatus_t *
mock_networkstatus_get_live_consensus(time_t now)
{
  (void) now;

  tt_assert(mock_ns);

 done:
  return mock_ns;
}
/** Test the responsible HSDirs calculation function: out of three relays in
 * the consensus, only the two flagged as HSDirs may be selected. */
static void
test_responsible_hsdirs(void *arg)
{
  smartlist_t *responsible_dirs = smartlist_new();
  networkstatus_t *ns = NULL;
  (void) arg;

  hs_init();

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);

  ns = networkstatus_get_latest_consensus();

  { /* First router: HSdir */
    helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  }

  { /* Second HSDir */
    helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  }

  { /* Third relay but not HSDir */
    helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  }

  /* Use a fixed time period and pub key so we always take the same path */
  ed25519_public_key_t pubkey;
  uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
  memset(&pubkey, 42, sizeof(pubkey));

  hs_get_responsible_hsdirs(&pubkey, time_period_num,
                            0, 0, responsible_dirs);

  /* Make sure that we only found 2 responsible HSDirs.
   * The third relay was not an hsdir! */
  tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);

  /** TODO: Build a bigger network and do more tests here */

 done:
  /* The routerstatuses are freed once, via the consensus list; only the
   * responsible_dirs container itself is freed (its entries are the same
   * objects). */
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_free(responsible_dirs);
  smartlist_clear(ns->routerstatus_list);
  /* NOTE(review): this frees mock_ns but leaves the file-scope pointer set;
   * harmless if this test runs forked (TT_FORK) — confirm. */
  networkstatus_vote_free(mock_ns);
  cleanup_nodelist();
}
  345. static void
  346. mock_directory_initiate_request(directory_request_t *req)
  347. {
  348. (void)req;
  349. return;
  350. }
  351. static int
  352. mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
  353. const ed25519_keypair_t *signing_kp,
  354. char **encoded_out)
  355. {
  356. (void)desc;
  357. (void)signing_kp;
  358. tor_asprintf(encoded_out, "lulu");
  359. return 0;
  360. }
/* Static zero-initialized state object handed out by the mock below. */
static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters): returns the
 * file-scope dummy_state instead of loading any real state file. */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}
/* Mock for router_have_minimum_dir_info(): always report that we have
 * enough directory information. */
static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
/** Test that we correctly detect when the HSDir hash ring changes so that we
 * reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  /* Mock out everything that would touch the network, the disk, or real
   * directory state. */
  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /** Test logic:
   *  1) Upload descriptor to HSDirs
   *     CHECK that previous_hsdirs list was populated.
   *  2) Then call router_dir_info_changed() without an HSDir set change.
   *     CHECK that no reupload occurs.
   *  3) Now change the HSDir set, and call dir_info_changed() again.
   *     CHECK that reupload occurs.
   *  4) Finally call service_desc_schedule_upload().
   *     CHECK that previous_hsdirs list was cleared.
   **/

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);

  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in hash ring so far, so the upload
   * time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes (same as before except index 3/"aaron" is
       replaced by index 7/"nora") */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
     ring change */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  /* The set size stays 6: one hsdir was swapped for another. */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
     aaron for nora */
  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
     HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(ns);
  cleanup_nodelist();
  hs_free_all();
}
/** Test disaster SRV computation and caching. The assertion sequence shows
 * the cache holds exactly two SRVs: requesting an uncached time period
 * evicts one of the two slots, alternating between them. */
static void
test_disaster_srv(void *arg)
{
  uint8_t *cached_disaster_srv_one = NULL;
  uint8_t *cached_disaster_srv_two = NULL;
  uint8_t srv_one[DIGEST256_LEN] = {0};
  uint8_t srv_two[DIGEST256_LEN] = {0};
  uint8_t srv_three[DIGEST256_LEN] = {0};
  uint8_t srv_four[DIGEST256_LEN] = {0};
  uint8_t srv_five[DIGEST256_LEN] = {0};

  (void) arg;

  /* Get the cached SRVs: we gonna use them later for verification */
  cached_disaster_srv_one = get_first_cached_disaster_srv();
  cached_disaster_srv_two = get_second_cached_disaster_srv();

  /* Compute some srvs */
  get_disaster_srv(1, srv_one);
  get_disaster_srv(2, srv_two);

  /* Check that the cached ones where updated */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for an SRV that has already been computed */
  get_disaster_srv(2, srv_two);
  /* and check that the cache entries have not changed */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for a new SRV: it lands in the first slot, evicting srv_one */
  get_disaster_srv(3, srv_three);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for another SRV: none of the original SRVs should now be cached */
  get_disaster_srv(4, srv_four);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

  /* Ask for yet another SRV: back to evicting the first slot */
  get_disaster_srv(5, srv_five);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

 done:
  ;
}
/** Test our HS descriptor request tracker by making various requests and
 * checking whether they get tracked properly. Tracker keys combine the
 * HSDir identity with the requested descriptor key string, so the same
 * descriptor requested on two HSDirs yields two entries. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

  /* Let's register a hid serv request (the "set" flag is 1) */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request (set == 0: lookup only) */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Check that we can clean the first request based on time (only the older
   * entry falls outside the requery period) */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there (purging by a key that matches nothing is expected to
   * trip a BUG, hence the capture) */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  }

  /* Another dummy key! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
  638. static void
  639. test_parse_extended_hostname(void *arg)
  640. {
  641. (void) arg;
  642. char address1[] = "fooaddress.onion";
  643. char address2[] = "aaaaaaaaaaaaaaaa.onion";
  644. char address3[] = "fooaddress.exit";
  645. char address4[] = "www.torproject.org";
  646. char address5[] = "foo.abcdefghijklmnop.onion";
  647. char address6[] = "foo.bar.abcdefghijklmnop.onion";
  648. char address7[] = ".abcdefghijklmnop.onion";
  649. char address8[] =
  650. "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  651. tt_assert(BAD_HOSTNAME == parse_extended_hostname(address1));
  652. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address2));
  653. tt_str_op(address2,OP_EQ, "aaaaaaaaaaaaaaaa");
  654. tt_assert(EXIT_HOSTNAME == parse_extended_hostname(address3));
  655. tt_assert(NORMAL_HOSTNAME == parse_extended_hostname(address4));
  656. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address5));
  657. tt_str_op(address5,OP_EQ, "abcdefghijklmnop");
  658. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address6));
  659. tt_str_op(address6,OP_EQ, "abcdefghijklmnop");
  660. tt_assert(BAD_HOSTNAME == parse_extended_hostname(address7));
  661. tt_assert(ONION_V3_HOSTNAME == parse_extended_hostname(address8));
  662. tt_str_op(address8, OP_EQ,
  663. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  664. done: ;
  665. }
  666. static void
  667. test_time_between_tp_and_srv(void *arg)
  668. {
  669. int ret;
  670. networkstatus_t ns;
  671. (void) arg;
  672. /* This function should be returning true where "^" are:
  673. *
  674. * +------------------------------------------------------------------+
  675. * | |
  676. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  677. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  678. * | |
  679. * | $==========|-----------$===========|-----------$===========| |
  680. * | ^^^^^^^^^^^^ ^^^^^^^^^^^^ |
  681. * | |
  682. * +------------------------------------------------------------------+
  683. */
  684. ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  685. tt_int_op(ret, OP_EQ, 0);
  686. ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  687. tt_int_op(ret, OP_EQ, 0);
  688. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  689. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  690. tt_int_op(ret, OP_EQ, 0);
  691. ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  692. tt_int_op(ret, OP_EQ, 0);
  693. ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
  694. tt_int_op(ret, OP_EQ, 0);
  695. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  696. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  697. tt_int_op(ret, OP_EQ, 0);
  698. ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  699. tt_int_op(ret, OP_EQ, 0);
  700. ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
  701. tt_int_op(ret, OP_EQ, 0);
  702. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  703. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  704. tt_int_op(ret, OP_EQ, 1);
  705. ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  706. tt_int_op(ret, OP_EQ, 0);
  707. ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
  708. tt_int_op(ret, OP_EQ, 0);
  709. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  710. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  711. tt_int_op(ret, OP_EQ, 1);
  712. ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
  713. tt_int_op(ret, OP_EQ, 0);
  714. ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  715. tt_int_op(ret, OP_EQ, 0);
  716. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  717. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  718. tt_int_op(ret, OP_EQ, 0);
  719. done:
  720. ;
  721. }
/************ Reachability Test (it is huge) ****************/

/* Simulate different consensus for client and service. Used by the
 * reachability test. The SRV and responsible HSDir list are used by all
 * reachability tests so make them common to simplify setup and teardown. */

/* Lazily-allocated mock consensus handed to the service side. */
static networkstatus_t *mock_service_ns = NULL;
/* Lazily-allocated mock consensus handed to the client side. */
static networkstatus_t *mock_client_ns = NULL;
/* Shared random values that scenarios plug into the mock consensuses
 * (filled with 0x00 and 0x01 bytes respectively in setup). */
static sr_srv_t current_srv, previous_srv;
/* Responsible HSDir lists as computed by the service and by the client;
 * created in setup_reachability_test() and freed in the matching cleanup. */
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;
  731. static networkstatus_t *
  732. mock_networkstatus_get_live_consensus_service(time_t now)
  733. {
  734. (void) now;
  735. if (mock_service_ns) {
  736. return mock_service_ns;
  737. }
  738. mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  739. mock_service_ns->routerstatus_list = smartlist_new();
  740. mock_service_ns->type = NS_TYPE_CONSENSUS;
  741. return mock_service_ns;
  742. }
/* Mock for networkstatus_get_latest_consensus() on the service side: simply
 * forward to the live-consensus mock, which ignores its time argument. */
static networkstatus_t *
mock_networkstatus_get_latest_consensus_service(void)
{
  return mock_networkstatus_get_live_consensus_service(0);
}
  748. static networkstatus_t *
  749. mock_networkstatus_get_live_consensus_client(time_t now)
  750. {
  751. (void) now;
  752. if (mock_client_ns) {
  753. return mock_client_ns;
  754. }
  755. mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  756. mock_client_ns->routerstatus_list = smartlist_new();
  757. mock_client_ns->type = NS_TYPE_CONSENSUS;
  758. return mock_client_ns;
  759. }
/* Mock for networkstatus_get_latest_consensus() on the client side: simply
 * forward to the live-consensus mock, which ignores its time argument. */
static networkstatus_t *
mock_networkstatus_get_latest_consensus_client(void)
{
  return mock_networkstatus_get_live_consensus_client(0);
}
  765. /* Mock function because we are not trying to test the close circuit that does
  766. * an awful lot of checks on the circuit object. */
  767. static void
  768. mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
  769. const char *file)
  770. {
  771. (void) circ;
  772. (void) reason;
  773. (void) line;
  774. (void) file;
  775. return;
  776. }
/* Initialize a big HSDir V3 hash ring: add 250 HSDir routerstatuses with
 * random nicknames to the given consensus <b>ns</b>. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;

  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate random nickname for each hsdir... */
    char nickname_binary[8];
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    /* 8 random bytes base64-encode to exactly 12 characters plus NUL. */
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }

  /* Make sure we have 250 hsdirs in our list */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);

 done:
  ;
}
/** Initialize service and publish its descriptor as needed. Return the newly
 * allocated service object to the caller (the global service map keeps a
 * reference to it as well via register_service()). */
static hs_service_t *
helper_init_service(time_t now)
{
  int retval;
  hs_service_t *service = hs_service_new(get_options());
  tt_assert(service);
  service->config.version = HS_VERSION_THREE;
  /* Generate a fresh ed25519 identity keypair for this service. */
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);

  /* Register service to global map. */
  retval = register_service(get_hs_service_map(), service);
  tt_int_op(retval, OP_EQ, 0);

  /* Initialize service descriptor: after this, both the current and the
   * next descriptor must exist. */
  build_all_descriptors(now);
  tt_assert(service->desc_current);
  tt_assert(service->desc_next);

 done:
  return service;
}
/* Helper function to set the RFC 1123 time string <b>timestr</b> into
 * <b>t</b>. Fails the enclosing test if either argument is NULL or the
 * string does not parse. */
static void
set_consensus_times(const char *timestr, time_t *t)
{
  tt_assert(timestr);
  tt_assert(t);

  int ret = parse_rfc1123_time(timestr, t);
  tt_int_op(ret, OP_EQ, 0);

 done:
  return;
}
  831. /* Helper function to cleanup the mock consensus (client and service) */
  832. static void
  833. cleanup_mock_ns(void)
  834. {
  835. if (mock_service_ns) {
  836. SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
  837. routerstatus_t *, rs, routerstatus_free(rs));
  838. smartlist_clear(mock_service_ns->routerstatus_list);
  839. mock_service_ns->sr_info.current_srv = NULL;
  840. mock_service_ns->sr_info.previous_srv = NULL;
  841. networkstatus_vote_free(mock_service_ns);
  842. mock_service_ns = NULL;
  843. }
  844. if (mock_client_ns) {
  845. SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
  846. routerstatus_t *, rs, routerstatus_free(rs));
  847. smartlist_clear(mock_client_ns->routerstatus_list);
  848. mock_client_ns->sr_info.current_srv = NULL;
  849. mock_client_ns->sr_info.previous_srv = NULL;
  850. networkstatus_vote_free(mock_client_ns);
  851. mock_client_ns = NULL;
  852. }
  853. }
/* Helper function to setup a reachability test. Once called, the
 * cleanup_reachability_test MUST be called at the end. */
static void
setup_reachability_test(void)
{
  /* Circuit closing is mocked out (the real path does heavy validation);
   * get_or_state is replaced so hs_init() presumably avoids touching real
   * on-disk state -- see get_or_state_replacement. */
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with: two distinct SRV byte patterns. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses (the "latest" mocks lazily allocate the
   * mock_service_ns / mock_client_ns globals). */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}
/* Helper function to cleanup a reachability test initial setup. Frees the
 * responsible-HSDir lists (pointers only; the routerstatuses belong to the
 * mock consensuses), tears down the HS subsystem and mock consensuses, and
 * removes the mocks installed by setup_reachability_test(). */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
/* A reachability test always check if the resulting service and client
 * responsible HSDir for the given parameters are equal.
 *
 * The client list has 6 entries and the service list 8 (it covers both of
 * its descriptors), so "equal" here means: every one of the 6 client HSDirs
 * also appears in the service's list.
 *
 * Return true iff that is the case. */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  /* Count client HSDirs that are present in the service's list, comparing
   * by identity digest. */
  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);

 done:
  return (count == 6);
}
/* Tor doesn't use such a function to get the previous HSDir, it is only used
 * in node_set_hsdir_index(). We need it here so we can test the reachability
 * scenario 6 that requires the previous time period to compute the list of
 * responsible HSDir because of the client state timing. The previous period
 * is simply the current period number minus one. */
static uint64_t
get_previous_time_period(time_t now)
{
  return hs_get_time_period_num(now) - 1;
}
/* Configuration of a reachability test scenario. Field order matters: the
 * reachability_scenarios[] initializers below rely on it. */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. These point at the static
   * current_srv/previous_srv objects; NULL leaves that SRV unset in the
   * corresponding mock consensus. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always use the current time
   * period thus why no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Is the client and service expected to be in a new time period. After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and test the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenario requires a hint that the client, because of its consensus
   * time, will request the "next" service descriptor so this indicates if it
   * is the case or not. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;
/* Some defines to help with semantics while reading a configuration below.
 * They map onto the *_in_new_tp and client_fetch_next_desc fields of
 * reachability_cfg_t. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
/* Table of reachability scenarios, terminated by an all-NULL/zero entry.
 * Each initializer row order follows the reachability_cfg_t field order. */
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                 ^  ^                                             |
   *  |                 S  C                                             |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 15:00,
   *  both are after TP#1 thus have access to SRV#1. Service and client should
   *  be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                      ^     ^                                     |
   *  |                      S     C                                     |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to 01:00,
   *  which makes the client after the SRV#2 and the service just before. The
   *  service should only be using TP#1. The client should be using TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|     |
   *  |                            ^  ^                                  |
   *  |                            S  C                                  |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to 05:00,
   *  which makes both after SRV#2. The service should be using TP#1 as its
   *  current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                  ^  ^                            |
   *  |                                  S  C                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to 13:00,
   *  which makes the service before TP#2 and the client just after. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                      ^     ^                                     |
   *  |                      C     S                                     |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to 23:00,
   *  which makes the service after SRV#2 and the client just before. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                  ^  ^                            |
   *  |                                  C  S                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 11:00,
   *  which makes the service after TP#2 and the client just before it.
   *  The service should be using TP#1 as its current time period and TP#2 as
   *  its next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
/* Run a single reachability scenario. num_scenario is the corresponding
 * scenario number from the documentation. It is used to log it in case of
 * failure so we know which scenario fails. Returns 0 on success, -1 (after
 * flagging a test failure) otherwise. */
static int
run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
{
  int ret = -1;
  hs_service_t *service;
  uint64_t service_tp, client_tp;
  ed25519_public_key_t service_blinded_pk, client_blinded_pk;

  setup_reachability_test();

  tt_assert(cfg);

  /* Set service consensus time. */
  set_consensus_times(cfg->service_valid_after,
                      &mock_service_ns->valid_after);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->valid_until);
  /* fresh_until mirrors valid_until so the mock consensus counts as live. */
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->fresh_until);
  voting_schedule_recalculate_timing(get_options(),
                                     mock_service_ns->valid_after);
  /* Set client consensus time. */
  set_consensus_times(cfg->client_valid_after,
                      &mock_client_ns->valid_after);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->valid_until);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->fresh_until);
  voting_schedule_recalculate_timing(get_options(),
                                     mock_client_ns->valid_after);

  /* New time period checks for this scenario. */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
            cfg->service_in_new_tp);
  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
            cfg->client_in_new_tp);

  /* Set the SRVs for this scenario. */
  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;

  /* Initialize a service to get keys. */
  service = helper_init_service(time(NULL));

  /*
   * === Client setup ===
   */

  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_client);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_client);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_client_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_client_ns);

  /* Client ONLY use the current time period. This is the whole point of these
   * reachability test that is to make sure the client can always reach the
   * service using only its current time period. */
  client_tp = hs_get_time_period_num(0);
  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          client_tp, &client_blinded_pk);
  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
                            client_responsible_hsdirs);
  /* Cleanup the nodelist so we can let the service computes its own set of
   * node with its own hashring. */
  cleanup_nodelist();
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /*
   * === Service setup ===
   */

  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_service);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_service);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_service_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_service_ns);

  service_tp = cfg->service_time_period_fn(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);

  /* A service builds two lists of responsible HSDir, for the current and the
   * next descriptor. Depending on the scenario, the client timing indicate if
   * it is fetching the current or the next descriptor so we use the
   * "client_fetch_next_desc" to know which one the client is trying to get to
   * confirm that the service computes the same hashring for the same blinded
   * key and service time period function. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /* Some testing of the values we just got from the client and service. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);

  /* Everything went well. */
  ret = 0;

 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}
  1235. static void
  1236. test_reachability(void *arg)
  1237. {
  1238. (void) arg;
  1239. /* NOTE: An important axiom to understand here is that SRV#N must only be
  1240. * used with TP#N value. For example, SRV#2 with TP#1 should NEVER be used
  1241. * together. The HSDir index computation is based on this axiom.*/
  1242. for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
  1243. int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
  1244. if (ret < 0) {
  1245. return;
  1246. }
  1247. }
  1248. }
/** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
 * its identity digest, base64-encoded, in <b>hsdir_digest_out</b> (the
 * caller must provide a buffer large enough for digest_to_base64()). */
static void
helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
                         char *hsdir_digest_out)
{
  tt_assert(onion_identity_pk);

  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  tt_assert(client_hsdir);
  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);

 done:
  ;
}
  1262. static void
  1263. test_hs_indexes(void *arg)
  1264. {
  1265. int ret;
  1266. uint64_t period_num = 42;
  1267. ed25519_public_key_t pubkey;
  1268. (void) arg;
  1269. /* Build the hs_index */
  1270. {
  1271. uint8_t hs_index[DIGEST256_LEN];
  1272. const char *b32_test_vector =
  1273. "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
  1274. char test_vector[DIGEST256_LEN];
  1275. ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
  1276. strlen(b32_test_vector));
  1277. tt_int_op(ret, OP_EQ, sizeof(test_vector));
  1278. /* Our test vector uses a public key set to 32 bytes of \x42. */
  1279. memset(&pubkey, '\x42', sizeof(pubkey));
  1280. hs_build_hs_index(1, &pubkey, period_num, hs_index);
  1281. tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
  1282. }
  1283. /* Build the hsdir_index */
  1284. {
  1285. uint8_t srv[DIGEST256_LEN];
  1286. uint8_t hsdir_index[DIGEST256_LEN];
  1287. const char *b32_test_vector =
  1288. "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
  1289. char test_vector[DIGEST256_LEN];
  1290. ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
  1291. strlen(b32_test_vector));
  1292. tt_int_op(ret, OP_EQ, sizeof(test_vector));
  1293. /* Our test vector uses a public key set to 32 bytes of \x42. */
  1294. memset(&pubkey, '\x42', sizeof(pubkey));
  1295. memset(srv, '\x43', sizeof(srv));
  1296. hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
  1297. tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
  1298. }
  1299. done:
  1300. ;
  1301. }
/* Positions within the SRV-to-TP / TP-to-SRV protocol run, used by
 * helper_set_consensus_and_system_time() below to pick consensus times. */
#define EARLY_IN_SRV_TO_TP 0
#define LATE_IN_SRV_TO_TP 1
#define EARLY_IN_TP_TO_SRV 2
#define LATE_IN_TP_TO_SRV 3
  1306. /** Set the consensus and system time based on <b>position</b>. See the
  1307. * following diagram for details:
  1308. *
  1309. * +------------------------------------------------------------------+
  1310. * | |
  1311. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1312. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1313. * | |
  1314. * | $==========|-----------$===========|----------$===========| |
  1315. * | |
  1316. * | |
  1317. * +------------------------------------------------------------------+
  1318. */
  1319. static time_t
  1320. helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
  1321. {
  1322. time_t real_time = 0;
  1323. /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
  1324. * valid_after is what matters here, the rest is just to specify the voting
  1325. * period correctly. */
  1326. if (position == LATE_IN_SRV_TO_TP) {
  1327. parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
  1328. parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
  1329. parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  1330. } else if (position == EARLY_IN_TP_TO_SRV) {
  1331. parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
  1332. parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
  1333. parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  1334. } else if (position == LATE_IN_TP_TO_SRV) {
  1335. parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
  1336. parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
  1337. parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
  1338. } else if (position == EARLY_IN_SRV_TO_TP) {
  1339. parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
  1340. parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
  1341. parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
  1342. } else {
  1343. tt_assert(0);
  1344. }
  1345. voting_schedule_recalculate_timing(get_options(), ns->valid_after);
  1346. /* Set system time: pretend to be just 2 minutes before consensus expiry */
  1347. real_time = ns->valid_until - 120;
  1348. update_approx_time(real_time);
  1349. done:
  1350. return real_time;
  1351. }
  1352. /** Helper function that carries out the actual test for
  1353. * test_client_service_sync() */
  1354. static void
  1355. helper_test_hsdir_sync(networkstatus_t *ns,
  1356. int service_position, int client_position,
  1357. int client_fetches_next_desc)
  1358. {
  1359. hs_service_descriptor_t *desc;
  1360. int retval;
  1361. /** Test logic:
  1362. * 1) Initialize service time: consensus and system time.
  1363. * 1.1) Initialize service hash ring
  1364. * 2) Initialize service and publish descriptors.
  1365. * 3) Initialize client time: consensus and system time.
  1366. * 3.1) Initialize client hash ring
  1367. * 4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
  1368. * the client was also picked by service.
  1369. */
  1370. /* 1) Initialize service time: consensus and real time */
  1371. time_t now = helper_set_consensus_and_system_time(ns, service_position);
  1372. helper_initialize_big_hash_ring(ns);
  1373. /* 2) Initialize service */
  1374. hs_service_t *service = helper_init_service(now);
  1375. desc = client_fetches_next_desc ? service->desc_next : service->desc_current;
  1376. /* Now let's upload our desc to all hsdirs */
  1377. upload_descriptor_to_all(service, desc);
  1378. /* Cleanup right now so we don't memleak on error. */
  1379. cleanup_nodelist();
  1380. /* Check that previous hsdirs were populated */
  1381. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);
  1382. /* 3) Initialize client time */
  1383. helper_set_consensus_and_system_time(ns, client_position);
  1384. cleanup_nodelist();
  1385. SMARTLIST_FOREACH(ns->routerstatus_list,
  1386. routerstatus_t *, rs, routerstatus_free(rs));
  1387. smartlist_clear(ns->routerstatus_list);
  1388. helper_initialize_big_hash_ring(ns);
  1389. /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
  1390. service. */
  1391. for (int y = 0 ; y < 6 ; y++) {
  1392. char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
  1393. helper_client_pick_hsdir(&service->keys.identity_pk,
  1394. client_hsdir_b64_digest);
  1395. /* CHECK: Go through the hsdirs chosen by the service and make sure that it
  1396. * contains the one picked by the client! */
  1397. retval = smartlist_contains_string(desc->previous_hsdirs,
  1398. client_hsdir_b64_digest);
  1399. tt_int_op(retval, OP_EQ, 1);
  1400. }
  1401. /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
  1402. * exhausted all of them: */
  1403. tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));
  1404. done:
  1405. /* At the end: free all services and initialize the subsystem again, we will
  1406. * need it for next scenario. */
  1407. cleanup_nodelist();
  1408. hs_service_free_all();
  1409. hs_service_init();
  1410. SMARTLIST_FOREACH(ns->routerstatus_list,
  1411. routerstatus_t *, rs, routerstatus_free(rs));
  1412. smartlist_clear(ns->routerstatus_list);
  1413. }
/** This test ensures that client and service will pick the same HSDirs, under
 *  various timing scenarios:
 *  a) Scenario where both client and service are in the time segment between
 *     SRV#N and TP#N:
 *  b) Scenario where both client and service are in the time segment between
 *     TP#N and SRV#N+1.
 *  c) Scenario where service is between SRV#N and TP#N, but client is between
 *     TP#N and SRV#N+1.
 *  d) Scenario where service is between TP#N and SRV#N+1, but client is
 *     between SRV#N and TP#N.
 *
 *  This test is important because it tests that upload_descriptor_to_all() is
 *  in synch with pick_hsdir_v3(). That's not the case for the
 *  test_reachability() test which only compares the responsible hsdir sets.
 */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  /* Mock out consensus, state, descriptor encoding and directory requests so
   * that no real networking or disk I/O happens during the test. */
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);

  hs_init();

  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);

  /** Now test the various synch scenarios. See the helper function for more
      details: */

  /*  a) Scenario where both client and service are in the time segment between
   *     SRV#N and TP#N. At this time the client fetches the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|    |
   *  |                  ^ ^                                             |
   *  |                  S C                                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);

  /*  b) Scenario where both client and service are in the time segment between
   *     TP#N and SRV#N+1. At this time the client fetches the second HS
   *     desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                      ^ ^                                         |
   *  |                      S C                                         |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);

  /*  c) Scenario where service is between SRV#N and TP#N, but client is
   *     between TP#N and SRV#N+1. Client is forward in time so it fetches the
   *     second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                  ^ ^                                             |
   *  |                  S C                                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);

  /*  d) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N and TP#N. Client is backwards in time so it fetches the
   *     first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                  ^ ^                                             |
   *  |                  C S                                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);

  /*  e) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N and TP#N (one period behind). Client is backwards in
   *     time so it fetches the first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                            ^ ^                                   |
   *  |                            C S                                   |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);

  /*  f) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
   *     the second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                            ^ ^                                   |
   *  |                            S C                                   |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);

 done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}
  1545. struct testcase_t hs_common_tests[] = {
  1546. { "build_address", test_build_address, TT_FORK,
  1547. NULL, NULL },
  1548. { "validate_address", test_validate_address, TT_FORK,
  1549. NULL, NULL },
  1550. { "time_period", test_time_period, TT_FORK,
  1551. NULL, NULL },
  1552. { "start_time_of_next_time_period", test_start_time_of_next_time_period,
  1553. TT_FORK, NULL, NULL },
  1554. { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
  1555. NULL, NULL },
  1556. { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
  1557. NULL, NULL },
  1558. { "disaster_srv", test_disaster_srv, TT_FORK,
  1559. NULL, NULL },
  1560. { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
  1561. NULL, NULL },
  1562. { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
  1563. NULL, NULL },
  1564. { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
  1565. NULL, NULL },
  1566. { "reachability", test_reachability, TT_FORK,
  1567. NULL, NULL },
  1568. { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
  1569. TT_FORK, NULL, NULL },
  1570. { "hs_indexes", test_hs_indexes, TT_FORK,
  1571. NULL, NULL },
  1572. END_OF_TESTCASES
  1573. };