test_hs_common.c 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827
  1. /* Copyright (c) 2017, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. /**
  4. * \file test_hs_common.c
  5. * \brief Test hidden service common functionalities.
  6. */
  7. #define HS_COMMON_PRIVATE
  8. #define HS_CLIENT_PRIVATE
  9. #define HS_SERVICE_PRIVATE
  10. #define NODELIST_PRIVATE
  11. #include "test.h"
  12. #include "test_helpers.h"
  13. #include "log_test_helpers.h"
  14. #include "hs_test_helpers.h"
  15. #include "connection_edge.h"
  16. #include "crypto_rand.h"
  17. #include "hs_common.h"
  18. #include "hs_client.h"
  19. #include "hs_service.h"
  20. #include "config.h"
  21. #include "networkstatus.h"
  22. #include "directory.h"
  23. #include "dirauth/dirvote.h"
  24. #include "nodelist.h"
  25. #include "routerlist.h"
  26. #include "statefile.h"
  27. #include "circuitlist.h"
  28. #include "dirauth/shared_random.h"
  29. #include "util.h"
  30. #include "voting_schedule.h"
  31. #include "networkstatus_st.h"
  32. #include "node_st.h"
  33. /** Test the validation of HS v3 addresses */
  34. static void
  35. test_validate_address(void *arg)
  36. {
  37. int ret;
  38. (void) arg;
  39. /* Address too short and too long. */
  40. setup_full_capture_of_logs(LOG_WARN);
  41. ret = hs_address_is_valid("blah");
  42. tt_int_op(ret, OP_EQ, 0);
  43. expect_log_msg_containing("has an invalid length");
  44. teardown_capture_of_logs();
  45. setup_full_capture_of_logs(LOG_WARN);
  46. ret = hs_address_is_valid(
  47. "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  48. tt_int_op(ret, OP_EQ, 0);
  49. expect_log_msg_containing("has an invalid length");
  50. teardown_capture_of_logs();
  51. /* Invalid checksum (taken from prop224) */
  52. setup_full_capture_of_logs(LOG_WARN);
  53. ret = hs_address_is_valid(
  54. "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  55. tt_int_op(ret, OP_EQ, 0);
  56. expect_log_msg_containing("invalid checksum");
  57. teardown_capture_of_logs();
  58. setup_full_capture_of_logs(LOG_WARN);
  59. ret = hs_address_is_valid(
  60. "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  61. tt_int_op(ret, OP_EQ, 0);
  62. expect_log_msg_containing("invalid checksum");
  63. teardown_capture_of_logs();
  64. /* Non base32 decodable string. */
  65. setup_full_capture_of_logs(LOG_WARN);
  66. ret = hs_address_is_valid(
  67. "????????????????????????????????????????????????????????");
  68. tt_int_op(ret, OP_EQ, 0);
  69. expect_log_msg_containing("can't be decoded");
  70. teardown_capture_of_logs();
  71. /* Valid address. */
  72. ret = hs_address_is_valid(
  73. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  74. tt_int_op(ret, OP_EQ, 1);
  75. done:
  76. ;
  77. }
  78. static int
  79. mock_write_str_to_file(const char *path, const char *str, int bin)
  80. {
  81. (void)bin;
  82. tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  83. tt_str_op(str, OP_EQ,
  84. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");
  85. done:
  86. return 0;
  87. }
  88. /** Test building HS v3 onion addresses. Uses test vectors from the
  89. * ./hs_build_address.py script. */
  90. static void
  91. test_build_address(void *arg)
  92. {
  93. int ret;
  94. char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  95. ed25519_public_key_t pubkey;
  96. /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  97. char pubkey_hex[] =
  98. "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  99. hs_service_t *service = NULL;
  100. (void) arg;
  101. MOCK(write_str_to_file, mock_write_str_to_file);
  102. /* The following has been created with hs_build_address.py script that
  103. * follows proposal 224 specification to build an onion address. */
  104. static const char *test_addr =
  105. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";
  106. /* Let's try to build the same onion address as the script */
  107. base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
  108. pubkey_hex, strlen(pubkey_hex));
  109. hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  110. tt_str_op(test_addr, OP_EQ, onion_addr);
  111. /* Validate that address. */
  112. ret = hs_address_is_valid(onion_addr);
  113. tt_int_op(ret, OP_EQ, 1);
  114. service = tor_malloc_zero(sizeof(hs_service_t));
  115. memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  116. tor_asprintf(&service->config.directory_path, "/double/five");
  117. ret = write_address_to_file(service, "squared");
  118. tt_int_op(ret, OP_EQ, 0);
  119. done:
  120. hs_service_free(service);
  121. }
  122. /** Test that our HS time period calculation functions work properly */
  123. static void
  124. test_time_period(void *arg)
  125. {
  126. (void) arg;
  127. uint64_t tn;
  128. int retval;
  129. time_t fake_time, correct_time, start_time;
  130. /* Let's do the example in prop224 section [TIME-PERIODS] */
  131. retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
  132. &fake_time);
  133. tt_int_op(retval, OP_EQ, 0);
  134. /* Check that the time period number is right */
  135. tn = hs_get_time_period_num(fake_time);
  136. tt_u64_op(tn, OP_EQ, 16903);
  137. /* Increase current time to 11:59:59 UTC and check that the time period
  138. number is still the same */
  139. fake_time += 3599;
  140. tn = hs_get_time_period_num(fake_time);
  141. tt_u64_op(tn, OP_EQ, 16903);
  142. { /* Check start time of next time period */
  143. retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
  144. &correct_time);
  145. tt_int_op(retval, OP_EQ, 0);
  146. start_time = hs_get_start_time_of_next_time_period(fake_time);
  147. tt_int_op(start_time, OP_EQ, correct_time);
  148. }
  149. /* Now take time to 12:00:00 UTC and check that the time period rotated */
  150. fake_time += 1;
  151. tn = hs_get_time_period_num(fake_time);
  152. tt_u64_op(tn, OP_EQ, 16904);
  153. /* Now also check our hs_get_next_time_period_num() function */
  154. tn = hs_get_next_time_period_num(fake_time);
  155. tt_u64_op(tn, OP_EQ, 16905);
  156. { /* Check start time of next time period again */
  157. retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
  158. &correct_time);
  159. tt_int_op(retval, OP_EQ, 0);
  160. start_time = hs_get_start_time_of_next_time_period(fake_time);
  161. tt_int_op(start_time, OP_EQ, correct_time);
  162. }
  163. /* Now do another sanity check: The time period number at the start of the
  164. * next time period, must be the same time period number as the one returned
  165. * from hs_get_next_time_period_num() */
  166. {
  167. time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
  168. tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
  169. hs_get_next_time_period_num(fake_time));
  170. }
  171. done:
  172. ;
  173. }
/** Test that we can correctly find the start time of the next time period */
static void
test_start_time_of_next_time_period(void *arg)
{
  (void) arg;
  int retval;
  time_t fake_time;
  char tbuf[ISO_TIME_LEN + 1];
  time_t next_tp_start_time;

  /* Do some basic tests */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);

  /* Another test with an edge-case time (start of TP) */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  format_iso_time(tbuf, next_tp_start_time);
  /* Exactly at the boundary we expect the *following* period's start, a full
   * day later — not the boundary itself. */
  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);

  {
    /* Now pretend we are on a testing network and alter the voting schedule to
       be every 10 seconds. This means that a time period has length 10*24
       seconds (4 minutes). It also means that we apply a rotational offset of
       120 seconds to the time period, so that it starts at 00:02:00 instead of
       00:00:00.

       NOTE: this mutates the global options; tests relying on default voting
       intervals should not run after this in the same process without
       resetting them. */
    or_options_t *options = get_options_mutable();
    options->TestingTorNetwork = 1;
    options->V3AuthVotingInterval = 10;
    options->TestingV3AuthInitialVotingInterval = 10;

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  }

 done:
  ;
}
  226. /* Cleanup the global nodelist. It also frees the "md" in the node_t because
  227. * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
  228. static void
  229. cleanup_nodelist(void)
  230. {
  231. smartlist_t *nodelist = nodelist_get_list();
  232. SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
  233. tor_free(node->md);
  234. node->md = NULL;
  235. } SMARTLIST_FOREACH_END(node);
  236. nodelist_free_all();
  237. }
/* Build a routerstatus + routerinfo pair for a fake relay whose identity
 * digest and ed25519 signing key bytes are all <b>identity_idx</b>, register
 * it in the global nodelist, and append the routerstatus to <b>ns</b>. The
 * relay is flagged as an HSDir iff <b>is_hsdir</b> is non-zero. */
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  tor_addr_t ipv4_addr;

  /* Derive a deterministic identity from the index, so each call with a
   * distinct index produces a distinct, reproducible relay. */
  memset(identity, identity_idx, sizeof(identity));

  memcpy(rs->identity_digest, identity, DIGEST_LEN);
  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  tor_addr_parse(&ipv4_addr, "1.2.3.4");
  ri->addr = tor_addr_to_ipv4h(&ipv4_addr);
  rs->addr = tor_addr_to_ipv4h(&ipv4_addr);
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now the nodelist_set_routerinfo() function needs a "rs" to set
   * the indexes which it doesn't have when it is called. */
  node_set_hsdir_index(node, ns);
  /* Detach the routerinfo from the node before freeing our reference below;
   * otherwise the node would be left pointing at freed memory. */
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  routerinfo_free(ri);
}
  278. static networkstatus_t *mock_ns = NULL;
  279. static networkstatus_t *
  280. mock_networkstatus_get_latest_consensus(void)
  281. {
  282. time_t now = approx_time();
  283. /* If initialized, return it */
  284. if (mock_ns) {
  285. return mock_ns;
  286. }
  287. /* Initialize fake consensus */
  288. mock_ns = tor_malloc_zero(sizeof(networkstatus_t));
  289. /* This consensus is live */
  290. mock_ns->valid_after = now-1;
  291. mock_ns->fresh_until = now+1;
  292. mock_ns->valid_until = now+2;
  293. /* Create routerstatus list */
  294. mock_ns->routerstatus_list = smartlist_new();
  295. mock_ns->type = NS_TYPE_CONSENSUS;
  296. return mock_ns;
  297. }
/* Mock for networkstatus_get_live_consensus(): return the mock_ns global
 * regardless of <b>now</b>. Asserts that the mock consensus was initialized
 * first (via mock_networkstatus_get_latest_consensus()). */
static networkstatus_t *
mock_networkstatus_get_live_consensus(time_t now)
{
  (void) now;

  tt_assert(mock_ns);

 done:
  return mock_ns;
}
  306. /** Test the responsible HSDirs calculation function */
  307. static void
  308. test_responsible_hsdirs(void *arg)
  309. {
  310. smartlist_t *responsible_dirs = smartlist_new();
  311. networkstatus_t *ns = NULL;
  312. (void) arg;
  313. hs_init();
  314. MOCK(networkstatus_get_latest_consensus,
  315. mock_networkstatus_get_latest_consensus);
  316. ns = networkstatus_get_latest_consensus();
  317. { /* First router: HSdir */
  318. helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  319. }
  320. { /* Second HSDir */
  321. helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  322. }
  323. { /* Third relay but not HSDir */
  324. helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  325. }
  326. /* Use a fixed time period and pub key so we always take the same path */
  327. ed25519_public_key_t pubkey;
  328. uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
  329. memset(&pubkey, 42, sizeof(pubkey));
  330. hs_get_responsible_hsdirs(&pubkey, time_period_num,
  331. 0, 0, responsible_dirs);
  332. /* Make sure that we only found 2 responsible HSDirs.
  333. * The third relay was not an hsdir! */
  334. tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);
  335. /** TODO: Build a bigger network and do more tests here */
  336. done:
  337. SMARTLIST_FOREACH(ns->routerstatus_list,
  338. routerstatus_t *, rs, routerstatus_free(rs));
  339. smartlist_free(responsible_dirs);
  340. smartlist_clear(ns->routerstatus_list);
  341. networkstatus_vote_free(mock_ns);
  342. cleanup_nodelist();
  343. }
  344. static void
  345. mock_directory_initiate_request(directory_request_t *req)
  346. {
  347. (void)req;
  348. return;
  349. }
  350. static int
  351. mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
  352. const ed25519_keypair_t *signing_kp,
  353. char **encoded_out)
  354. {
  355. (void)desc;
  356. (void)signing_kp;
  357. tor_asprintf(encoded_out, "lulu");
  358. return 0;
  359. }
/* Fake or_state_t handed out by the mock below; zero-filled at program start
 * by virtue of having static storage duration. */
static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters) */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}
/* Mock for router_have_minimum_dir_info(): pretend we always have enough
 * directory information, so upload logic is never blocked on it. */
static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
/** Test that we correctly detect when the HSDir hash ring changes so that we
 * reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  /* Mock out everything that would otherwise touch the network, disk state
   * or the real consensus. */
  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /** Test logic:
   * 1) Upload descriptor to HSDirs
   * CHECK that previous_hsdirs list was populated.
   * 2) Then call router_dir_info_changed() without an HSDir set change.
   * CHECK that no reupload occurs.
   * 3) Now change the HSDir set, and call dir_info_changed() again.
   * CHECK that reupload occurs.
   * 4) Finally call service_desc_schedule_upload().
   * CHECK that previous_hsdirs list was cleared.
   **/

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  char pubkey_hex[] =
    "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
                pubkey_hex, strlen(pubkey_hex));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  /* Ownership of desc transfers to the service here; it is freed later via
   * hs_free_all() tearing the service map down. */
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);

  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in hash ring so far, so the upload
   * time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
     ring change */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  /* The prev-hsdir set is only cleared by a scheduled upload, so it still
   * holds the 6 dirs from the first upload. */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
     aaron for nora */
  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
     HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  /* NOTE(review): "ns" aliases the static mock_ns, which is freed below
   * without resetting mock_ns to NULL — presumably safe because each test
   * runs in isolation; confirm before reordering tests in one process. */
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(ns);
  cleanup_nodelist();
  hs_free_all();
}
/** Test disaster SRV computation and caching */
static void
test_disaster_srv(void *arg)
{
  uint8_t *cached_disaster_srv_one = NULL;
  uint8_t *cached_disaster_srv_two = NULL;
  uint8_t srv_one[DIGEST256_LEN] = {0};
  uint8_t srv_two[DIGEST256_LEN] = {0};
  uint8_t srv_three[DIGEST256_LEN] = {0};
  uint8_t srv_four[DIGEST256_LEN] = {0};
  uint8_t srv_five[DIGEST256_LEN] = {0};

  (void) arg;

  /* Get the cached SRVs: we gonna use them later for verification */
  cached_disaster_srv_one = get_first_cached_disaster_srv();
  cached_disaster_srv_two = get_second_cached_disaster_srv();

  /* Compute some srvs */
  get_disaster_srv(1, srv_one);
  get_disaster_srv(2, srv_two);

  /* Check that the cached ones where updated */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for an SRV that has already been computed */
  get_disaster_srv(2, srv_two);
  /* and check that the cache entries have not changed */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for a new SRV: the cache holds only two entries, and the assertions
   * below show that time period 3 overwrites the first slot while the second
   * slot keeps srv_two. */
  get_disaster_srv(3, srv_three);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for another SRV: none of the original SRVs should now be cached */
  get_disaster_srv(4, srv_four);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

  /* Ask for yet another SRV: the slots keep being recycled alternately. */
  get_disaster_srv(5, srv_five);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

 done:
  ;
}
/** Test our HS descriptor request tracker by making various requests and
 * checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

  /* Let's register a hid serv request */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  /* The "set" flag (last arg) is 1, so this records "now" and returns it. */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Check that we can clean the first request based on time: only the entry
   * recorded at "now" is old enough to be expired by this cutoff. */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  }

  /* Another dummy key! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);

  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  /* The purge replaced the map, so re-fetch it before checking its size. */
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
  637. static void
  638. test_parse_extended_hostname(void *arg)
  639. {
  640. (void) arg;
  641. char address1[] = "fooaddress.onion";
  642. char address2[] = "aaaaaaaaaaaaaaaa.onion";
  643. char address3[] = "fooaddress.exit";
  644. char address4[] = "www.torproject.org";
  645. char address5[] = "foo.abcdefghijklmnop.onion";
  646. char address6[] = "foo.bar.abcdefghijklmnop.onion";
  647. char address7[] = ".abcdefghijklmnop.onion";
  648. char address8[] =
  649. "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  650. tt_assert(BAD_HOSTNAME == parse_extended_hostname(address1));
  651. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address2));
  652. tt_str_op(address2,OP_EQ, "aaaaaaaaaaaaaaaa");
  653. tt_assert(EXIT_HOSTNAME == parse_extended_hostname(address3));
  654. tt_assert(NORMAL_HOSTNAME == parse_extended_hostname(address4));
  655. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address5));
  656. tt_str_op(address5,OP_EQ, "abcdefghijklmnop");
  657. tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address6));
  658. tt_str_op(address6,OP_EQ, "abcdefghijklmnop");
  659. tt_assert(BAD_HOSTNAME == parse_extended_hostname(address7));
  660. tt_assert(ONION_V3_HOSTNAME == parse_extended_hostname(address8));
  661. tt_str_op(address8, OP_EQ,
  662. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  663. done: ;
  664. }
  665. static void
  666. test_time_between_tp_and_srv(void *arg)
  667. {
  668. int ret;
  669. networkstatus_t ns;
  670. (void) arg;
  671. /* This function should be returning true where "^" are:
  672. *
  673. * +------------------------------------------------------------------+
  674. * | |
  675. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  676. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  677. * | |
  678. * | $==========|-----------$===========|-----------$===========| |
  679. * | ^^^^^^^^^^^^ ^^^^^^^^^^^^ |
  680. * | |
  681. * +------------------------------------------------------------------+
  682. */
  683. ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  684. tt_int_op(ret, OP_EQ, 0);
  685. ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  686. tt_int_op(ret, OP_EQ, 0);
  687. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  688. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  689. tt_int_op(ret, OP_EQ, 0);
  690. ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  691. tt_int_op(ret, OP_EQ, 0);
  692. ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
  693. tt_int_op(ret, OP_EQ, 0);
  694. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  695. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  696. tt_int_op(ret, OP_EQ, 0);
  697. ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  698. tt_int_op(ret, OP_EQ, 0);
  699. ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
  700. tt_int_op(ret, OP_EQ, 0);
  701. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  702. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  703. tt_int_op(ret, OP_EQ, 1);
  704. ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  705. tt_int_op(ret, OP_EQ, 0);
  706. ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
  707. tt_int_op(ret, OP_EQ, 0);
  708. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  709. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  710. tt_int_op(ret, OP_EQ, 1);
  711. ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
  712. tt_int_op(ret, OP_EQ, 0);
  713. ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  714. tt_int_op(ret, OP_EQ, 0);
  715. voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  716. ret = hs_in_period_between_tp_and_srv(&ns, 0);
  717. tt_int_op(ret, OP_EQ, 0);
  718. done:
  719. ;
  720. }
/************ Reachability Test (it is huge) ****************/
/* Simulate different consensus for client and service. Used by the
 * reachability test. The SRV and responsible HSDir list are used by all
 * reachability tests so make them common to simplify setup and teardown. */
/* Lazily allocated mocked consensuses; freed by cleanup_mock_ns(). */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
/* Two distinct shared-random values; initialized (all-0x00 / all-0x01
 * bytes) in setup_reachability_test() and wired into scenarios. */
static sr_srv_t current_srv, previous_srv;
/* Responsible-HSDir lists computed on the service and client side; created
 * in setup_reachability_test(), freed in cleanup_reachability_test(). */
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;
  730. static networkstatus_t *
  731. mock_networkstatus_get_live_consensus_service(time_t now)
  732. {
  733. (void) now;
  734. if (mock_service_ns) {
  735. return mock_service_ns;
  736. }
  737. mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  738. mock_service_ns->routerstatus_list = smartlist_new();
  739. mock_service_ns->type = NS_TYPE_CONSENSUS;
  740. return mock_service_ns;
  741. }
  742. static networkstatus_t *
  743. mock_networkstatus_get_latest_consensus_service(void)
  744. {
  745. return mock_networkstatus_get_live_consensus_service(0);
  746. }
  747. static networkstatus_t *
  748. mock_networkstatus_get_live_consensus_client(time_t now)
  749. {
  750. (void) now;
  751. if (mock_client_ns) {
  752. return mock_client_ns;
  753. }
  754. mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  755. mock_client_ns->routerstatus_list = smartlist_new();
  756. mock_client_ns->type = NS_TYPE_CONSENSUS;
  757. return mock_client_ns;
  758. }
  759. static networkstatus_t *
  760. mock_networkstatus_get_latest_consensus_client(void)
  761. {
  762. return mock_networkstatus_get_live_consensus_client(0);
  763. }
  764. /* Mock function because we are not trying to test the close circuit that does
  765. * an awful lot of checks on the circuit object. */
  766. static void
  767. mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
  768. const char *file)
  769. {
  770. (void) circ;
  771. (void) reason;
  772. (void) line;
  773. (void) file;
  774. return;
  775. }
/* Initialize a big HSDir V3 hash ring: populate <b>ns</b> with 250 HSDir
 * routerstatuses, each with a random base64 nickname. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;

  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate random nickname for each hsdir... */
    char nickname_binary[8];
    /* 8 random bytes base64-encode to 12 characters + NUL. */
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }

  /* Make sure we have 250 hsdirs in our list. */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);

 done:
  ;
}
  797. /** Initialize service and publish its descriptor as needed. Return the newly
  798. * allocated service object to the caller. */
  799. static hs_service_t *
  800. helper_init_service(time_t now)
  801. {
  802. int retval;
  803. hs_service_t *service = hs_service_new(get_options());
  804. tt_assert(service);
  805. service->config.version = HS_VERSION_THREE;
  806. ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  807. ed25519_public_key_generate(&service->keys.identity_pk,
  808. &service->keys.identity_sk);
  809. /* Register service to global map. */
  810. retval = register_service(get_hs_service_map(), service);
  811. tt_int_op(retval, OP_EQ, 0);
  812. /* Initialize service descriptor */
  813. build_all_descriptors(now);
  814. tt_assert(service->desc_current);
  815. tt_assert(service->desc_next);
  816. done:
  817. return service;
  818. }
  819. /* Helper function to set the RFC 1123 time string into t. */
  820. static void
  821. set_consensus_times(const char *timestr, time_t *t)
  822. {
  823. tt_assert(timestr);
  824. tt_assert(t);
  825. int ret = parse_rfc1123_time(timestr, t);
  826. tt_int_op(ret, OP_EQ, 0);
  827. done:
  828. return;
  829. }
  830. /* Helper function to cleanup the mock consensus (client and service) */
  831. static void
  832. cleanup_mock_ns(void)
  833. {
  834. if (mock_service_ns) {
  835. SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
  836. routerstatus_t *, rs, routerstatus_free(rs));
  837. smartlist_clear(mock_service_ns->routerstatus_list);
  838. mock_service_ns->sr_info.current_srv = NULL;
  839. mock_service_ns->sr_info.previous_srv = NULL;
  840. networkstatus_vote_free(mock_service_ns);
  841. mock_service_ns = NULL;
  842. }
  843. if (mock_client_ns) {
  844. SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
  845. routerstatus_t *, rs, routerstatus_free(rs));
  846. smartlist_clear(mock_client_ns->routerstatus_list);
  847. mock_client_ns->sr_info.current_srv = NULL;
  848. mock_client_ns->sr_info.previous_srv = NULL;
  849. networkstatus_vote_free(mock_client_ns);
  850. mock_client_ns = NULL;
  851. }
  852. }
/* Helper function to setup a reachability test. Once called, the
 * cleanup_reachability_test MUST be called at the end. */
static void
setup_reachability_test(void)
{
  /* Neutralize circuit closing and OR-state access for the duration of the
   * test; unmocked (in reverse order) by cleanup_reachability_test(). */
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with: two distinguishable SRVs, one filled with 0x00
   * bytes and one with 0x01 bytes. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses (lazy allocation happens in the mocks). */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}
/* Helper function to cleanup a reachability test initial setup. Mirrors
 * setup_reachability_test(): frees the HSDir lists, tears down the HS
 * subsystem and mocked consensuses, then removes the mocks in reverse
 * order of installation. */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
/* A reachability test always checks if the resulting service and client
 * responsible HSDirs for the given parameters overlap as expected.
 *
 * The client is expected to compute 6 responsible HSDirs and the service 8
 * (it covers two descriptors). Return true iff every one of the 6
 * client-side HSDirs also appears (by identity digest) in the service-side
 * list — i.e. the client set is a subset of the service set. */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  /* Count how many client HSDirs are present in the service list. */
  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);

 done:
  return (count == 6);
}
  907. /* Tor doesn't use such a function to get the previous HSDir, it is only used
  908. * in node_set_hsdir_index(). We need it here so we can test the reachability
  909. * scenario 6 that requires the previous time period to compute the list of
  910. * responsible HSDir because of the client state timing. */
  911. static uint64_t
  912. get_previous_time_period(time_t now)
  913. {
  914. return hs_get_time_period_num(now) - 1;
  915. }
/* Configuration of a reachability test scenario. One entry of
 * reachability_scenarios[] below; consumed by run_reachability_scenario(). */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. NULL means "no such SRV"
   * for that side. These point at the static current_srv/previous_srv. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always use the current time
   * period thus why no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Is the client and service expected to be in a new time period. After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and test the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenario requires a hint that the client, because of its consensus
   * time, will request the "next" service descriptor so this indicates if it
   * is the case or not. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;
/* Some defines to help with semantic while reading a configuration below. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1

/* Table of scenarios walked by test_reachability(); terminated by an
 * all-NULL end marker. */
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                ^ ^                                               |
   *  |                S C                                               |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 15:00,
   *  both are after TP#1 thus have access to SRV#1. Service and client should
   *  be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                        ^ ^                                       |
   *  |                        S C                                       |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to 01:00,
   *  which makes the client after the SRV#2 and the service just before. The
   *  service should only be using TP#1. The client should be using TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|    |
   *  |                            ^ ^                                   |
   *  |                            S C                                   |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to 05:00,
   *  which makes both after SRV#2. The service should be using TP#1 as its
   *  current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                   ^ ^                            |
   *  |                                   S C                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to 13:00,
   *  which makes the service before TP#2 and the client just after. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                        ^ ^                                       |
   *  |                        C S                                       |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to 23:00,
   *  which makes the service after SRV#2 and the client just before. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                   ^ ^                            |
   *  |                                   C S                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 11:00,
   *  which makes the service outside after TP#2 and the client just before.
   *  The service should be using TP#1 as its current time period and TP#2 as
   *  its next. The client should be using TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
/* Run a single reachability scenario. num_scenario is the corresponding
 * scenario number from the documentation. It is used to log it in case of
 * failure so we know which scenario fails. Returns 0 on success, -1 on
 * failure (and flags the test as failed). */
static int
run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
{
  int ret = -1;
  hs_service_t *service;
  uint64_t service_tp, client_tp;
  ed25519_public_key_t service_blinded_pk, client_blinded_pk;

  setup_reachability_test();

  tt_assert(cfg);

  /* Set service consensus time. Note: fresh_until is set to the same value
   * as valid_until for this test. */
  set_consensus_times(cfg->service_valid_after,
                      &mock_service_ns->valid_after);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->valid_until);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->fresh_until);
  voting_schedule_recalculate_timing(get_options(),
                                     mock_service_ns->valid_after);
  /* Set client consensus time. */
  set_consensus_times(cfg->client_valid_after,
                      &mock_client_ns->valid_after);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->valid_until);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->fresh_until);
  voting_schedule_recalculate_timing(get_options(),
                                     mock_client_ns->valid_after);

  /* New time period checks for this scenario. */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
            cfg->service_in_new_tp);
  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
            cfg->client_in_new_tp);

  /* Set the SRVs for this scenario. */
  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;

  /* Initialize a service to get keys. */
  service = helper_init_service(time(NULL));

  /*
   * === Client setup ===
   */

  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_client);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_client);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_client_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_client_ns);

  /* Client ONLY use the current time period. This is the whole point of these
   * reachability test that is to make sure the client can always reach the
   * service using only its current time period. */
  client_tp = hs_get_time_period_num(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          client_tp, &client_blinded_pk);
  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
                            client_responsible_hsdirs);
  /* Cleanup the nodelist so we can let the service computes its own set of
   * node with its own hashring. */
  cleanup_nodelist();
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /*
   * === Service setup ===
   */

  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_service);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_service);

  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_service_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_service_ns);

  /* The service uses the scenario's time period function (current, next, or
   * previous TP depending on the configuration). */
  service_tp = cfg->service_time_period_fn(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);

  /* A service builds two lists of responsible HSDir, for the current and the
   * next descriptor. Depending on the scenario, the client timing indicate if
   * it is fetching the current or the next descriptor so we use the
   * "client_fetch_next_desc" to know which one the client is trying to get to
   * confirm that the service computes the same hashring for the same blinded
   * key and service time period function. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /* Some testing of the values we just got from the client and service. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);

  /* Everything went well. */
  ret = 0;

 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}
  1234. static void
  1235. test_reachability(void *arg)
  1236. {
  1237. (void) arg;
  1238. /* NOTE: An important axiom to understand here is that SRV#N must only be
  1239. * used with TP#N value. For example, SRV#2 with TP#1 should NEVER be used
  1240. * together. The HSDir index computation is based on this axiom.*/
  1241. for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
  1242. int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
  1243. if (ret < 0) {
  1244. return;
  1245. }
  1246. }
  1247. }
  1248. /** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
  1249. * its identity digest in <b>hsdir_digest_out</b>. */
  1250. static void
  1251. helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
  1252. char *hsdir_digest_out)
  1253. {
  1254. tt_assert(onion_identity_pk);
  1255. routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  1256. tt_assert(client_hsdir);
  1257. digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);
  1258. done:
  1259. ;
  1260. }
  1261. static void
  1262. test_hs_indexes(void *arg)
  1263. {
  1264. int ret;
  1265. uint64_t period_num = 42;
  1266. ed25519_public_key_t pubkey;
  1267. (void) arg;
  1268. /* Build the hs_index */
  1269. {
  1270. uint8_t hs_index[DIGEST256_LEN];
  1271. const char *b32_test_vector =
  1272. "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
  1273. char test_vector[DIGEST256_LEN];
  1274. ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
  1275. strlen(b32_test_vector));
  1276. tt_int_op(ret, OP_EQ, sizeof(test_vector));
  1277. /* Our test vector uses a public key set to 32 bytes of \x42. */
  1278. memset(&pubkey, '\x42', sizeof(pubkey));
  1279. hs_build_hs_index(1, &pubkey, period_num, hs_index);
  1280. tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
  1281. }
  1282. /* Build the hsdir_index */
  1283. {
  1284. uint8_t srv[DIGEST256_LEN];
  1285. uint8_t hsdir_index[DIGEST256_LEN];
  1286. const char *b32_test_vector =
  1287. "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
  1288. char test_vector[DIGEST256_LEN];
  1289. ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
  1290. strlen(b32_test_vector));
  1291. tt_int_op(ret, OP_EQ, sizeof(test_vector));
  1292. /* Our test vector uses a public key set to 32 bytes of \x42. */
  1293. memset(&pubkey, '\x42', sizeof(pubkey));
  1294. memset(srv, '\x43', sizeof(srv));
  1295. hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
  1296. tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
  1297. }
  1298. done:
  1299. ;
  1300. }
  1301. #define EARLY_IN_SRV_TO_TP 0
  1302. #define LATE_IN_SRV_TO_TP 1
  1303. #define EARLY_IN_TP_TO_SRV 2
  1304. #define LATE_IN_TP_TO_SRV 3
  1305. /** Set the consensus and system time based on <b>position</b>. See the
  1306. * following diagram for details:
  1307. *
  1308. * +------------------------------------------------------------------+
  1309. * | |
  1310. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1311. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1312. * | |
  1313. * | $==========|-----------$===========|----------$===========| |
  1314. * | |
  1315. * | |
  1316. * +------------------------------------------------------------------+
  1317. */
  1318. static time_t
  1319. helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
  1320. {
  1321. time_t real_time = 0;
  1322. /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
  1323. * valid_after is what matters here, the rest is just to specify the voting
  1324. * period correctly. */
  1325. if (position == LATE_IN_SRV_TO_TP) {
  1326. parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
  1327. parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
  1328. parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  1329. } else if (position == EARLY_IN_TP_TO_SRV) {
  1330. parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
  1331. parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
  1332. parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  1333. } else if (position == LATE_IN_TP_TO_SRV) {
  1334. parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
  1335. parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
  1336. parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
  1337. } else if (position == EARLY_IN_SRV_TO_TP) {
  1338. parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
  1339. parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
  1340. parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
  1341. } else {
  1342. tt_assert(0);
  1343. }
  1344. voting_schedule_recalculate_timing(get_options(), ns->valid_after);
  1345. /* Set system time: pretend to be just 2 minutes before consensus expiry */
  1346. real_time = ns->valid_until - 120;
  1347. update_approx_time(real_time);
  1348. done:
  1349. return real_time;
  1350. }
/** Helper function that carries out the actual test for
 * test_client_service_sync(). <b>ns</b> is the (mocked) consensus to mutate;
 * <b>service_position</b> and <b>client_position</b> place the service and
 * the client on the SRV/TP timeline (EARLY_IN_*/LATE_IN_* values);
 * <b>client_fetches_next_desc</b> selects which of the service's two
 * descriptors the client is expected to fetch: the next one (non-zero) or
 * the current one (zero). */
static void
helper_test_hsdir_sync(networkstatus_t *ns,
                       int service_position, int client_position,
                       int client_fetches_next_desc)
{
  hs_service_descriptor_t *desc;
  int retval;

  /** Test logic:
   * 1) Initialize service time: consensus and system time.
   *   1.1) Initialize service hash ring
   * 2) Initialize service and publish descriptors.
   * 3) Initialize client time: consensus and system time.
   *   3.1) Initialize client hash ring
   * 4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
   *    the client was also picked by service.
   */

  /* 1) Initialize service time: consensus and real time */
  time_t now = helper_set_consensus_and_system_time(ns, service_position);
  helper_initialize_big_hash_ring(ns);

  /* 2) Initialize service */
  hs_service_t *service = helper_init_service(now);
  desc = client_fetches_next_desc ? service->desc_next : service->desc_current;

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Cleanup right now so we don't memleak on error. */
  cleanup_nodelist();
  /* Check that previous hsdirs were populated. 8 here matches the number of
   * responsible HSDirs the upload targets -- presumably
   * replicas * spread-store from the consensus params; TODO confirm. */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);

  /* 3) Initialize client time */
  helper_set_consensus_and_system_time(ns, client_position);

  /* Rebuild the hash ring from scratch for the client: drop the service-side
   * nodelist and routerstatuses before repopulating. */
  cleanup_nodelist();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  helper_initialize_big_hash_ring(ns);

  /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
     service. */
  for (int y = 0 ; y < 6 ; y++) {
    char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
    helper_client_pick_hsdir(&service->keys.identity_pk,
                             client_hsdir_b64_digest);

    /* CHECK: Go through the hsdirs chosen by the service and make sure that it
     * contains the one picked by the client! */
    retval = smartlist_contains_string(desc->previous_hsdirs,
                                       client_hsdir_b64_digest);
    tt_int_op(retval, OP_EQ, 1);
  }

  /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
   * exhausted all of them: */
  tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));

 done:
  /* At the end: free all services and initialize the subsystem again, we will
   * need it for next scenario. */
  cleanup_nodelist();
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}
  1413. /** This test ensures that client and service will pick the same HSDirs, under
  1414. * various timing scenarios:
  1415. * a) Scenario where both client and service are in the time segment between
  1416. * SRV#N and TP#N:
  1417. * b) Scenario where both client and service are in the time segment between
  1418. * TP#N and SRV#N+1.
  1419. * c) Scenario where service is between SRV#N and TP#N, but client is between
  1420. * TP#N and SRV#N+1.
  1421. * d) Scenario where service is between TP#N and SRV#N+1, but client is
  1422. * between SRV#N and TP#N.
  1423. *
  1424. * This test is important because it tests that upload_descriptor_to_all() is
  1425. * in synch with pick_hsdir_v3(). That's not the case for the
  1426. * test_reachability() test which only compares the responsible hsdir sets.
  1427. */
  1428. static void
  1429. test_client_service_hsdir_set_sync(void *arg)
  1430. {
  1431. networkstatus_t *ns = NULL;
  1432. (void) arg;
  1433. MOCK(networkstatus_get_latest_consensus,
  1434. mock_networkstatus_get_latest_consensus);
  1435. MOCK(networkstatus_get_live_consensus,
  1436. mock_networkstatus_get_live_consensus);
  1437. MOCK(get_or_state,
  1438. get_or_state_replacement);
  1439. MOCK(hs_desc_encode_descriptor,
  1440. mock_hs_desc_encode_descriptor);
  1441. MOCK(directory_initiate_request,
  1442. mock_directory_initiate_request);
  1443. hs_init();
  1444. /* Initialize a big hash ring: we want it to be big so that client and
  1445. * service cannot accidentally select the same HSDirs */
  1446. ns = networkstatus_get_latest_consensus();
  1447. tt_assert(ns);
  1448. /** Now test the various synch scenarios. See the helper function for more
  1449. details: */
  1450. /* a) Scenario where both client and service are in the time segment between
  1451. * SRV#N and TP#N. At this time the client fetches the first HS desc:
  1452. *
  1453. * +------------------------------------------------------------------+
  1454. * | |
  1455. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1456. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1457. * | |
  1458. * | $==========|-----------$===========|----------$===========| |
  1459. * | ^ ^ |
  1460. * | S C |
  1461. * +------------------------------------------------------------------+
  1462. */
  1463. helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);
  1464. /* b) Scenario where both client and service are in the time segment between
  1465. * TP#N and SRV#N+1. At this time the client fetches the second HS
  1466. * desc:
  1467. *
  1468. * +------------------------------------------------------------------+
  1469. * | |
  1470. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1471. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1472. * | |
  1473. * | $==========|-----------$===========|-----------$===========| |
  1474. * | ^ ^ |
  1475. * | S C |
  1476. * +------------------------------------------------------------------+
  1477. */
  1478. helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);
  1479. /* c) Scenario where service is between SRV#N and TP#N, but client is
  1480. * between TP#N and SRV#N+1. Client is forward in time so it fetches the
  1481. * second HS desc.
  1482. *
  1483. * +------------------------------------------------------------------+
  1484. * | |
  1485. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1486. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1487. * | |
  1488. * | $==========|-----------$===========|-----------$===========| |
  1489. * | ^ ^ |
  1490. * | S C |
  1491. * +------------------------------------------------------------------+
  1492. */
  1493. helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);
  1494. /* d) Scenario where service is between TP#N and SRV#N+1, but client is
  1495. * between SRV#N and TP#N. Client is backwards in time so it fetches the
  1496. * first HS desc.
  1497. *
  1498. * +------------------------------------------------------------------+
  1499. * | |
  1500. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1501. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1502. * | |
  1503. * | $==========|-----------$===========|-----------$===========| |
  1504. * | ^ ^ |
  1505. * | C S |
  1506. * +------------------------------------------------------------------+
  1507. */
  1508. helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);
  1509. /* e) Scenario where service is between SRV#N and TP#N, but client is
  1510. * between TP#N-1 and SRV#3. Client is backwards in time so it fetches
  1511. * the first HS desc.
  1512. *
  1513. * +------------------------------------------------------------------+
  1514. * | |
  1515. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1516. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1517. * | |
  1518. * | $==========|-----------$===========|-----------$===========| |
  1519. * | ^ ^ |
  1520. * | C S |
  1521. * +------------------------------------------------------------------+
  1522. */
  1523. helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);
  1524. /* f) Scenario where service is between TP#N and SRV#N+1, but client is
  1525. * between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
  1526. * the second HS desc.
  1527. *
  1528. * +------------------------------------------------------------------+
  1529. * | |
  1530. * | 00:00 12:00 00:00 12:00 00:00 12:00 |
  1531. * | SRV#1 TP#1 SRV#2 TP#2 SRV#3 TP#3 |
  1532. * | |
  1533. * | $==========|-----------$===========|-----------$===========| |
  1534. * | ^ ^ |
  1535. * | S C |
  1536. * +------------------------------------------------------------------+
  1537. */
  1538. helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);
  1539. done:
  1540. networkstatus_vote_free(ns);
  1541. nodelist_free_all();
  1542. hs_free_all();
  1543. }
  1544. struct testcase_t hs_common_tests[] = {
  1545. { "build_address", test_build_address, TT_FORK,
  1546. NULL, NULL },
  1547. { "validate_address", test_validate_address, TT_FORK,
  1548. NULL, NULL },
  1549. { "time_period", test_time_period, TT_FORK,
  1550. NULL, NULL },
  1551. { "start_time_of_next_time_period", test_start_time_of_next_time_period,
  1552. TT_FORK, NULL, NULL },
  1553. { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
  1554. NULL, NULL },
  1555. { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
  1556. NULL, NULL },
  1557. { "disaster_srv", test_disaster_srv, TT_FORK,
  1558. NULL, NULL },
  1559. { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
  1560. NULL, NULL },
  1561. { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
  1562. NULL, NULL },
  1563. { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
  1564. NULL, NULL },
  1565. { "reachability", test_reachability, TT_FORK,
  1566. NULL, NULL },
  1567. { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
  1568. TT_FORK, NULL, NULL },
  1569. { "hs_indexes", test_hs_indexes, TT_FORK,
  1570. NULL, NULL },
  1571. END_OF_TESTCASES
  1572. };