/* Copyright (c) 2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file test_hs_common.c
 * \brief Test hidden service common functionalities.
 */

#define HS_COMMON_PRIVATE
#define HS_CLIENT_PRIVATE
#define HS_SERVICE_PRIVATE
#define NODELIST_PRIVATE

#include "test.h"
#include "test_helpers.h"
#include "log_test_helpers.h"
#include "hs_test_helpers.h"

#include "connection_edge.h"
#include "hs_common.h"
#include "hs_client.h"
#include "hs_service.h"
#include "config.h"
#include "networkstatus.h"
#include "directory.h"
#include "nodelist.h"
#include "routerlist.h"
#include "statefile.h"
#include "circuitlist.h"
#include "shared_random.h"
#include "util.h"
/** Test the validation of HS v3 addresses */
static void
test_validate_address(void *arg)
{
  int ret;

  (void) arg;

  /* Address too short and too long. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid("blah");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("has an invalid length");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("has an invalid length");
  teardown_capture_of_logs();

  /* Invalid checksum (taken from prop224) */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("invalid checksum");
  teardown_capture_of_logs();

  /* Non base32 decodable string. */
  setup_full_capture_of_logs(LOG_WARN);
  ret = hs_address_is_valid(
           "????????????????????????????????????????????????????????");
  tt_int_op(ret, OP_EQ, 0);
  expect_log_msg_containing("can't be decoded");
  teardown_capture_of_logs();

  /* Valid address. */
  ret = hs_address_is_valid(
           "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnad");
  tt_int_op(ret, OP_EQ, 1);

 done:
  ;
}
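/* Quick reference for the checks above (a summary of the v3 address format as
 * these tests assume it, see prop224, not an authoritative restatement): a v3
 * onion address is the base32 encoding of PUBKEY (32 bytes) | CHECKSUM
 * (2 bytes) | VERSION (1 byte, 0x03), i.e. 35 bytes which encode to exactly
 * 56 base32 characters. CHECKSUM is the first two bytes of
 * SHA3-256(".onion checksum" | PUBKEY | VERSION). Validation therefore
 * rejects anything that is not 56 characters long, is not valid base32, or
 * whose embedded checksum does not match. */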
static int
mock_write_str_to_file(const char *path, const char *str, int bin)
{
  (void)bin;
  tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  tt_str_op(str, OP_EQ,
           "ijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbezhid.onion\n");

 done:
  return 0;
}
/** Test building HS v3 onion addresses */
static void
test_build_address(void *arg)
{
  int ret;
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  hs_service_t *service = NULL;

  (void) arg;

  MOCK(write_str_to_file, mock_write_str_to_file);

  /* The following has been created with the hs_build_address.py script that
   * follows the proposal 224 specification to build an onion address. */
  static const char *test_addr =
    "ijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbeeqscijbezhid";
  /* Let's try to build the same onion address that the script produces. The
   * key is a long set of very random \x42 :). */
  memset(&pubkey, '\x42', sizeof(pubkey));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  tt_str_op(test_addr, OP_EQ, onion_addr);

  /* Validate that address. */
  ret = hs_address_is_valid(onion_addr);
  tt_int_op(ret, OP_EQ, 1);

  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  tor_asprintf(&service->config.directory_path, "/double/five");
  ret = write_address_to_file(service, "squared");
  tt_int_op(ret, OP_EQ, 0);

 done:
  hs_service_free(service);
}
/** Test that our HS time period calculation functions work properly */
static void
test_time_period(void *arg)
{
  (void) arg;
  uint64_t tn;
  int retval;
  time_t fake_time, correct_time, start_time;

  /* Let's do the example in prop224 section [TIME-PERIODS] */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);

  /* Check that the time period number is right */
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);
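  /* Where 16903 comes from (a worked example, assuming the prop224 formula
   * TP_num = (unix_time / 60 - 12 * 60) / 1440 with the default 1440-minute
   * period length): 2016-04-13 11:00:00 UTC is 1460545200 seconds, i.e.
   * 24342420 minutes, and (24342420 - 720) / 1440 = 16903 in integer
   * division. The quotient only reaches 16904 at 12:00:00 UTC, which is
   * exactly what the next checks exercise. */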
  /* Increase current time to 11:59:59 UTC and check that the time period
     number is still the same */
  fake_time += 3599;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16903);

  { /* Check start time of next time period */
    retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now take time to 12:00:00 UTC and check that the time period rotated */
  fake_time += 1;
  tn = hs_get_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16904);

  /* Now also check our hs_get_next_time_period_num() function */
  tn = hs_get_next_time_period_num(fake_time);
  tt_u64_op(tn, OP_EQ, 16905);

  { /* Check start time of next time period again */
    retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
                                &correct_time);
    tt_int_op(retval, OP_EQ, 0);

    start_time = hs_get_start_time_of_next_time_period(fake_time);
    tt_int_op(start_time, OP_EQ, correct_time);
  }

  /* Now do another sanity check: The time period number at the start of the
   * next time period, must be the same time period number as the one returned
   * from hs_get_next_time_period_num() */
  {
    time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
    tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
              hs_get_next_time_period_num(fake_time));
  }

 done:
  ;
}
/** Test that we can correctly find the start time of the next time period */
static void
test_start_time_of_next_time_period(void *arg)
{
  (void) arg;
  int retval;
  time_t fake_time;
  char tbuf[ISO_TIME_LEN + 1];
  time_t next_tp_start_time;

  /* Do some basic tests */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  /* Compare it with the correct result */
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);

  /* Another test with an edge-case time (start of TP) */
  retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
                              &fake_time);
  tt_int_op(retval, OP_EQ, 0);
  next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  format_iso_time(tbuf, next_tp_start_time);
  tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);

  {
    /* Now pretend we are on a testing network and alter the voting schedule
       to be every 10 seconds. This means that a time period has length
       10*24 seconds (4 minutes). It also means that we apply a rotational
       offset of 120 seconds to the time period, so that it starts at
       00:02:00 instead of 00:00:00. */
    or_options_t *options = get_options_mutable();
    options->TestingTorNetwork = 1;
    options->V3AuthVotingInterval = 10;
    options->TestingV3AuthInitialVotingInterval = 10;

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);

    retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
                                &fake_time);
    tt_int_op(retval, OP_EQ, 0);
    next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
    /* Compare it with the correct result */
    format_iso_time(tbuf, next_tp_start_time);
    tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  }

 done:
  ;
}
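/* The testing-network numbers above follow from scaling the production
 * schedule by the voting interval (an illustrative calculation based on the
 * comment in the test, not a spec restatement): a time period lasts 24 voting
 * intervals and the rotation offset is 12 voting intervals, so with
 * V3AuthVotingInterval set to 10 seconds the period length is 24 * 10 = 240 s
 * and the offset is 12 * 10 = 120 s, giving period boundaries at 00:02:00,
 * 00:06:00, 00:10:00 and so on. */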
/* Clean up the global nodelist. Also free the "md" in each node_t, because we
 * allocated that memory in helper_add_hsdir_to_networkstatus(). */
static void
cleanup_nodelist(void)
{
  smartlist_t *nodelist = nodelist_get_list();
  SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
    tor_free(node->md);
    node->md = NULL;
  } SMARTLIST_FOREACH_END(node);
  nodelist_free_all();
}
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  tor_addr_t ipv4_addr;

  memset(identity, identity_idx, sizeof(identity));

  memcpy(rs->identity_digest, identity, DIGEST_LEN);
  rs->is_hs_dir = is_hsdir;
  rs->supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  tor_addr_parse(&ipv4_addr, "1.2.3.4");
  ri->addr = tor_addr_to_ipv4h(&ipv4_addr);
  rs->addr = tor_addr_to_ipv4h(&ipv4_addr);
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node_t *node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_descriptor() to return true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now: nodelist_set_routerinfo() needs an "rs" to set the hsdir
   * indexes, but the rs was not attached yet when it was called above. */
  node_set_hsdir_index(node, ns);
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  routerinfo_free(ri);
}
static networkstatus_t *mock_ns = NULL;

static networkstatus_t *
mock_networkstatus_get_latest_consensus(void)
{
  time_t now = approx_time();

  /* If initialized, return it */
  if (mock_ns) {
    return mock_ns;
  }

  /* Initialize fake consensus */
  mock_ns = tor_malloc_zero(sizeof(networkstatus_t));

  /* This consensus is live */
  mock_ns->valid_after = now-1;
  mock_ns->fresh_until = now+1;
  mock_ns->valid_until = now+2;
  /* Create routerstatus list */
  mock_ns->routerstatus_list = smartlist_new();
  mock_ns->type = NS_TYPE_CONSENSUS;

  return mock_ns;
}

static networkstatus_t *
mock_networkstatus_get_live_consensus(time_t now)
{
  (void) now;

  tt_assert(mock_ns);

 done:
  return mock_ns;
}
/** Test the responsible HSDirs calculation function */
static void
test_responsible_hsdirs(void *arg)
{
  time_t now = approx_time();
  smartlist_t *responsible_dirs = smartlist_new();
  networkstatus_t *ns = NULL;
  int retval;

  (void) arg;

  hs_init();

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);

  ns = networkstatus_get_latest_consensus();

  { /* First router: HSdir */
    helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  }

  { /* Second HSDir */
    helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  }

  { /* Third relay but not HSDir */
    helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  }

  ed25519_keypair_t kp;
  retval = ed25519_keypair_generate(&kp, 0);
  tt_int_op(retval, OP_EQ, 0);

  uint64_t time_period_num = hs_get_time_period_num(now);
  hs_get_responsible_hsdirs(&kp.pubkey, time_period_num,
                            0, 0, responsible_dirs);

  /* Make sure that we only found 2 responsible HSDirs.
   * The third relay was not an hsdir! */
  tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);

  /** TODO: Build a bigger network and do more tests here */
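  /* With only two HSDirs in this tiny consensus, both of them end up
   * responsible, hence the check for 2 above. On a full hash ring the list
   * would instead contain "number of replicas" times "spread" entries, which
   * is why the larger tests below expect 6 responsible HSDirs (assuming the
   * defaults of 2 replicas and a spread of 3 that these tests appear to be
   * written against). */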
 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_free(responsible_dirs);
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(mock_ns);
  cleanup_nodelist();
}
static void
mock_directory_initiate_request(directory_request_t *req)
{
  (void)req;
  return;
}

static int
mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
                               const ed25519_keypair_t *signing_kp,
                               char **encoded_out)
{
  (void)desc;
  (void)signing_kp;

  tor_asprintf(encoded_out, "lulu");
  return 0;
}

static or_state_t dummy_state;

/* Mock function to get fake or state (used for rev counters) */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}

static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
/** Test that we correctly detect when the HSDir hash ring changes so that we
 *  reupload our descriptor. */
static void
test_desc_reupload_logic(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  hs_init();

  MOCK(router_have_minimum_dir_info,
       mock_router_have_minimum_dir_info);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);

  ns = networkstatus_get_latest_consensus();

  /** Test logic:
   *  1) Upload descriptor to HSDirs
   *     CHECK that previous_hsdirs list was populated.
   *  2) Then call router_dir_info_changed() without an HSDir set change.
   *     CHECK that no reupload occurs.
   *  3) Now change the HSDir set, and call router_dir_info_changed() again.
   *     CHECK that reupload occurs.
   *  4) Finally call service_desc_schedule_upload().
   *     CHECK that previous_hsdirs list was cleared.
   **/

  /* Let's start by building our descriptor and service */
  hs_service_descriptor_t *desc = service_descriptor_new();
  hs_service_t *service = NULL;
  char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  ed25519_public_key_t pubkey;
  memset(&pubkey, '\x42', sizeof(pubkey));
  hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  service = tor_malloc_zero(sizeof(hs_service_t));
  memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  service->desc_current = desc;
  /* Also add service to service map */
  hs_service_ht *service_map = get_hs_service_map();
  tt_assert(service_map);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  register_service(service_map, service);
  tt_int_op(hs_service_get_num_services(), OP_EQ, 1);

  /* Now let's create our hash ring: */
  {
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Poison the next upload time so that we can see if it was changed by
   * router_dir_info_changed(). No changes in the hash ring so far, so the
   * upload time should stay as is. */
  desc->next_upload_time = 42;
  router_dir_info_changed();
  tt_int_op(desc->next_upload_time, OP_EQ, 42);

  /* Now change the HSDir hash ring by swapping nora for aaron.
   * Start by clearing the hash ring. */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now add back all the nodes */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
    helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  }

  /* Now call service_desc_hsdirs_changed() and see that it detected the hash
     ring change */
  time_t now = approx_time();
  tt_assert(now);
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now order another upload and see that we keep having 6 prev hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* Now restore the HSDir hash ring to its original state by swapping back
     aaron for nora */

  /* First clear up the hash ring */
  {
    SMARTLIST_FOREACH(ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(ns->routerstatus_list);
    cleanup_nodelist();
    routerlist_free_all();
  }

  { /* Now populate the hash ring again */
    helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
    helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
    helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
    helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
    helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
    helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  }

  /* Check that our algorithm catches this change of hsdirs */
  tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);

  /* Now pretend that the descriptor changed, and order a reupload to all
     HSDirs. Make sure that the set of previous HSDirs was cleared. */
  service_desc_schedule_upload(desc, now, 1);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);

  /* Now reupload again: see that the prev hsdir set got populated again. */
  upload_descriptor_to_all(service, desc);
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

 done:
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  networkstatus_vote_free(ns);
  cleanup_nodelist();
  hs_free_all();
}
/** Test disaster SRV computation and caching */
static void
test_disaster_srv(void *arg)
{
  uint8_t *cached_disaster_srv_one = NULL;
  uint8_t *cached_disaster_srv_two = NULL;
  uint8_t srv_one[DIGEST256_LEN] = {0};
  uint8_t srv_two[DIGEST256_LEN] = {0};
  uint8_t srv_three[DIGEST256_LEN] = {0};
  uint8_t srv_four[DIGEST256_LEN] = {0};
  uint8_t srv_five[DIGEST256_LEN] = {0};

  (void) arg;

  /* Get the cached SRVs: we are going to use them later for verification */
  cached_disaster_srv_one = get_first_cached_disaster_srv();
  cached_disaster_srv_two = get_second_cached_disaster_srv();

  /* Compute some srvs */
  get_disaster_srv(1, srv_one);
  get_disaster_srv(2, srv_two);

  /* Check that the cached ones were updated */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for an SRV that has already been computed */
  get_disaster_srv(2, srv_two);
  /* and check that the cache entries have not changed */
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for a new SRV */
  get_disaster_srv(3, srv_three);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);

  /* Ask for another SRV: none of the original SRVs should now be cached */
  get_disaster_srv(4, srv_four);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

  /* Ask for yet another SRV */
  get_disaster_srv(5, srv_five);
  tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);

 done:
  ;
}
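/* What the checks above rely on (a summary of the behaviour these tests
 * assume): a "disaster" SRV is a deterministic fallback value derived from
 * the time period number alone, used when no shared random value is present
 * in the consensus, and the cache only holds the values for the two most
 * recently requested time periods. That is why asking for period 3 evicts
 * period 1's value, and asking for period 4 then evicts period 2's. */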
/** Test our HS descriptor request tracker by making various requests and
 *  checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();

  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";

  /*************************** basic test *******************************/

  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker), OP_EQ, 0);

  /* Let's register a hid serv request */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);

  /* Let's lookup a non-existent hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);

  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);

  /**********************************************************************/

  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 2);

  /* Check that we can clean the first request based on time */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);

  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 2);

  /*************************** deleting entries **************************/

  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 3);

  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker), OP_EQ, 3);
  tor_end_capture_bugs_();

  /* Try another dummy key. Check that requests are still there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker), OP_EQ, 3);
  }

  /* Another dummy key! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 3);

  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker), OP_EQ, 2);

  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker), OP_EQ, 0);

 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
static void
test_parse_extended_hostname(void *arg)
{
  (void) arg;

  char address1[] = "fooaddress.onion";
  char address2[] = "aaaaaaaaaaaaaaaa.onion";
  char address3[] = "fooaddress.exit";
  char address4[] = "www.torproject.org";
  char address5[] = "foo.abcdefghijklmnop.onion";
  char address6[] = "foo.bar.abcdefghijklmnop.onion";
  char address7[] = ".abcdefghijklmnop.onion";
  char address8[] =
    "www.p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnad.onion";

  tt_assert(BAD_HOSTNAME == parse_extended_hostname(address1));
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address2));
  tt_str_op(address2, OP_EQ, "aaaaaaaaaaaaaaaa");
  tt_assert(EXIT_HOSTNAME == parse_extended_hostname(address3));
  tt_assert(NORMAL_HOSTNAME == parse_extended_hostname(address4));
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address5));
  tt_str_op(address5, OP_EQ, "abcdefghijklmnop");
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address6));
  tt_str_op(address6, OP_EQ, "abcdefghijklmnop");
  tt_assert(BAD_HOSTNAME == parse_extended_hostname(address7));
  tt_assert(ONION_V3_HOSTNAME == parse_extended_hostname(address8));
  tt_str_op(address8, OP_EQ,
            "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnad");

 done: ;
}
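/* A note on what distinguishes the cases above (descriptive, based only on
 * the inputs and expectations used in this test): parse_extended_hostname()
 * strips the ".onion" suffix and any subdomain labels and rewrites the buffer
 * in place, which is why address2, address5, address6 and address8 compare
 * equal to the bare onion label afterwards. A 16-character base32 label is
 * classified as a v2 onion address and a 56-character one as v3, while
 * malformed inputs such as "fooaddress.onion" or a leading dot are rejected
 * as BAD_HOSTNAME. */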
static void
test_time_between_tp_and_srv(void *arg)
{
  int ret;
  networkstatus_t ns;
  (void) arg;

  /* This function should be returning true where "^" are:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |             ^^^^^^^^^^^^            ^^^^^^^^^^^^                 |
   *  |                                                                   |
   *  +------------------------------------------------------------------+
   */

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);

  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);

 done:
  ;
}
/************ Reachability Test (it is huge) ****************/

/* Simulate different consensuses for the client and the service. Used by the
 * reachability tests. The SRVs and responsible HSDir lists are used by all
 * reachability tests, so make them common to simplify setup and teardown. */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
static sr_srv_t current_srv, previous_srv;
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;

static networkstatus_t *
mock_networkstatus_get_live_consensus_service(time_t now)
{
  (void) now;

  if (mock_service_ns) {
    return mock_service_ns;
  }

  mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_service_ns->routerstatus_list = smartlist_new();
  mock_service_ns->type = NS_TYPE_CONSENSUS;

  return mock_service_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_service(void)
{
  return mock_networkstatus_get_live_consensus_service(0);
}

static networkstatus_t *
mock_networkstatus_get_live_consensus_client(time_t now)
{
  (void) now;

  if (mock_client_ns) {
    return mock_client_ns;
  }

  mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  mock_client_ns->routerstatus_list = smartlist_new();
  mock_client_ns->type = NS_TYPE_CONSENSUS;

  return mock_client_ns;
}

static networkstatus_t *
mock_networkstatus_get_latest_consensus_client(void)
{
  return mock_networkstatus_get_live_consensus_client(0);
}

/* Mock function, because we are not trying to test circuit closing, which
 * does an awful lot of checks on the circuit object. */
static void
mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
                            const char *file)
{
  (void) circ;
  (void) reason;
  (void) line;
  (void) file;
  return;
}
/* Initialize a big HSDir V3 hash ring. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;

  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate a random nickname for each hsdir... */
    char nickname_binary[8];
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }

  /* Make sure we have 250 hsdirs in our list */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);

 done:
  ;
}
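/* The buffer sizes in the helper above are not arbitrary (a short derivation
 * of the numbers used): base64-encoding 8 random bytes produces
 * ceil(8 / 3) * 4 = 12 output characters (the last one being '=' padding), so
 * base64_encode() is expected to return 12, and nickname_str needs 13 bytes
 * to hold the result plus its NUL terminator. */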
/** Initialize a service and publish its descriptor as needed. Return the
 *  newly allocated service object to the caller. */
static hs_service_t *
helper_init_service(time_t now)
{
  int retval;
  hs_service_t *service = hs_service_new(get_options());
  tt_assert(service);
  service->config.version = HS_VERSION_THREE;
  ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  ed25519_public_key_generate(&service->keys.identity_pk,
                              &service->keys.identity_sk);
  /* Register service to global map. */
  retval = register_service(get_hs_service_map(), service);
  tt_int_op(retval, OP_EQ, 0);

  /* Initialize service descriptor */
  build_all_descriptors(now);
  tt_assert(service->desc_current);
  tt_assert(service->desc_next);

 done:
  return service;
}

/* Helper function to parse an RFC 1123 time string into t. */
static void
set_consensus_times(const char *time, time_t *t)
{
  tt_assert(time);
  tt_assert(t);

  int ret = parse_rfc1123_time(time, t);
  tt_int_op(ret, OP_EQ, 0);

 done:
  return;
}
/* Helper function to clean up the mock consensuses (client and service) */
static void
cleanup_mock_ns(void)
{
  if (mock_service_ns) {
    SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_service_ns->routerstatus_list);
    mock_service_ns->sr_info.current_srv = NULL;
    mock_service_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_service_ns);
    mock_service_ns = NULL;
  }

  if (mock_client_ns) {
    SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
                      routerstatus_t *, rs, routerstatus_free(rs));
    smartlist_clear(mock_client_ns->routerstatus_list);
    mock_client_ns->sr_info.current_srv = NULL;
    mock_client_ns->sr_info.previous_srv = NULL;
    networkstatus_vote_free(mock_client_ns);
    mock_client_ns = NULL;
  }
}
/* Helper function to set up a reachability test. Once called,
 * cleanup_reachability_test() MUST be called at the end. */
static void
setup_reachability_test(void)
{
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);

  hs_init();

  /* Baseline to start with. */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));

  /* Initialize the consensuses. */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();

  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}

/* Helper function to clean up a reachability test's initial setup. */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
/* A reachability test always checks whether the resulting service and client
 * responsible HSDirs for the given parameters are equal.
 *
 * Return true iff the exact same nodes are in both lists. */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 6);

  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);

 done:
  return (count == 6);
}
/* Tor itself doesn't use such a function to get the previous time period;
 * that value is only needed inside node_set_hsdir_index(). We need it here so
 * we can test reachability scenario 6, which requires the previous time
 * period to compute the list of responsible HSDirs because of the client
 * consensus timing. */
static uint64_t
get_previous_time_period(time_t now)
{
  return hs_get_time_period_num(now) - 1;
}

/* Configuration of a reachability test scenario. */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;

  /* SRVs that the service and client should use. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;

  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always uses the current time
   * period, which is why there is no client function. */
  uint64_t (*service_time_period_fn)(time_t);

  /* Are the client and service expected to be in a new time period? After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and tests the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;

  /* Some scenarios require a hint that the client, because of its consensus
   * time, will request the "next" service descriptor, so this indicates
   * whether that is the case or not. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;

/* Some defines to help with semantics while reading a configuration below. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |              ^ ^                                                  |
   *  |              S C                                                  |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 15:00,
   *  both are after TP#1 and thus have access to SRV#1. Service and client
   *  should be using TP#1.
   */

  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                       ^   ^                                       |
   *  |                       S   C                                       |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to 01:00,
   *  which puts the client after SRV#2 and the service just before it. The
   *  service should only be using TP#1. The client should be using TP#1.
   */

  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                            ^ ^                                    |
   *  |                            S C                                    |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to 05:00,
   *  which makes both after SRV#2. The service should be using TP#1 as its
   *  current time period. The client should be using TP#1.
   */

  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                   ^   ^                           |
   *  |                                   S   C                           |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to 13:00,
   *  which makes the service before TP#2 and the client just after. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using the TP#2 time period.
   */

  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                       ^   ^                                       |
   *  |                       C   S                                       |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to 23:00,
   *  which makes the service after SRV#2 and the client just before. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using the TP#1 time period.
   */

  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                   ^   ^                           |
   *  |                                   C   S                           |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 11:00,
   *  which puts the service after TP#2 and the client just before it. The
   *  service should be using TP#1 as its current time period and TP#2 as its
   *  next. The client should be using the TP#1 time period.
   */

  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
  1097. /* Run a single reachability scenario. num_scenario is the corresponding
  1098. * scenario number from the documentation. It is used to log it in case of
  1099. * failure so we know which scenario fails. */
  1100. static int
  1101. run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
  1102. {
  1103. int ret = -1;
  1104. hs_service_t *service;
  1105. uint64_t service_tp, client_tp;
  1106. ed25519_public_key_t service_blinded_pk, client_blinded_pk;
  1107. setup_reachability_test();
  1108. tt_assert(cfg);
  1109. /* Set service consensus time. */
  1110. set_consensus_times(cfg->service_valid_after,
  1111. &mock_service_ns->valid_after);
  1112. set_consensus_times(cfg->service_valid_until,
  1113. &mock_service_ns->valid_until);
  1114. set_consensus_times(cfg->service_valid_until,
  1115. &mock_service_ns->fresh_until);
  1116. /* Set client consensus time. */
  1117. set_consensus_times(cfg->client_valid_after,
  1118. &mock_client_ns->valid_after);
  1119. set_consensus_times(cfg->client_valid_until,
  1120. &mock_client_ns->valid_until);
  1121. set_consensus_times(cfg->client_valid_until,
  1122. &mock_client_ns->fresh_until);
  1123. /* New time period checks for this scenario. */
  1124. tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
  1125. cfg->service_in_new_tp);
  1126. tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
  1127. cfg->client_in_new_tp);
  1128. /* Set the SRVs for this scenario. */
  1129. mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  1130. mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  1131. mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  1132. mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;
  1133. /* Initialize a service to get keys. */
  1134. service = helper_init_service(time(NULL));
  1135. /*
  1136. * === Client setup ===
  1137. */
  1138. MOCK(networkstatus_get_live_consensus,
  1139. mock_networkstatus_get_live_consensus_client);
  1140. MOCK(networkstatus_get_latest_consensus,
  1141. mock_networkstatus_get_latest_consensus_client);
  1142. /* Make networkstatus_is_live() happy. */
  1143. update_approx_time(mock_client_ns->valid_after);
  1144. /* Initialize a big hashring for this consensus with the hsdir index set. */
  1145. helper_initialize_big_hash_ring(mock_client_ns);
  1146. /* Client ONLY use the current time period. This is the whole point of these
  1147. * reachability test that is to make sure the client can always reach the
  1148. * service using only its current time period. */
  1149. client_tp = hs_get_time_period_num(0);
  1150. hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
  1151. client_tp, &client_blinded_pk);
  1152. hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
  1153. client_responsible_hsdirs);
  1154. /* Cleanup the nodelist so we can let the service computes its own set of
  1155. * node with its own hashring. */
  1156. cleanup_nodelist();
  1157. tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  1158. UNMOCK(networkstatus_get_latest_consensus);
  1159. UNMOCK(networkstatus_get_live_consensus);
  1160. /*
  1161. * === Service setup ===
  1162. */
  1163. MOCK(networkstatus_get_live_consensus,
  1164. mock_networkstatus_get_live_consensus_service);
  1165. MOCK(networkstatus_get_latest_consensus,
  1166. mock_networkstatus_get_latest_consensus_service);
  1167. /* Make networkstatus_is_live() happy. */
  1168. update_approx_time(mock_service_ns->valid_after);
  1169. /* Initialize a big hashring for this consensus with the hsdir index set. */
  1170. helper_initialize_big_hash_ring(mock_service_ns);
  service_tp = cfg->service_time_period_fn(0);

  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);

  /* A service builds two lists of responsible HSDirs, one for the current
   * descriptor and one for the next. Depending on the scenario, the client's
   * timing indicates whether it is fetching the current or the next
   * descriptor, so we use "client_fetch_next_desc" to pick which of the two
   * the client is trying to get, and confirm that the service computes the
   * same hashring for the same blinded key and service time period. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
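  /* As on the client side, clean up the nodelist now that the service has
   * computed its set of responsible HSDirs. */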
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 6);

  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);

  /* Verify that the client and the service agree on the blinded key and on
   * the set of responsible HSDirs they just computed. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);

  /* Everything went well. */
  ret = 0;

 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}
static void
test_reachability(void *arg)
{
  (void) arg;

  /* NOTE: An important axiom to understand here is that SRV#N must only be
   * used with the TP#N value. For example, SRV#2 and TP#1 should NEVER be
   * used together. The HSDir index computation is based on this axiom. */
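  /* Each scenario entry supplies the consensus valid-after/valid-until times
   * seen by the client and by the service, the current and previous SRVs each
   * side sees, whether each side is in the "new" time period, which time
   * period function the service uses, and whether the client ends up fetching
   * the next descriptor. The array is terminated by an entry whose
   * service_valid_after is unset, which is what stops the loop below. */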
  for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
    int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
    if (ret < 0) {
      return;
    }
  }
}
/** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
 * its identity digest in <b>hsdir_digest_out</b>. */
static void
helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
                         char *hsdir_digest_out)
{
  tt_assert(onion_identity_pk);

  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  tt_assert(client_hsdir);
  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);

 done:
  ;
}
/** Set the consensus and system time based on <b>between_srv_and_tp</b>. If
 * <b>between_srv_and_tp</b> is set, then set the time to be inside the time
 * segment between SRV#N and TP#N. */
static time_t
helper_set_consensus_and_system_time(networkstatus_t *ns,
                                     int between_srv_and_tp)
{
  time_t real_time;

  /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
   * valid_after is what matters here, the rest is just to specify the voting
   * period correctly. */
  if (between_srv_and_tp) {
    parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  } else {
    parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  }
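  /* For example: with between_srv_and_tp set, valid_after lands at 11:00 UTC,
   * inside the 00:00-12:00 segment between SRV#N and TP#N; without it,
   * valid_after lands at 13:00 UTC, after TP#N has started. */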
  /* Set system time: pretend to be just 2 minutes before consensus expiry. */
  real_time = ns->valid_until - 120;
  update_approx_time(real_time);
  return real_time;
}
/** Helper function that carries out the actual test for
 *  test_client_service_hsdir_set_sync(). */
static void
helper_test_hsdir_sync(networkstatus_t *ns,
                       int service_between_srv_and_tp,
                       int client_between_srv_and_tp,
                       int client_fetches_next_desc)
{
  hs_service_descriptor_t *desc;
  int retval;

  /** Test logic:
   *  1) Initialize service time: consensus and system time.
   *  1.1) Initialize service hash ring.
   *  2) Initialize service and publish descriptors.
   *  3) Initialize client time: consensus and system time.
   *  3.1) Initialize client hash ring.
   *  4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
   *     the client was also picked by the service.
   */

  /* 1) Initialize service time: consensus and real time. */
  time_t now = helper_set_consensus_and_system_time(ns,
                                                    service_between_srv_and_tp);
  helper_initialize_big_hash_ring(ns);

  /* 2) Initialize service. */
  hs_service_t *service = helper_init_service(now);
  desc = client_fetches_next_desc ? service->desc_next : service->desc_current;

  /* Now let's upload our desc to all hsdirs. */
  upload_descriptor_to_all(service, desc);
  /* Cleanup right now so we don't memleak on error. */
  cleanup_nodelist();
  /* Check that previous hsdirs were populated. */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);

  /* 3) Initialize client time. */
  helper_set_consensus_and_system_time(ns, client_between_srv_and_tp);
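  /* Wipe the service's view of the network (the nodelist and the consensus
   * routerstatus list) so the client's hash ring below is built from a clean
   * slate. */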
  cleanup_nodelist();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  helper_initialize_big_hash_ring(ns);

  /* 4) Fetch desc as client. */
  char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
  helper_client_pick_hsdir(&service->keys.identity_pk,
                           client_hsdir_b64_digest);

  /* Cleanup right now so we don't memleak on error. */
  cleanup_nodelist();

  /* CHECK: Go through the hsdirs chosen by the service and make sure that
   * they contain the one picked by the client! */
  retval = smartlist_contains_string(desc->previous_hsdirs,
                                     client_hsdir_b64_digest);
  tt_int_op(retval, OP_EQ, 1);

 done:
  /* At the end: free all services and initialize the subsystem again; we will
   * need it for the next scenario. */
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}
/** This test ensures that client and service will pick the same HSDirs under
 * various timing scenarios:
 * a) Scenario where both client and service are in the time segment between
 *    SRV#N and TP#N.
 * b) Scenario where both client and service are in the time segment between
 *    TP#N and SRV#N+1.
 * c) Scenario where service is between SRV#N and TP#N, but client is between
 *    TP#N and SRV#N+1.
 * d) Scenario where service is between TP#N and SRV#N+1, but client is
 *    between SRV#N and TP#N.
 *
 * This test is important because it tests that upload_descriptor_to_all() is
 * in sync with pick_hsdir_v3(). That's not the case for the
 * test_reachability() test, which only compares the responsible hsdir sets.
 */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);

  hs_init();

  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs. */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);
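  /* With networkstatus_get_latest_consensus() mocked above, the call returns
   * the test's mock consensus; every scenario below reuses and re-populates
   * this same object. */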
  /** Now test the various sync scenarios. See the helper function for more
   *  details. */

  /* a) Scenario where both client and service are in the time segment between
   *    SRV#N and TP#N. At this time the client fetches the first HS desc:
   *
   *  +--------------------------------------------------------------------+
   *  |                                                                    |
   *  | 00:00      12:00       00:00       12:00      00:00       12:00    |
   *  | SRV#1      TP#1        SRV#2       TP#2       SRV#3       TP#3     |
   *  |                                                                    |
   *  |  $==========|-----------$===========|----------$===========|       |
   *  |   ^ ^                                                              |
   *  |   S C                                                              |
   *  +--------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, 1, 1, 0);

  /* b) Scenario where both client and service are in the time segment between
   *    TP#N and SRV#N+1. At this time the client fetches the second HS desc:
   *
   *  +--------------------------------------------------------------------+
   *  |                                                                    |
   *  | 00:00      12:00       00:00       12:00      00:00       12:00    |
   *  | SRV#1      TP#1        SRV#2       TP#2       SRV#3       TP#3     |
   *  |                                                                    |
   *  |  $==========|-----------$===========|----------$===========|       |
   *  |               ^ ^                                                  |
   *  |               S C                                                  |
   *  +--------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, 0, 0, 1);

  /* c) Scenario where service is between SRV#N and TP#N, but client is
   *    between TP#N and SRV#N+1. Client is ahead in time, so it fetches the
   *    second HS desc.
   *
   *  +--------------------------------------------------------------------+
   *  |                                                                    |
   *  | 00:00      12:00       00:00       12:00      00:00       12:00    |
   *  | SRV#1      TP#1        SRV#2       TP#2       SRV#3       TP#3     |
   *  |                                                                    |
   *  |  $==========|-----------$===========|----------$===========|       |
   *  |     ^           ^                                                  |
   *  |     S           C                                                  |
   *  +--------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, 1, 0, 1);

  /* d) Scenario where service is between TP#N and SRV#N+1, but client is
   *    between SRV#N and TP#N. Client is behind in time, so it fetches the
   *    first HS desc.
   *
   *  +--------------------------------------------------------------------+
   *  |                                                                    |
   *  | 00:00      12:00       00:00       12:00      00:00       12:00    |
   *  | SRV#1      TP#1        SRV#2       TP#2       SRV#3       TP#3     |
   *  |                                                                    |
   *  |  $==========|-----------$===========|----------$===========|       |
   *  |     ^           ^                                                  |
   *  |     C           S                                                  |
   *  +--------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, 0, 1, 0);

 done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}
struct testcase_t hs_common_tests[] = {
  { "build_address", test_build_address, TT_FORK,
    NULL, NULL },
  { "validate_address", test_validate_address, TT_FORK,
    NULL, NULL },
  { "time_period", test_time_period, TT_FORK,
    NULL, NULL },
  { "start_time_of_next_time_period", test_start_time_of_next_time_period,
    TT_FORK, NULL, NULL },
  { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
    NULL, NULL },
  { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
    NULL, NULL },
  { "disaster_srv", test_disaster_srv, TT_FORK,
    NULL, NULL },
  { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
    NULL, NULL },
  { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
    NULL, NULL },
  { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
    NULL, NULL },
  { "reachability", test_reachability, TT_FORK,
    NULL, NULL },
  { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
    TT_FORK, NULL, NULL },

  END_OF_TESTCASES
};