/* test_hs_common.c */
  1. /* Copyright (c) 2017-2019, The Tor Project, Inc. */
  2. /* See LICENSE for licensing information */
  3. /**
  4. * \file test_hs_common.c
  5. * \brief Test hidden service common functionalities.
  6. */
  7. #define HS_COMMON_PRIVATE
  8. #define HS_CLIENT_PRIVATE
  9. #define HS_SERVICE_PRIVATE
  10. #define NODELIST_PRIVATE
  11. #include "test/test.h"
  12. #include "test/test_helpers.h"
  13. #include "test/log_test_helpers.h"
  14. #include "test/hs_test_helpers.h"
  15. #include "core/or/connection_edge.h"
  16. #include "lib/crypt_ops/crypto_format.h"
  17. #include "lib/crypt_ops/crypto_rand.h"
  18. #include "feature/hs/hs_common.h"
  19. #include "feature/hs/hs_client.h"
  20. #include "feature/hs/hs_service.h"
  21. #include "app/config/config.h"
  22. #include "feature/nodelist/networkstatus.h"
  23. #include "feature/dirclient/dirclient.h"
  24. #include "feature/dirauth/dirvote.h"
  25. #include "feature/nodelist/nodelist.h"
  26. #include "feature/nodelist/routerlist.h"
  27. #include "app/config/statefile.h"
  28. #include "core/or/circuitlist.h"
  29. #include "feature/dirauth/shared_random.h"
  30. #include "feature/dircommon/voting_schedule.h"
  31. #include "feature/nodelist/microdesc_st.h"
  32. #include "feature/nodelist/networkstatus_st.h"
  33. #include "feature/nodelist/node_st.h"
  34. #include "app/config/or_state_st.h"
  35. #include "feature/nodelist/routerinfo_st.h"
  36. #include "feature/nodelist/routerstatus_st.h"
  37. /** Test the validation of HS v3 addresses */
  38. static void
  39. test_validate_address(void *arg)
  40. {
  41. int ret;
  42. (void) arg;
  43. /* Address too short and too long. */
  44. setup_full_capture_of_logs(LOG_WARN);
  45. ret = hs_address_is_valid("blah");
  46. tt_int_op(ret, OP_EQ, 0);
  47. expect_log_msg_containing("has an invalid length");
  48. teardown_capture_of_logs();
  49. setup_full_capture_of_logs(LOG_WARN);
  50. ret = hs_address_is_valid(
  51. "p3xnclpu4mu22dwaurjtsybyqk4xfjmcfz6z62yl24uwmhjatiwnlnadb");
  52. tt_int_op(ret, OP_EQ, 0);
  53. expect_log_msg_containing("has an invalid length");
  54. teardown_capture_of_logs();
  55. /* Invalid checksum (taken from prop224) */
  56. setup_full_capture_of_logs(LOG_WARN);
  57. ret = hs_address_is_valid(
  58. "l5satjgud6gucryazcyvyvhuxhr74u6ygigiuyixe3a6ysis67ororad");
  59. tt_int_op(ret, OP_EQ, 0);
  60. expect_log_msg_containing("invalid checksum");
  61. teardown_capture_of_logs();
  62. setup_full_capture_of_logs(LOG_WARN);
  63. ret = hs_address_is_valid(
  64. "btojiu7nu5y5iwut64eufevogqdw4wmqzugnoluw232r4t3ecsfv37ad");
  65. tt_int_op(ret, OP_EQ, 0);
  66. expect_log_msg_containing("invalid checksum");
  67. teardown_capture_of_logs();
  68. /* Non base32 decodable string. */
  69. setup_full_capture_of_logs(LOG_WARN);
  70. ret = hs_address_is_valid(
  71. "????????????????????????????????????????????????????????");
  72. tt_int_op(ret, OP_EQ, 0);
  73. expect_log_msg_containing("can't be decoded");
  74. teardown_capture_of_logs();
  75. /* Valid address. */
  76. ret = hs_address_is_valid(
  77. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
  78. tt_int_op(ret, OP_EQ, 1);
  79. done:
  80. ;
  81. }
  82. static int
  83. mock_write_str_to_file(const char *path, const char *str, int bin)
  84. {
  85. (void)bin;
  86. tt_str_op(path, OP_EQ, "/double/five"PATH_SEPARATOR"squared");
  87. tt_str_op(str, OP_EQ,
  88. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion\n");
  89. done:
  90. return 0;
  91. }
  92. /** Test building HS v3 onion addresses. Uses test vectors from the
  93. * ./hs_build_address.py script. */
  94. static void
  95. test_build_address(void *arg)
  96. {
  97. int ret;
  98. char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  99. ed25519_public_key_t pubkey;
  100. /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  101. char pubkey_hex[] =
  102. "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  103. hs_service_t *service = NULL;
  104. (void) arg;
  105. MOCK(write_str_to_file, mock_write_str_to_file);
  106. /* The following has been created with hs_build_address.py script that
  107. * follows proposal 224 specification to build an onion address. */
  108. static const char *test_addr =
  109. "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid";
  110. /* Let's try to build the same onion address as the script */
  111. base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
  112. pubkey_hex, strlen(pubkey_hex));
  113. hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  114. tt_str_op(test_addr, OP_EQ, onion_addr);
  115. /* Validate that address. */
  116. ret = hs_address_is_valid(onion_addr);
  117. tt_int_op(ret, OP_EQ, 1);
  118. service = tor_malloc_zero(sizeof(hs_service_t));
  119. memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  120. tor_asprintf(&service->config.directory_path, "/double/five");
  121. ret = write_address_to_file(service, "squared");
  122. tt_int_op(ret, OP_EQ, 0);
  123. done:
  124. hs_service_free(service);
  125. }
  126. /** Test that our HS time period calculation functions work properly */
  127. static void
  128. test_time_period(void *arg)
  129. {
  130. (void) arg;
  131. uint64_t tn;
  132. int retval;
  133. time_t fake_time, correct_time, start_time;
  134. /* Let's do the example in prop224 section [TIME-PERIODS] */
  135. retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
  136. &fake_time);
  137. tt_int_op(retval, OP_EQ, 0);
  138. /* Check that the time period number is right */
  139. tn = hs_get_time_period_num(fake_time);
  140. tt_u64_op(tn, OP_EQ, 16903);
  141. /* Increase current time to 11:59:59 UTC and check that the time period
  142. number is still the same */
  143. fake_time += 3599;
  144. tn = hs_get_time_period_num(fake_time);
  145. tt_u64_op(tn, OP_EQ, 16903);
  146. { /* Check start time of next time period */
  147. retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
  148. &correct_time);
  149. tt_int_op(retval, OP_EQ, 0);
  150. start_time = hs_get_start_time_of_next_time_period(fake_time);
  151. tt_int_op(start_time, OP_EQ, correct_time);
  152. }
  153. /* Now take time to 12:00:00 UTC and check that the time period rotated */
  154. fake_time += 1;
  155. tn = hs_get_time_period_num(fake_time);
  156. tt_u64_op(tn, OP_EQ, 16904);
  157. /* Now also check our hs_get_next_time_period_num() function */
  158. tn = hs_get_next_time_period_num(fake_time);
  159. tt_u64_op(tn, OP_EQ, 16905);
  160. { /* Check start time of next time period again */
  161. retval = parse_rfc1123_time("Wed, 14 Apr 2016 12:00:00 UTC",
  162. &correct_time);
  163. tt_int_op(retval, OP_EQ, 0);
  164. start_time = hs_get_start_time_of_next_time_period(fake_time);
  165. tt_int_op(start_time, OP_EQ, correct_time);
  166. }
  167. /* Now do another sanity check: The time period number at the start of the
  168. * next time period, must be the same time period number as the one returned
  169. * from hs_get_next_time_period_num() */
  170. {
  171. time_t next_tp_start = hs_get_start_time_of_next_time_period(fake_time);
  172. tt_u64_op(hs_get_time_period_num(next_tp_start), OP_EQ,
  173. hs_get_next_time_period_num(fake_time));
  174. }
  175. done:
  176. ;
  177. }
  178. /** Test that we can correctly find the start time of the next time period */
  179. static void
  180. test_start_time_of_next_time_period(void *arg)
  181. {
  182. (void) arg;
  183. int retval;
  184. time_t fake_time;
  185. char tbuf[ISO_TIME_LEN + 1];
  186. time_t next_tp_start_time;
  187. /* Do some basic tests */
  188. retval = parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC",
  189. &fake_time);
  190. tt_int_op(retval, OP_EQ, 0);
  191. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  192. /* Compare it with the correct result */
  193. format_iso_time(tbuf, next_tp_start_time);
  194. tt_str_op("2016-04-13 12:00:00", OP_EQ, tbuf);
  195. /* Another test with an edge-case time (start of TP) */
  196. retval = parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC",
  197. &fake_time);
  198. tt_int_op(retval, OP_EQ, 0);
  199. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  200. format_iso_time(tbuf, next_tp_start_time);
  201. tt_str_op("2016-04-14 12:00:00", OP_EQ, tbuf);
  202. {
  203. /* Now pretend we are on a testing network and alter the voting schedule to
  204. be every 10 seconds. This means that a time period has length 10*24
  205. seconds (4 minutes). It also means that we apply a rotational offset of
  206. 120 seconds to the time period, so that it starts at 00:02:00 instead of
  207. 00:00:00. */
  208. or_options_t *options = get_options_mutable();
  209. options->TestingTorNetwork = 1;
  210. options->V3AuthVotingInterval = 10;
  211. options->TestingV3AuthInitialVotingInterval = 10;
  212. retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:00:00 UTC",
  213. &fake_time);
  214. tt_int_op(retval, OP_EQ, 0);
  215. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  216. /* Compare it with the correct result */
  217. format_iso_time(tbuf, next_tp_start_time);
  218. tt_str_op("2016-04-13 00:02:00", OP_EQ, tbuf);
  219. retval = parse_rfc1123_time("Wed, 13 Apr 2016 00:02:00 UTC",
  220. &fake_time);
  221. tt_int_op(retval, OP_EQ, 0);
  222. next_tp_start_time = hs_get_start_time_of_next_time_period(fake_time);
  223. /* Compare it with the correct result */
  224. format_iso_time(tbuf, next_tp_start_time);
  225. tt_str_op("2016-04-13 00:06:00", OP_EQ, tbuf);
  226. }
  227. done:
  228. ;
  229. }
  230. /* Cleanup the global nodelist. It also frees the "md" in the node_t because
  231. * we allocate the memory in helper_add_hsdir_to_networkstatus(). */
  232. static void
  233. cleanup_nodelist(void)
  234. {
  235. const smartlist_t *nodelist = nodelist_get_list();
  236. SMARTLIST_FOREACH_BEGIN(nodelist, node_t *, node) {
  237. tor_free(node->md);
  238. node->md = NULL;
  239. } SMARTLIST_FOREACH_END(node);
  240. nodelist_free_all();
  241. }
/* Build a fake relay and register it both in the nodelist and in the given
 * consensus <b>ns</b>.
 *
 * The relay's identity digest (and its ed25519 signing key, which the HSDir
 * index computation reads) is DIGEST_LEN bytes of <b>identity_idx</b>, so
 * each index yields a distinct, deterministic identity. <b>nickname</b> names
 * the relay and <b>is_hsdir</b> sets its HSDir flag.
 *
 * Ownership: on success the routerstatus is owned by ns->routerstatus_list;
 * the routerinfo is always freed before returning (the nodelist keeps no
 * pointer to it since node->ri is reset below); node->md is allocated here
 * and must later be freed by cleanup_nodelist(). */
static void
helper_add_hsdir_to_networkstatus(networkstatus_t *ns,
                                  int identity_idx,
                                  const char *nickname,
                                  int is_hsdir)
{
  routerstatus_t *rs = tor_malloc_zero(sizeof(routerstatus_t));
  routerinfo_t *ri = tor_malloc_zero(sizeof(routerinfo_t));
  uint8_t identity[DIGEST_LEN];
  tor_addr_t ipv4_addr;
  node_t *node = NULL;

  /* Deterministic identity derived from the index. */
  memset(identity, identity_idx, sizeof(identity));
  memcpy(rs->identity_digest, identity, DIGEST_LEN);

  rs->is_hs_dir = is_hsdir;
  rs->pv.supports_v3_hsdir = 1;
  strlcpy(rs->nickname, nickname, sizeof(rs->nickname));
  /* Give both descriptor and status the same placeholder IPv4 address. */
  tor_addr_parse(&ipv4_addr, "1.2.3.4");
  ri->addr = tor_addr_to_ipv4h(&ipv4_addr);
  rs->addr = tor_addr_to_ipv4h(&ipv4_addr);
  ri->nickname = tor_strdup(nickname);
  ri->protocol_list = tor_strdup("HSDir=1-2 LinkAuth=3");
  memcpy(ri->cache_info.identity_digest, identity, DIGEST_LEN);
  ri->cache_info.signing_key_cert = tor_malloc_zero(sizeof(tor_cert_t));
  /* Needed for the HSDir index computation. */
  memset(&ri->cache_info.signing_key_cert->signing_key,
         identity_idx, ED25519_PUBKEY_LEN);
  tt_assert(nodelist_set_routerinfo(ri, NULL));

  node = node_get_mutable_by_id(ri->cache_info.identity_digest);
  tt_assert(node);
  node->rs = rs;
  /* We need this to exist for node_has_preferred_descriptor() to return
   * true. */
  node->md = tor_malloc_zero(sizeof(microdesc_t));
  /* Do this now the nodelist_set_routerinfo() function needs a "rs" to set
   * the indexes which it doesn't have when it is called. */
  node_set_hsdir_index(node, ns);
  /* Detach the routerinfo from the node so freeing ri below is safe. */
  node->ri = NULL;
  smartlist_add(ns->routerstatus_list, rs);

 done:
  /* If a tt_assert above jumped here before the node took rs, free it
   * ourselves; the routerinfo is ours to free in every case. */
  if (node == NULL)
    routerstatus_free(rs);

  routerinfo_free(ri);
}
/* Shared fake consensus used by the networkstatus mocks below; lazily
 * created by mock_networkstatus_get_latest_consensus(). Tests that free it
 * are responsible for resetting this pointer. */
static networkstatus_t *mock_ns = NULL;
  286. static networkstatus_t *
  287. mock_networkstatus_get_latest_consensus(void)
  288. {
  289. time_t now = approx_time();
  290. /* If initialized, return it */
  291. if (mock_ns) {
  292. return mock_ns;
  293. }
  294. /* Initialize fake consensus */
  295. mock_ns = tor_malloc_zero(sizeof(networkstatus_t));
  296. /* This consensus is live */
  297. mock_ns->valid_after = now-1;
  298. mock_ns->fresh_until = now+1;
  299. mock_ns->valid_until = now+2;
  300. /* Create routerstatus list */
  301. mock_ns->routerstatus_list = smartlist_new();
  302. mock_ns->type = NS_TYPE_CONSENSUS;
  303. return mock_ns;
  304. }
  305. static networkstatus_t *
  306. mock_networkstatus_get_live_consensus(time_t now)
  307. {
  308. (void) now;
  309. tt_assert(mock_ns);
  310. done:
  311. return mock_ns;
  312. }
  313. /** Test the responsible HSDirs calculation function */
  314. static void
  315. test_responsible_hsdirs(void *arg)
  316. {
  317. smartlist_t *responsible_dirs = smartlist_new();
  318. networkstatus_t *ns = NULL;
  319. (void) arg;
  320. hs_init();
  321. MOCK(networkstatus_get_latest_consensus,
  322. mock_networkstatus_get_latest_consensus);
  323. ns = networkstatus_get_latest_consensus();
  324. { /* First router: HSdir */
  325. helper_add_hsdir_to_networkstatus(ns, 1, "igor", 1);
  326. }
  327. { /* Second HSDir */
  328. helper_add_hsdir_to_networkstatus(ns, 2, "victor", 1);
  329. }
  330. { /* Third relay but not HSDir */
  331. helper_add_hsdir_to_networkstatus(ns, 3, "spyro", 0);
  332. }
  333. /* Use a fixed time period and pub key so we always take the same path */
  334. ed25519_public_key_t pubkey;
  335. uint64_t time_period_num = 17653; // 2 May, 2018, 14:00.
  336. memset(&pubkey, 42, sizeof(pubkey));
  337. hs_get_responsible_hsdirs(&pubkey, time_period_num,
  338. 0, 0, responsible_dirs);
  339. /* Make sure that we only found 2 responsible HSDirs.
  340. * The third relay was not an hsdir! */
  341. tt_int_op(smartlist_len(responsible_dirs), OP_EQ, 2);
  342. /** TODO: Build a bigger network and do more tests here */
  343. done:
  344. SMARTLIST_FOREACH(ns->routerstatus_list,
  345. routerstatus_t *, rs, routerstatus_free(rs));
  346. smartlist_free(responsible_dirs);
  347. smartlist_clear(ns->routerstatus_list);
  348. networkstatus_vote_free(mock_ns);
  349. cleanup_nodelist();
  350. }
  351. static void
  352. mock_directory_initiate_request(directory_request_t *req)
  353. {
  354. (void)req;
  355. return;
  356. }
  357. static int
  358. mock_hs_desc_encode_descriptor(const hs_descriptor_t *desc,
  359. const ed25519_keypair_t *signing_kp,
  360. const uint8_t *descriptor_cookie,
  361. char **encoded_out)
  362. {
  363. (void)desc;
  364. (void)signing_kp;
  365. (void)descriptor_cookie;
  366. tor_asprintf(encoded_out, "lulu");
  367. return 0;
  368. }
/* Fake or_state_t handed out by get_or_state_replacement(); zero-initialized
 * static storage is sufficient for the code paths under test. */
static or_state_t dummy_state;
/* Mock function to get fake or state (used for rev counters): return the
 * static dummy_state instead of loading any real state from disk. */
static or_state_t *
get_or_state_replacement(void)
{
  return &dummy_state;
}
/* Mock of router_have_minimum_dir_info(): pretend we always have enough
 * directory information, so descriptor uploads are never deferred. */
static int
mock_router_have_minimum_dir_info(void)
{
  return 1;
}
  381. /** Test that we correctly detect when the HSDir hash ring changes so that we
  382. * reupload our descriptor. */
  383. static void
  384. test_desc_reupload_logic(void *arg)
  385. {
  386. networkstatus_t *ns = NULL;
  387. (void) arg;
  388. hs_init();
  389. MOCK(router_have_minimum_dir_info,
  390. mock_router_have_minimum_dir_info);
  391. MOCK(get_or_state,
  392. get_or_state_replacement);
  393. MOCK(networkstatus_get_latest_consensus,
  394. mock_networkstatus_get_latest_consensus);
  395. MOCK(directory_initiate_request,
  396. mock_directory_initiate_request);
  397. MOCK(hs_desc_encode_descriptor,
  398. mock_hs_desc_encode_descriptor);
  399. ns = networkstatus_get_latest_consensus();
  400. /** Test logic:
  401. * 1) Upload descriptor to HSDirs
  402. * CHECK that previous_hsdirs list was populated.
  403. * 2) Then call router_dir_info_changed() without an HSDir set change.
  404. * CHECK that no reuplod occurs.
  405. * 3) Now change the HSDir set, and call dir_info_changed() again.
  406. * CHECK that reupload occurs.
  407. * 4) Finally call service_desc_schedule_upload().
  408. * CHECK that previous_hsdirs list was cleared.
  409. **/
  410. /* Let's start by building our descriptor and service */
  411. hs_service_descriptor_t *desc = service_descriptor_new();
  412. hs_service_t *service = NULL;
  413. /* hex-encoded ed25519 pubkey used in hs_build_address.py */
  414. char pubkey_hex[] =
  415. "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a";
  416. char onion_addr[HS_SERVICE_ADDR_LEN_BASE32 + 1];
  417. ed25519_public_key_t pubkey;
  418. base16_decode((char*)pubkey.pubkey, sizeof(pubkey.pubkey),
  419. pubkey_hex, strlen(pubkey_hex));
  420. hs_build_address(&pubkey, HS_VERSION_THREE, onion_addr);
  421. service = tor_malloc_zero(sizeof(hs_service_t));
  422. tt_assert(service);
  423. memcpy(service->onion_address, onion_addr, sizeof(service->onion_address));
  424. ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  425. ed25519_public_key_generate(&service->keys.identity_pk,
  426. &service->keys.identity_sk);
  427. service->desc_current = desc;
  428. /* Also add service to service map */
  429. hs_service_ht *service_map = get_hs_service_map();
  430. tt_assert(service_map);
  431. tt_int_op(hs_service_get_num_services(), OP_EQ, 0);
  432. register_service(service_map, service);
  433. tt_int_op(hs_service_get_num_services(), OP_EQ, 1);
  434. /* Now let's create our hash ring: */
  435. {
  436. helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
  437. helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
  438. helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
  439. helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
  440. helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
  441. helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  442. }
  443. /* Now let's upload our desc to all hsdirs */
  444. upload_descriptor_to_all(service, desc);
  445. /* Check that previous hsdirs were populated */
  446. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  447. /* Poison next upload time so that we can see if it was changed by
  448. * router_dir_info_changed(). No changes in hash ring so far, so the upload
  449. * time should stay as is. */
  450. desc->next_upload_time = 42;
  451. router_dir_info_changed();
  452. tt_int_op(desc->next_upload_time, OP_EQ, 42);
  453. /* Now change the HSDir hash ring by swapping nora for aaron.
  454. * Start by clearing the hash ring */
  455. {
  456. SMARTLIST_FOREACH(ns->routerstatus_list,
  457. routerstatus_t *, rs, routerstatus_free(rs));
  458. smartlist_clear(ns->routerstatus_list);
  459. cleanup_nodelist();
  460. routerlist_free_all();
  461. }
  462. { /* Now add back all the nodes */
  463. helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
  464. helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
  465. helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
  466. helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
  467. helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  468. helper_add_hsdir_to_networkstatus(ns, 7, "nora", 1);
  469. }
  470. /* Now call service_desc_hsdirs_changed() and see that it detected the hash
  471. ring change */
  472. time_t now = approx_time();
  473. tt_assert(now);
  474. tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  475. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  476. /* Now order another upload and see that we keep having 6 prev hsdirs */
  477. upload_descriptor_to_all(service, desc);
  478. /* Check that previous hsdirs were populated */
  479. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  480. /* Now restore the HSDir hash ring to its original state by swapping back
  481. aaron for nora */
  482. /* First clear up the hash ring */
  483. {
  484. SMARTLIST_FOREACH(ns->routerstatus_list,
  485. routerstatus_t *, rs, routerstatus_free(rs));
  486. smartlist_clear(ns->routerstatus_list);
  487. cleanup_nodelist();
  488. routerlist_free_all();
  489. }
  490. { /* Now populate the hash ring again */
  491. helper_add_hsdir_to_networkstatus(ns, 1, "dingus", 1);
  492. helper_add_hsdir_to_networkstatus(ns, 2, "clive", 1);
  493. helper_add_hsdir_to_networkstatus(ns, 3, "aaron", 1);
  494. helper_add_hsdir_to_networkstatus(ns, 4, "lizzie", 1);
  495. helper_add_hsdir_to_networkstatus(ns, 5, "daewon", 1);
  496. helper_add_hsdir_to_networkstatus(ns, 6, "clarke", 1);
  497. }
  498. /* Check that our algorithm catches this change of hsdirs */
  499. tt_int_op(service_desc_hsdirs_changed(service, desc), OP_EQ, 1);
  500. /* Now pretend that the descriptor changed, and order a reupload to all
  501. HSDirs. Make sure that the set of previous HSDirs was cleared. */
  502. service_desc_schedule_upload(desc, now, 1);
  503. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 0);
  504. /* Now reupload again: see that the prev hsdir set got populated again. */
  505. upload_descriptor_to_all(service, desc);
  506. tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 6);
  507. done:
  508. SMARTLIST_FOREACH(ns->routerstatus_list,
  509. routerstatus_t *, rs, routerstatus_free(rs));
  510. smartlist_clear(ns->routerstatus_list);
  511. if (service) {
  512. remove_service(get_hs_service_map(), service);
  513. hs_service_free(service);
  514. }
  515. networkstatus_vote_free(ns);
  516. cleanup_nodelist();
  517. hs_free_all();
  518. }
  519. /** Test disaster SRV computation and caching */
  520. static void
  521. test_disaster_srv(void *arg)
  522. {
  523. uint8_t *cached_disaster_srv_one = NULL;
  524. uint8_t *cached_disaster_srv_two = NULL;
  525. uint8_t srv_one[DIGEST256_LEN] = {0};
  526. uint8_t srv_two[DIGEST256_LEN] = {0};
  527. uint8_t srv_three[DIGEST256_LEN] = {0};
  528. uint8_t srv_four[DIGEST256_LEN] = {0};
  529. uint8_t srv_five[DIGEST256_LEN] = {0};
  530. (void) arg;
  531. /* Get the cached SRVs: we gonna use them later for verification */
  532. cached_disaster_srv_one = get_first_cached_disaster_srv();
  533. cached_disaster_srv_two = get_second_cached_disaster_srv();
  534. /* Compute some srvs */
  535. get_disaster_srv(1, srv_one);
  536. get_disaster_srv(2, srv_two);
  537. /* Check that the cached ones were updated */
  538. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  539. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
  540. /* Ask for an SRV that has already been computed */
  541. get_disaster_srv(2, srv_two);
  542. /* and check that the cache entries have not changed */
  543. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_one, DIGEST256_LEN);
  544. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
  545. /* Ask for a new SRV */
  546. get_disaster_srv(3, srv_three);
  547. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  548. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_two, DIGEST256_LEN);
  549. /* Ask for another SRV: none of the original SRVs should now be cached */
  550. get_disaster_srv(4, srv_four);
  551. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_three, DIGEST256_LEN);
  552. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
  553. /* Ask for yet another SRV */
  554. get_disaster_srv(5, srv_five);
  555. tt_mem_op(cached_disaster_srv_one, OP_EQ, srv_five, DIGEST256_LEN);
  556. tt_mem_op(cached_disaster_srv_two, OP_EQ, srv_four, DIGEST256_LEN);
  557. done:
  558. ;
  559. }
/** Test our HS descriptor request tracker by making various requests and
 * checking whether they get tracked properly. */
static void
test_hid_serv_request_tracker(void *arg)
{
  (void) arg;
  time_t retval;
  routerstatus_t *hsdir = NULL, *hsdir2 = NULL, *hsdir3 = NULL;
  time_t now = approx_time();
  /* Request keys used for tracking: two full-length keys and one short. */
  const char *req_key_str_first =
 "vd4zb6zesaubtrjvdqcr2w7x7lhw2up4Xnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_second =
 "g53o7iavcd62oihswhr24u6czmqws5kpXnw4526ThUNbL5o1go+EdUuEqlKxHkNbnK41pRzizzs";
  const char *req_key_str_small = "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ";
  /*************************** basic test *******************************/
  /* Get request tracker and make sure it's empty */
  strmap_t *request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
  /* Let's register a hid serv request (set=1 records the request time) */
  hsdir = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir->identity_digest, 'Z', DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Let's lookup a non-existent hidserv request (set=0: pure lookup,
   * 0 means "never requested") */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_second,
                                           now+1, 0);
  tt_int_op(retval, OP_EQ, 0);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Let's lookup a real hidserv request */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, now); /* we got it */
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /**********************************************************************/
  /* Let's add another request for the same HS but on a different HSDir. */
  hsdir2 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir2->identity_digest, 2, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir2, req_key_str_first,
                                           now+3, 1);
  tt_int_op(retval, OP_EQ, now+3);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
  /* Check that we can clean the first request based on time: anything older
   * than the requery period before the cutoff should be dropped. */
  hs_clean_last_hid_serv_requests(now+3+REND_HID_SERV_DIR_REQUERY_PERIOD);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 1);
  /* Check that it doesn't exist anymore */
  retval = hs_lookup_last_hid_serv_request(hsdir, req_key_str_first,
                                           now+2, 0);
  tt_int_op(retval, OP_EQ, 0);
  /* Now let's add a smaller req key str */
  hsdir3 = tor_malloc_zero(sizeof(routerstatus_t));
  memset(hsdir3->identity_digest, 3, DIGEST_LEN);
  retval = hs_lookup_last_hid_serv_request(hsdir3, req_key_str_small,
                                           now+4, 1);
  tt_int_op(retval, OP_EQ, now+4);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
  /*************************** deleting entries **************************/
  /* Add another request with very short key */
  retval = hs_lookup_last_hid_serv_request(hsdir, "l", now, 1);
  tt_int_op(retval, OP_EQ, now);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  /* Try deleting entries with a dummy key. Check that our previous requests
   * are still there. The one-character key is expected to trip a BUG()
   * warning, so capture it to keep the test alive. */
  tor_capture_bugs_(1);
  hs_purge_hid_serv_from_last_hid_serv_requests("a");
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  tor_end_capture_bugs_();
  /* Try another dummy key (overlong, all-Z). Check that requests are still
   * there */
  {
    char dummy[2000];
    memset(dummy, 'Z', 2000);
    dummy[1999] = '\x00';
    hs_purge_hid_serv_from_last_hid_serv_requests(dummy);
    tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  }
  /* Another dummy key (valid length but never requested)! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_second);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 3);
  /* Now actually delete a request! */
  hs_purge_hid_serv_from_last_hid_serv_requests(req_key_str_first);
  tt_int_op(strmap_size(request_tracker),OP_EQ, 2);
  /* Purge it all! */
  hs_purge_last_hid_serv_requests();
  request_tracker = get_last_hid_serv_requests();
  tt_int_op(strmap_size(request_tracker),OP_EQ, 0);
 done:
  tor_free(hsdir);
  tor_free(hsdir2);
  tor_free(hsdir3);
}
/** Test parse_extended_hostname(): verify that .onion (v2 and v3), .exit,
 * normal and malformed hostnames are classified correctly, and that for
 * onion hostnames the buffer is rewritten in place to the bare address
 * (subdomains and the ".onion" suffix stripped). */
static void
test_parse_extended_hostname(void *arg)
{
  (void) arg;
  char address1[] = "fooaddress.onion";
  char address2[] = "aaaaaaaaaaaaaaaa.onion";
  char address3[] = "fooaddress.exit";
  char address4[] = "www.torproject.org";
  char address5[] = "foo.abcdefghijklmnop.onion";
  char address6[] = "foo.bar.abcdefghijklmnop.onion";
  char address7[] = ".abcdefghijklmnop.onion";
  char address8[] =
    "www.25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid.onion";
  /* Wrong-length label before ".onion" -> bad. */
  tt_assert(BAD_HOSTNAME == parse_extended_hostname(address1));
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address2));
  tt_str_op(address2,OP_EQ, "aaaaaaaaaaaaaaaa");
  tt_assert(EXIT_HOSTNAME == parse_extended_hostname(address3));
  tt_assert(NORMAL_HOSTNAME == parse_extended_hostname(address4));
  /* Subdomains are dropped; only the onion label remains. */
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address5));
  tt_str_op(address5,OP_EQ, "abcdefghijklmnop");
  tt_assert(ONION_V2_HOSTNAME == parse_extended_hostname(address6));
  tt_str_op(address6,OP_EQ, "abcdefghijklmnop");
  /* Empty leading label -> bad. */
  tt_assert(BAD_HOSTNAME == parse_extended_hostname(address7));
  tt_assert(ONION_V3_HOSTNAME == parse_extended_hostname(address8));
  tt_str_op(address8, OP_EQ,
            "25njqamcweflpvkl73j4szahhihoc4xt3ktcgjnpaingr5yhkenl5sid");
 done: ;
}
/** Test hs_in_period_between_tp_and_srv() at several consensus valid_after
 * times across the SRV/TP cycle. */
static void
test_time_between_tp_and_srv(void *arg)
{
  int ret;
  networkstatus_t ns;
  (void) arg;
  /* This function should be returning true where "^" are:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |             ^^^^^^^^^^^^            ^^^^^^^^^^^^                  |
   *  |                                                                   |
   *  +------------------------------------------------------------------+
   */
  /* Case 1: valid_after at 00:00 (SRV#1): inside SRV->TP, expect 0. */
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);
  /* Case 2: valid_after at 11:00, just before TP#1: still 0. */
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 11:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);
  /* Case 3: valid_after at 12:00 (TP#1): now between TP and SRV, expect 1. */
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 12:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 13:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);
  /* Case 4: valid_after at 23:00, just before SRV#2: still 1. */
  ret = parse_rfc1123_time("Sat, 26 Oct 1985 23:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 1);
  /* Case 5: valid_after at 00:00 next day (SRV#2): back to 0. */
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 00:00:00 UTC", &ns.valid_after);
  tt_int_op(ret, OP_EQ, 0);
  ret = parse_rfc1123_time("Sat, 27 Oct 1985 01:00:00 UTC", &ns.fresh_until);
  tt_int_op(ret, OP_EQ, 0);
  voting_schedule_recalculate_timing(get_options(), ns.valid_after);
  ret = hs_in_period_between_tp_and_srv(&ns, 0);
  tt_int_op(ret, OP_EQ, 0);
 done:
  ;
}
/************ Reachability Test (it is huge) ****************/
/* Simulate different consensus for client and service. Used by the
 * reachability test. The SRV and responsible HSDir list are used by all
 * reachability tests so make them common to simplify setup and teardown. */
/* Lazily allocated by the mock consensus getters; freed in
 * cleanup_mock_ns(). */
static networkstatus_t *mock_service_ns = NULL;
static networkstatus_t *mock_client_ns = NULL;
/* Static SRV values; consensus sr_info pointers reference these, so they
 * must be detached before freeing the consensuses. */
static sr_srv_t current_srv, previous_srv;
/* Allocated in setup_reachability_test(), freed in
 * cleanup_reachability_test(). */
static smartlist_t *service_responsible_hsdirs = NULL;
static smartlist_t *client_responsible_hsdirs = NULL;
  744. static networkstatus_t *
  745. mock_networkstatus_get_live_consensus_service(time_t now)
  746. {
  747. (void) now;
  748. if (mock_service_ns) {
  749. return mock_service_ns;
  750. }
  751. mock_service_ns = tor_malloc_zero(sizeof(networkstatus_t));
  752. mock_service_ns->routerstatus_list = smartlist_new();
  753. mock_service_ns->type = NS_TYPE_CONSENSUS;
  754. return mock_service_ns;
  755. }
/* Mock for networkstatus_get_latest_consensus() on the service side: simply
 * hand back the (lazily allocated) service mock consensus. */
static networkstatus_t *
mock_networkstatus_get_latest_consensus_service(void)
{
  return mock_networkstatus_get_live_consensus_service(0);
}
  761. static networkstatus_t *
  762. mock_networkstatus_get_live_consensus_client(time_t now)
  763. {
  764. (void) now;
  765. if (mock_client_ns) {
  766. return mock_client_ns;
  767. }
  768. mock_client_ns = tor_malloc_zero(sizeof(networkstatus_t));
  769. mock_client_ns->routerstatus_list = smartlist_new();
  770. mock_client_ns->type = NS_TYPE_CONSENSUS;
  771. return mock_client_ns;
  772. }
/* Mock for networkstatus_get_latest_consensus() on the client side: simply
 * hand back the (lazily allocated) client mock consensus. */
static networkstatus_t *
mock_networkstatus_get_latest_consensus_client(void)
{
  return mock_networkstatus_get_live_consensus_client(0);
}
  778. /* Mock function because we are not trying to test the close circuit that does
  779. * an awful lot of checks on the circuit object. */
  780. static void
  781. mock_circuit_mark_for_close(circuit_t *circ, int reason, int line,
  782. const char *file)
  783. {
  784. (void) circ;
  785. (void) reason;
  786. (void) line;
  787. (void) file;
  788. return;
  789. }
/* Initialize a big HSDir V3 hash ring: add 250 hsdir entries, each with a
 * random base64 nickname, to the given consensus <b>ns</b>. */
static void
helper_initialize_big_hash_ring(networkstatus_t *ns)
{
  int ret;
  /* Generate 250 hsdirs! :) */
  for (int counter = 1 ; counter < 251 ; counter++) {
    /* Let's generate random nickname for each hsdir... */
    char nickname_binary[8];
    char nickname_str[13] = {0};
    crypto_rand(nickname_binary, sizeof(nickname_binary));
    /* 8 random bytes base64-encode to exactly 12 characters. */
    ret = base64_encode(nickname_str, sizeof(nickname_str),
                        nickname_binary, sizeof(nickname_binary), 0);
    tt_int_op(ret, OP_EQ, 12);
    helper_add_hsdir_to_networkstatus(ns, counter, nickname_str, 1);
  }
  /* Make sure we have 250 hsdirs in our list */
  tt_int_op(smartlist_len(ns->routerstatus_list), OP_EQ, 250);
 done:
  ;
}
  811. /** Initialize service and publish its descriptor as needed. Return the newly
  812. * allocated service object to the caller. */
  813. static hs_service_t *
  814. helper_init_service(time_t now)
  815. {
  816. int retval;
  817. hs_service_t *service = hs_service_new(get_options());
  818. tt_assert(service);
  819. service->config.version = HS_VERSION_THREE;
  820. ed25519_secret_key_generate(&service->keys.identity_sk, 0);
  821. ed25519_public_key_generate(&service->keys.identity_pk,
  822. &service->keys.identity_sk);
  823. /* Register service to global map. */
  824. retval = register_service(get_hs_service_map(), service);
  825. tt_int_op(retval, OP_EQ, 0);
  826. /* Initialize service descriptor */
  827. build_all_descriptors(now);
  828. tt_assert(service->desc_current);
  829. tt_assert(service->desc_next);
  830. done:
  831. return service;
  832. }
  833. /* Helper function to set the RFC 1123 time string into t. */
  834. static void
  835. set_consensus_times(const char *timestr, time_t *t)
  836. {
  837. tt_assert(timestr);
  838. tt_assert(t);
  839. int ret = parse_rfc1123_time(timestr, t);
  840. tt_int_op(ret, OP_EQ, 0);
  841. done:
  842. return;
  843. }
  844. /* Helper function to cleanup the mock consensus (client and service) */
  845. static void
  846. cleanup_mock_ns(void)
  847. {
  848. if (mock_service_ns) {
  849. SMARTLIST_FOREACH(mock_service_ns->routerstatus_list,
  850. routerstatus_t *, rs, routerstatus_free(rs));
  851. smartlist_clear(mock_service_ns->routerstatus_list);
  852. mock_service_ns->sr_info.current_srv = NULL;
  853. mock_service_ns->sr_info.previous_srv = NULL;
  854. networkstatus_vote_free(mock_service_ns);
  855. mock_service_ns = NULL;
  856. }
  857. if (mock_client_ns) {
  858. SMARTLIST_FOREACH(mock_client_ns->routerstatus_list,
  859. routerstatus_t *, rs, routerstatus_free(rs));
  860. smartlist_clear(mock_client_ns->routerstatus_list);
  861. mock_client_ns->sr_info.current_srv = NULL;
  862. mock_client_ns->sr_info.previous_srv = NULL;
  863. networkstatus_vote_free(mock_client_ns);
  864. mock_client_ns = NULL;
  865. }
  866. }
/* Helper function to setup a reachability test. Once called, the
 * cleanup_reachability_test MUST be called at the end. */
static void
setup_reachability_test(void)
{
  /* Stub out circuit closing and or_state access so the test doesn't have
   * to satisfy their invariants. */
  MOCK(circuit_mark_for_close_, mock_circuit_mark_for_close);
  MOCK(get_or_state, get_or_state_replacement);
  hs_init();
  /* Baseline to start with: two distinct SRV values (all-0 vs all-1). */
  memset(&current_srv, 0, sizeof(current_srv));
  memset(&previous_srv, 1, sizeof(previous_srv));
  /* Initialize the consensuses. */
  mock_networkstatus_get_latest_consensus_service();
  mock_networkstatus_get_latest_consensus_client();
  service_responsible_hsdirs = smartlist_new();
  client_responsible_hsdirs = smartlist_new();
}
/* Helper function to cleanup a reachability test initial setup. Frees the
 * responsible-HSDir lists (the routerstatus entries are owned by the mock
 * consensuses), tears down HS state and unmocks in reverse setup order. */
static void
cleanup_reachability_test(void)
{
  smartlist_free(service_responsible_hsdirs);
  service_responsible_hsdirs = NULL;
  smartlist_free(client_responsible_hsdirs);
  client_responsible_hsdirs = NULL;
  hs_free_all();
  cleanup_mock_ns();
  UNMOCK(get_or_state);
  UNMOCK(circuit_mark_for_close_);
}
/* A reachability test always check if the resulting service and client
 * responsible HSDir for the given parameters are equal.
 *
 * Return true iff every one of the client's 6 responsible HSDirs also
 * appears in the service's 8 (i.e. the client set is a subset of the
 * service set). */
static int
are_responsible_hsdirs_equal(void)
{
  int count = 0;
  /* The expected list sizes are fixed by the test setup: 6 for the client,
   * 8 for the service. */
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
  /* Count how many client HSDirs are present in the service list. */
  SMARTLIST_FOREACH_BEGIN(client_responsible_hsdirs,
                          const routerstatus_t *, c_rs) {
    SMARTLIST_FOREACH_BEGIN(service_responsible_hsdirs,
                            const routerstatus_t *, s_rs) {
      if (tor_memeq(c_rs->identity_digest, s_rs->identity_digest,
                    DIGEST_LEN)) {
        count++;
        break;
      }
    } SMARTLIST_FOREACH_END(s_rs);
  } SMARTLIST_FOREACH_END(c_rs);
 done:
  return (count == 6);
}
/* Tor doesn't use such a function to get the previous HSDir, it is only used
 * in node_set_hsdir_index(). We need it here so we can test the reachability
 * scenario 6 that requires the previous time period to compute the list of
 * responsible HSDir because of the client state timing.
 *
 * Returns the time period number just before the one containing <b>now</b>.
 * Matches the reachability_cfg_t service_time_period_fn signature. */
static uint64_t
get_previous_time_period(time_t now)
{
  return hs_get_time_period_num(now) - 1;
}
/* Configuration of a reachability test scenario. */
typedef struct reachability_cfg_t {
  /* Consensus timings to be set. They have to be compliant with
   * RFC 1123 time format. */
  const char *service_valid_after;
  const char *service_valid_until;
  const char *client_valid_after;
  const char *client_valid_until;
  /* SRVs that the service and client should use. These point at the static
   * current_srv/previous_srv, or are NULL when that SRV is unavailable. */
  sr_srv_t *service_current_srv;
  sr_srv_t *service_previous_srv;
  sr_srv_t *client_current_srv;
  sr_srv_t *client_previous_srv;
  /* A time period function for the service to use for this scenario. For a
   * successful reachability test, the client always uses the current time
   * period thus why no client function. */
  uint64_t (*service_time_period_fn)(time_t);
  /* Is the client and service expected to be in a new time period. After
   * setting the consensus time, the reachability test checks
   * hs_in_period_between_tp_and_srv() and tests the returned value against
   * this. */
  unsigned int service_in_new_tp;
  unsigned int client_in_new_tp;
  /* Some scenario requires a hint that the client, because of its consensus
   * time, will request the "next" service descriptor so this indicates if it
   * is the case or not. */
  unsigned int client_fetch_next_desc;
} reachability_cfg_t;
/* Semantic aliases for the 0/1 flags in the scenario configurations below,
 * so each field reads as its meaning rather than as a bare number. */
#define NOT_IN_NEW_TP 0
#define IN_NEW_TP 1
#define DONT_NEED_NEXT_DESC 0
#define NEED_NEXT_DESC 1
/* Table of reachability scenarios, terminated by an all-NULL/zero entry.
 * Each entry is documented with a timeline diagram showing where the
 * service (S) and client (C) consensus times fall. */
static reachability_cfg_t reachability_scenarios[] = {
  /* Scenario 1
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |              ^ ^                                                  |
   *  |              S C                                                  |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 15:00,
   *  both are after TP#1 thus have access to SRV#1. Service and client should
   *  be using TP#1.
   */
  { "Sat, 26 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 26 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 15:00:00 UTC", /* Client valid_after */
    "Sat, 26 Oct 1985 16:00:00 UTC", /* Client valid_until. */
    &current_srv, NULL, /* Service current and previous SRV */
    &current_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 2
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                        |
   *  |                        S C                                        |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 23:00 and client to 01:00,
   *  which makes the client after the SRV#2 and the service just before. The
   *  service should only be using TP#1. The client should be using TP#1.
   */
  { "Sat, 26 Oct 1985 23:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 01:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Client valid_until. */
    &previous_srv, NULL, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 3
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|----------$===========|     |
   *  |                            ^ ^                                    |
   *  |                            S C                                    |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 03:00 and client to 05:00,
   *  which makes both after SRV#2. The service should be using TP#1 as its
   *  current time period. The client should be using TP#1.
   */
  { "Sat, 27 Oct 1985 03:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 04:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 05:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 06:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 4
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                            |
   *  |                                    S C                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 11:00 and client to 13:00,
   *  which makes the service before TP#2 and the client just after. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#2 time period.
   */
  { "Sat, 27 Oct 1985 11:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 13:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    hs_get_next_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    NEED_NEXT_DESC },

  /* Scenario 5
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                        ^ ^                                        |
   *  |                        C S                                        |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 01:00 and client to 23:00,
   *  which makes the service after SRV#2 and the client just before. The
   *  service should be using TP#1 as its current time period and TP#2 as the
   *  next. The client should be using TP#1 time period.
   */
  { "Sat, 27 Oct 1985 01:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 02:00:00 UTC", /* Service valid_until */
    "Sat, 26 Oct 1985 23:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 00:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &previous_srv, NULL, /* Client current and previous SRV */
    hs_get_time_period_num, /* Service time period function. */
    NOT_IN_NEW_TP, /* Is service in new TP? */
    IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* Scenario 6
   *
   *  +------------------------------------------------------------------+
   *  |                                                                   |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                   |
   *  |  $==========|-----------$===========|-----------$===========|    |
   *  |                                    ^ ^                            |
   *  |                                    C S                            |
   *  +------------------------------------------------------------------+
   *
   *  S: Service, C: Client
   *
   *  Service consensus valid_after time is set to 13:00 and client to 11:00,
   *  which makes the service outside after TP#2 and the client just before.
   *  The service should be using TP#1 as its current time period and TP#2 as
   *  its next. The client should be using TP#1 time period.
   */
  { "Sat, 27 Oct 1985 13:00:00 UTC", /* Service valid_after */
    "Sat, 27 Oct 1985 14:00:00 UTC", /* Service valid_until */
    "Sat, 27 Oct 1985 11:00:00 UTC", /* Client valid_after */
    "Sat, 27 Oct 1985 12:00:00 UTC", /* Client valid_until. */
    &current_srv, &previous_srv, /* Service current and previous SRV */
    &current_srv, &previous_srv, /* Client current and previous SRV */
    get_previous_time_period, /* Service time period function. */
    IN_NEW_TP, /* Is service in new TP? */
    NOT_IN_NEW_TP, /* Is client in new TP? */
    DONT_NEED_NEXT_DESC },

  /* End marker. */
  { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 0, 0, 0}
};
/* Run a single reachability scenario. num_scenario is the corresponding
 * scenario number from the documentation. It is used to log it in case of
 * failure so we know which scenario fails.
 *
 * Returns 0 on success, -1 on failure (and fails the test with a message
 * naming the scenario). */
static int
run_reachability_scenario(const reachability_cfg_t *cfg, int num_scenario)
{
  int ret = -1;
  hs_service_t *service;
  uint64_t service_tp, client_tp;
  ed25519_public_key_t service_blinded_pk, client_blinded_pk;
  setup_reachability_test();
  tt_assert(cfg);
  /* Set service consensus time. */
  set_consensus_times(cfg->service_valid_after,
                      &mock_service_ns->valid_after);
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->valid_until);
  /* NOTE(review): fresh_until is set from valid_until (not a separate cfg
   * field) — looks deliberate, confirm against the voting schedule. */
  set_consensus_times(cfg->service_valid_until,
                      &mock_service_ns->fresh_until);
  voting_schedule_recalculate_timing(get_options(),
                                     mock_service_ns->valid_after);
  /* Check that service is in the right time period point */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_service_ns, 0), OP_EQ,
            cfg->service_in_new_tp);
  /* Set client consensus time. */
  set_consensus_times(cfg->client_valid_after,
                      &mock_client_ns->valid_after);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->valid_until);
  set_consensus_times(cfg->client_valid_until,
                      &mock_client_ns->fresh_until);
  voting_schedule_recalculate_timing(get_options(),
                                     mock_client_ns->valid_after);
  /* Check that client is in the right time period point */
  tt_int_op(hs_in_period_between_tp_and_srv(mock_client_ns, 0), OP_EQ,
            cfg->client_in_new_tp);
  /* Set the SRVs for this scenario. */
  mock_client_ns->sr_info.current_srv = cfg->client_current_srv;
  mock_client_ns->sr_info.previous_srv = cfg->client_previous_srv;
  mock_service_ns->sr_info.current_srv = cfg->service_current_srv;
  mock_service_ns->sr_info.previous_srv = cfg->service_previous_srv;
  /* Initialize a service to get keys. */
  update_approx_time(mock_service_ns->valid_after);
  service = helper_init_service(mock_service_ns->valid_after+1);
  /*
   * === Client setup ===
   */
  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_client);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_client);
  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_client_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_client_ns);
  /* Client ONLY use the current time period. This is the whole point of these
   * reachability test that is to make sure the client can always reach the
   * service using only its current time period. */
  client_tp = hs_get_time_period_num(0);
  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          client_tp, &client_blinded_pk);
  hs_get_responsible_hsdirs(&client_blinded_pk, client_tp, 0, 1,
                            client_responsible_hsdirs);
  /* Cleanup the nodelist so we can let the service computes its own set of
   * node with its own hashring. */
  cleanup_nodelist();
  tt_int_op(smartlist_len(client_responsible_hsdirs), OP_EQ, 6);
  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);
  /*
   * === Service setup ===
   */
  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus_service);
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus_service);
  /* Make networkstatus_is_live() happy. */
  update_approx_time(mock_service_ns->valid_after);
  /* Initialize a big hashring for this consensus with the hsdir index set. */
  helper_initialize_big_hash_ring(mock_service_ns);
  service_tp = cfg->service_time_period_fn(0);
  hs_build_blinded_pubkey(&service->keys.identity_pk, NULL, 0,
                          service_tp, &service_blinded_pk);
  /* A service builds two lists of responsible HSDir, for the current and the
   * next descriptor. Depending on the scenario, the client timing indicate if
   * it is fetching the current or the next descriptor so we use the
   * "client_fetch_next_desc" to know which one the client is trying to get to
   * confirm that the service computes the same hashring for the same blinded
   * key and service time period function. */
  hs_get_responsible_hsdirs(&service_blinded_pk, service_tp,
                            cfg->client_fetch_next_desc, 0,
                            service_responsible_hsdirs);
  cleanup_nodelist();
  tt_int_op(smartlist_len(service_responsible_hsdirs), OP_EQ, 8);
  UNMOCK(networkstatus_get_latest_consensus);
  UNMOCK(networkstatus_get_live_consensus);
  /* Some testing of the values we just got from the client and service. The
   * blinded keys must match, and the client's HSDir set must be contained
   * in the service's. */
  tt_mem_op(&client_blinded_pk, OP_EQ, &service_blinded_pk,
            ED25519_PUBKEY_LEN);
  tt_int_op(are_responsible_hsdirs_equal(), OP_EQ, 1);
  /* Everything went well. */
  ret = 0;
 done:
  cleanup_reachability_test();
  if (ret == -1) {
    /* Do this so we can know which scenario failed. */
    char msg[32];
    tor_snprintf(msg, sizeof(msg), "Scenario %d failed", num_scenario);
    tt_fail_msg(msg);
  }
  return ret;
}
  1250. static void
  1251. test_reachability(void *arg)
  1252. {
  1253. (void) arg;
  1254. /* NOTE: An important axiom to understand here is that SRV#N must only be
  1255. * used with TP#N value. For example, SRV#2 with TP#1 should NEVER be used
  1256. * together. The HSDir index computation is based on this axiom.*/
  1257. for (int i = 0; reachability_scenarios[i].service_valid_after; ++i) {
  1258. int ret = run_reachability_scenario(&reachability_scenarios[i], i + 1);
  1259. if (ret < 0) {
  1260. return;
  1261. }
  1262. }
  1263. }
/** Pick an HSDir for service with <b>onion_identity_pk</b> as a client. Put
 * its identity digest, base64-encoded, in <b>hsdir_digest_out</b> (which
 * the caller must size for a base64 digest). */
static void
helper_client_pick_hsdir(const ed25519_public_key_t *onion_identity_pk,
                         char *hsdir_digest_out)
{
  tt_assert(onion_identity_pk);
  routerstatus_t *client_hsdir = pick_hsdir_v3(onion_identity_pk);
  tt_assert(client_hsdir);
  digest_to_base64(hsdir_digest_out, client_hsdir->identity_digest);
 done:
  ;
}
  1277. static void
  1278. test_hs_indexes(void *arg)
  1279. {
  1280. int ret;
  1281. uint64_t period_num = 42;
  1282. ed25519_public_key_t pubkey;
  1283. (void) arg;
  1284. /* Build the hs_index */
  1285. {
  1286. uint8_t hs_index[DIGEST256_LEN];
  1287. const char *b32_test_vector =
  1288. "37e5cbbd56a22823714f18f1623ece5983a0d64c78495a8cfab854245e5f9a8a";
  1289. char test_vector[DIGEST256_LEN];
  1290. ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
  1291. strlen(b32_test_vector));
  1292. tt_int_op(ret, OP_EQ, sizeof(test_vector));
  1293. /* Our test vector uses a public key set to 32 bytes of \x42. */
  1294. memset(&pubkey, '\x42', sizeof(pubkey));
  1295. hs_build_hs_index(1, &pubkey, period_num, hs_index);
  1296. tt_mem_op(hs_index, OP_EQ, test_vector, sizeof(hs_index));
  1297. }
  1298. /* Build the hsdir_index */
  1299. {
  1300. uint8_t srv[DIGEST256_LEN];
  1301. uint8_t hsdir_index[DIGEST256_LEN];
  1302. const char *b32_test_vector =
  1303. "db475361014a09965e7e5e4d4a25b8f8d4b8f16cb1d8a7e95eed50249cc1a2d5";
  1304. char test_vector[DIGEST256_LEN];
  1305. ret = base16_decode(test_vector, sizeof(test_vector), b32_test_vector,
  1306. strlen(b32_test_vector));
  1307. tt_int_op(ret, OP_EQ, sizeof(test_vector));
  1308. /* Our test vector uses a public key set to 32 bytes of \x42. */
  1309. memset(&pubkey, '\x42', sizeof(pubkey));
  1310. memset(srv, '\x43', sizeof(srv));
  1311. hs_build_hsdir_index(&pubkey, srv, period_num, hsdir_index);
  1312. tt_mem_op(hsdir_index, OP_EQ, test_vector, sizeof(hsdir_index));
  1313. }
  1314. done:
  1315. ;
  1316. }
/* Positions inside the SRV/TP schedule, consumed by
 * helper_set_consensus_and_system_time() below. "SRV to TP" is the 00:00 to
 * 12:00 UTC half-day; "TP to SRV" is the 12:00 to 00:00 half-day. */
#define EARLY_IN_SRV_TO_TP 0
#define LATE_IN_SRV_TO_TP 1
#define EARLY_IN_TP_TO_SRV 2
#define LATE_IN_TP_TO_SRV 3

/** Set the consensus and system time based on <b>position</b>. See the
 * following diagram for details:
 *
 *  +------------------------------------------------------------------+
 *  |                                                                  |
 *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
 *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
 *  |                                                                  |
 *  |  $==========|-----------$===========|----------$===========|    |
 *  |                                                                  |
 *  |                                                                  |
 *  +------------------------------------------------------------------+
 *
 *  Returns the (fake) current system time that was installed. */
static time_t
helper_set_consensus_and_system_time(networkstatus_t *ns, int position)
{
  time_t real_time = 0;

  /* The period between SRV#N and TP#N is from 00:00 to 12:00 UTC. Consensus
   * valid_after is what matters here, the rest is just to specify the voting
   * period correctly. */
  if (position == LATE_IN_SRV_TO_TP) {
    /* Just before the 12:00 TP boundary. */
    parse_rfc1123_time("Wed, 13 Apr 2016 11:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 12:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_TP_TO_SRV) {
    /* Just after the 12:00 TP boundary. */
    parse_rfc1123_time("Wed, 13 Apr 2016 13:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 13 Apr 2016 14:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 13 Apr 2016 16:00:00 UTC", &ns->valid_until);
  } else if (position == LATE_IN_TP_TO_SRV) {
    /* Just before the 00:00 SRV boundary. */
    parse_rfc1123_time("Wed, 13 Apr 2016 23:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 00:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->valid_until);
  } else if (position == EARLY_IN_SRV_TO_TP) {
    /* Just after the 00:00 SRV boundary. */
    parse_rfc1123_time("Wed, 14 Apr 2016 01:00:00 UTC", &ns->valid_after);
    parse_rfc1123_time("Wed, 14 Apr 2016 02:00:00 UTC", &ns->fresh_until);
    parse_rfc1123_time("Wed, 14 Apr 2016 04:00:00 UTC", &ns->valid_until);
  } else {
    /* Unknown position value: fail the test. */
    tt_assert(0);
  }
  /* Recompute the voting schedule from the newly-installed valid_after. */
  voting_schedule_recalculate_timing(get_options(), ns->valid_after);

  /* Set system time: pretend to be just 2 minutes before consensus expiry */
  real_time = ns->valid_until - 120;
  update_approx_time(real_time);

 done:
  return real_time;
}
/** Helper function that carries out the actual test for
 *  test_client_service_hsdir_set_sync(): put the service at
 *  <b>service_position</b> and the client at <b>client_position</b> in the
 *  SRV/TP schedule, then check that every HSDir the client picks was one the
 *  service uploaded to. */
static void
helper_test_hsdir_sync(networkstatus_t *ns,
                       int service_position, int client_position,
                       int client_fetches_next_desc)
{
  hs_service_descriptor_t *desc;
  int retval;

  /** Test logic:
   *  1) Initialize service time: consensus and system time.
   *  1.1) Initialize service hash ring
   *  2) Initialize service and publish descriptors.
   *  3) Initialize client time: consensus and system time.
   *  3.1) Initialize client hash ring
   *  4) Try to fetch descriptor as client, and CHECK that the HSDir picked by
   *     the client was also picked by service.
   */

  /* 1) Initialize service time: consensus and real time */
  time_t now = helper_set_consensus_and_system_time(ns, service_position);
  helper_initialize_big_hash_ring(ns);

  /* 2) Initialize service */
  hs_service_t *service = helper_init_service(now);
  /* The service publishes its next or current descriptor depending on which
   * one the client is expected to fetch in this scenario. */
  desc = client_fetches_next_desc ? service->desc_next : service->desc_current;

  /* Now let's upload our desc to all hsdirs */
  upload_descriptor_to_all(service, desc);
  /* Cleanup right now so we don't memleak on error. */
  cleanup_nodelist();
  /* Check that previous hsdirs were populated */
  tt_int_op(smartlist_len(desc->previous_hsdirs), OP_EQ, 8);

  /* 3) Initialize client time */
  helper_set_consensus_and_system_time(ns, client_position);

  /* Tear down the service's view of the network and rebuild the hash ring
   * from scratch for the client's (possibly different) time position. */
  cleanup_nodelist();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
  helper_initialize_big_hash_ring(ns);

  /* 4) Pick 6 HSDirs as a client and check that they were also chosen by the
        service. */
  for (int y = 0 ; y < 6 ; y++) {
    char client_hsdir_b64_digest[BASE64_DIGEST_LEN+1] = {0};
    helper_client_pick_hsdir(&service->keys.identity_pk,
                             client_hsdir_b64_digest);

    /* CHECK: Go through the hsdirs chosen by the service and make sure that it
     * contains the one picked by the client! */
    retval = smartlist_contains_string(desc->previous_hsdirs,
                                       client_hsdir_b64_digest);
    tt_int_op(retval, OP_EQ, 1);
  }

  /* Finally, try to pick a 7th hsdir and see that NULL is returned since we
   * exhausted all of them: */
  tt_assert(!pick_hsdir_v3(&service->keys.identity_pk));

 done:
  /* At the end: free all services and initialize the subsystem again, we will
   * need it for next scenario. */
  cleanup_nodelist();
  hs_service_free_all();
  hs_service_init();
  SMARTLIST_FOREACH(ns->routerstatus_list,
                    routerstatus_t *, rs, routerstatus_free(rs));
  smartlist_clear(ns->routerstatus_list);
}
/** This test ensures that client and service will pick the same HSDirs, under
 *  various timing scenarios:
 *  a) Scenario where both client and service are in the time segment between
 *     SRV#N and TP#N:
 *  b) Scenario where both client and service are in the time segment between
 *     TP#N and SRV#N+1.
 *  c) Scenario where service is between SRV#N and TP#N, but client is between
 *     TP#N and SRV#N+1.
 *  d) Scenario where service is between TP#N and SRV#N+1, but client is
 *     between SRV#N and TP#N.
 *
 *  This test is important because it tests that upload_descriptor_to_all() is
 *  in synch with pick_hsdir_v3(). That's not the case for the
 *  test_reachability() test which only compares the responsible hsdir sets.
 */
static void
test_client_service_hsdir_set_sync(void *arg)
{
  networkstatus_t *ns = NULL;

  (void) arg;

  /* Mock consensus access, OR state, descriptor encoding and directory
   * requests so the test runs offline against a synthetic network. */
  MOCK(networkstatus_get_latest_consensus,
       mock_networkstatus_get_latest_consensus);
  MOCK(networkstatus_get_live_consensus,
       mock_networkstatus_get_live_consensus);
  MOCK(get_or_state,
       get_or_state_replacement);
  MOCK(hs_desc_encode_descriptor,
       mock_hs_desc_encode_descriptor);
  MOCK(directory_initiate_request,
       mock_directory_initiate_request);

  hs_init();

  /* Initialize a big hash ring: we want it to be big so that client and
   * service cannot accidentally select the same HSDirs */
  ns = networkstatus_get_latest_consensus();
  tt_assert(ns);

  /** Now test the various synch scenarios. See the helper function for more
      details: */

  /*  a) Scenario where both client and service are in the time segment between
   *     SRV#N and TP#N. At this time the client fetches the first HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|----------$===========|    |
   *  |                      ^ ^                                         |
   *  |                      S C                                         |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, LATE_IN_SRV_TO_TP, 0);

  /*  b) Scenario where both client and service are in the time segment between
   *     TP#N and SRV#N+1. At this time the client fetches the second HS desc:
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                  ^ ^                             |
   *  |                                  S C                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, LATE_IN_TP_TO_SRV, 1);

  /*  c) Scenario where service is between SRV#N and TP#N, but client is
   *     between TP#N and SRV#N+1. Client is forward in time so it fetches the
   *     second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                 ^  ^                             |
   *  |                                 S  C                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_SRV_TO_TP, EARLY_IN_TP_TO_SRV, 1);

  /*  d) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N and TP#N. Client is backwards in time so it fetches the
   *     first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                 ^  ^                             |
   *  |                                 C  S                             |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_TP_TO_SRV, LATE_IN_SRV_TO_TP, 0);

  /*  e) Scenario where service is between SRV#N and TP#N, but client is
   *     between TP#N-1 and SRV#N. Client is backwards in time so it fetches
   *     the first HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                            ^  ^                  |
   *  |                                            C  S                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, EARLY_IN_SRV_TO_TP, LATE_IN_TP_TO_SRV, 0);

  /*  f) Scenario where service is between TP#N and SRV#N+1, but client is
   *     between SRV#N+1 and TP#N+1. Client is forward in time so it fetches
   *     the second HS desc.
   *
   *  +------------------------------------------------------------------+
   *  |                                                                  |
   *  | 00:00      12:00       00:00       12:00       00:00       12:00 |
   *  | SRV#1      TP#1        SRV#2       TP#2        SRV#3       TP#3  |
   *  |                                                                  |
   *  |  $==========|-----------$===========|-----------$===========|   |
   *  |                                            ^  ^                  |
   *  |                                            S  C                  |
   *  +------------------------------------------------------------------+
   */
  helper_test_hsdir_sync(ns, LATE_IN_TP_TO_SRV, EARLY_IN_SRV_TO_TP, 1);

 done:
  networkstatus_vote_free(ns);
  nodelist_free_all();
  hs_free_all();
}
  1560. struct testcase_t hs_common_tests[] = {
  1561. { "build_address", test_build_address, TT_FORK,
  1562. NULL, NULL },
  1563. { "validate_address", test_validate_address, TT_FORK,
  1564. NULL, NULL },
  1565. { "time_period", test_time_period, TT_FORK,
  1566. NULL, NULL },
  1567. { "start_time_of_next_time_period", test_start_time_of_next_time_period,
  1568. TT_FORK, NULL, NULL },
  1569. { "responsible_hsdirs", test_responsible_hsdirs, TT_FORK,
  1570. NULL, NULL },
  1571. { "desc_reupload_logic", test_desc_reupload_logic, TT_FORK,
  1572. NULL, NULL },
  1573. { "disaster_srv", test_disaster_srv, TT_FORK,
  1574. NULL, NULL },
  1575. { "hid_serv_request_tracker", test_hid_serv_request_tracker, TT_FORK,
  1576. NULL, NULL },
  1577. { "parse_extended_hostname", test_parse_extended_hostname, TT_FORK,
  1578. NULL, NULL },
  1579. { "time_between_tp_and_srv", test_time_between_tp_and_srv, TT_FORK,
  1580. NULL, NULL },
  1581. { "reachability", test_reachability, TT_FORK,
  1582. NULL, NULL },
  1583. { "client_service_hsdir_set_sync", test_client_service_hsdir_set_sync,
  1584. TT_FORK, NULL, NULL },
  1585. { "hs_indexes", test_hs_indexes, TT_FORK,
  1586. NULL, NULL },
  1587. END_OF_TESTCASES
  1588. };