verify.py

import time

import chutney.Traffic


def run_test(network):
    wait_time = network._dfltEnv['bootstrap_time']
    start_time = time.time()
    end_time = start_time + wait_time
    print("Verifying data transmission: (retrying for up to %d seconds)"
          % wait_time)
    status = False
    # Keep on retrying the verify until it succeeds or times out
    while not status and time.time() < end_time:
        # TrafficTester connections time out after ~3 seconds
        # a TrafficTester times out after ~10 seconds if no data is being sent
        status = _verify_traffic(network)
        # Avoid madly spewing output if we fail immediately each time
        if not status:
            time.sleep(5)
    print("Transmission: %s" % ("Success" if status else "Failure"))
    if not status:
        print("Set CHUTNEY_DEBUG to diagnose.")
    return status


def _verify_traffic(network):
    """Verify (parts of) the network by sending traffic through it
    and verifying what is received."""
    # TODO: IPv6 SOCKSPorts, SOCKSPorts with IPv6Traffic, and IPv6 Exits
    LISTEN_ADDR = network._dfltEnv['ip']
    LISTEN_PORT = 4747  # FIXME: Do better! Note the default exit policy.
    # HSs must have a HiddenServiceDir with
    # "HiddenServicePort <HS_PORT> <CHUTNEY_LISTEN_ADDRESS>:<LISTEN_PORT>"
    # TODO: Test <CHUTNEY_LISTEN_ADDRESS_V6>:<LISTEN_PORT>
    HS_PORT = 5858
    # The amount of data to send between each source-sink pair,
    # each time the source connects.
    # We create a source-sink pair for each (bridge) client to an exit,
    # and a source-sink pair for a (bridge) client to each hidden service
    DATALEN = network._dfltEnv['data_bytes']
    # Print a dot each time a sink verifies this much data
    DOTDATALEN = 5 * 1024 * 1024  # Octets.
    TIMEOUT = 3  # Seconds.
    # Calculate the amount of random data we should use
    randomlen = _calculate_randomlen(DATALEN)
    reps = _calculate_reps(DATALEN, randomlen)
    connection_count = network._dfltEnv['connection_count']
    # sanity check
    if reps == 0:
        DATALEN = 0
    # Get the random data
    if randomlen > 0:
        # print a dot after every DOTDATALEN data is verified, rounding up
        dot_reps = _calculate_reps(DOTDATALEN, randomlen)
        # make sure we get at least one dot per transmission
        dot_reps = min(reps, dot_reps)
        with open('/dev/urandom', 'rb') as randfp:
            tmpdata = randfp.read(randomlen)
    else:
        dot_reps = 0
        tmpdata = {}
    # now make the connections
    bind_to = (LISTEN_ADDR, LISTEN_PORT)
    tt = chutney.Traffic.TrafficTester(bind_to, tmpdata, TIMEOUT, reps,
                                       dot_reps)
    # _env does not implement get() due to its fallback-to-parent behaviour
    client_list = filter(lambda n:
                         n._env['tag'].startswith('c') or
                         n._env['tag'].startswith('bc') or
                         ('client' in n._env.keys() and n._env['client'] == 1),
                         network._nodes)
    exit_list = filter(lambda n:
                       ('exit' in n._env.keys() and n._env['exit'] == 1),
                       network._nodes)
    hs_list = filter(lambda n:
                     n._env['tag'].startswith('h') or
                     ('hs' in n._env.keys() and n._env['hs'] == 1),
                     network._nodes)
    # Make sure these lists are actually lists. (It would probably
    # be better to do list comprehensions here.)
    client_list = list(client_list)
    exit_list = list(exit_list)
    hs_list = list(hs_list)
    if len(client_list) == 0:
        print("  Unable to verify network: no client nodes available")
        return False
    if len(exit_list) == 0 and len(hs_list) == 0:
        print("  Unable to verify network: no exit/hs nodes available")
        print("  Exit nodes must be declared 'relay=1, exit=1'")
        print("  HS nodes must be declared 'tag=\"hs\"'")
        return False
  88. print("Connecting:")
  89. # the number of tor nodes in paths which will send DATALEN data
  90. # if a node is used in two paths, we count it twice
  91. # this is a lower bound, as cannabilised circuits are one node longer
  92. total_path_node_count = 0
  93. total_path_node_count += _configure_exits(tt, bind_to, tmpdata, reps,
  94. client_list, exit_list,
  95. LISTEN_ADDR, LISTEN_PORT,
  96. connection_count)
  97. total_path_node_count += _configure_hs(tt, tmpdata, reps, client_list,
  98. hs_list, HS_PORT, LISTEN_ADDR,
  99. LISTEN_PORT, connection_count,
  100. network._dfltEnv['hs_multi_client'])
  101. print("Transmitting Data:")
  102. start_time = time.time()
  103. status = tt.run()
  104. end_time = time.time()
  105. # if we fail, don't report the bandwidth
  106. if not status:
  107. return status
  108. # otherwise, report bandwidth used, if sufficient data was transmitted
  109. _report_bandwidth(DATALEN, total_path_node_count, start_time, end_time)
  110. return status


# In order to performance test a tor network, we need to transmit
# several hundred megabytes of data or more. Passing around this
# much data in Python has its own performance impacts, so we provide
# a smaller amount of random data instead, and repeat it to DATALEN
def _calculate_randomlen(datalen):
    MAX_RANDOMLEN = 128 * 1024  # Octets.
    if datalen > MAX_RANDOMLEN:
        return MAX_RANDOMLEN
    else:
        return datalen


def _calculate_reps(datalen, replen):
    # sanity checks
    if datalen == 0 or replen == 0:
        return 0
    # effectively rounds datalen up to the nearest replen
    # (integer division, so the repetition count is always a whole number)
    if replen < datalen:
        return (datalen + replen - 1) // replen
    else:
        return 1


# if there are any exits, each client / bridge client transmits
# via 4 nodes (including the client) to an arbitrary exit
# Each client binds directly to <CHUTNEY_LISTEN_ADDRESS>:LISTEN_PORT
# via an Exit relay
def _configure_exits(tt, bind_to, tmpdata, reps, client_list, exit_list,
                     LISTEN_ADDR, LISTEN_PORT, connection_count):
    CLIENT_EXIT_PATH_NODES = 4
    exit_path_node_count = 0
    if len(exit_list) > 0:
        exit_path_node_count += (len(client_list) *
                                 CLIENT_EXIT_PATH_NODES *
                                 connection_count)
        for op in client_list:
            print("  Exit to %s:%d via client %s:%s"
                  % (LISTEN_ADDR, LISTEN_PORT,
                     'localhost', op._env['socksport']))
            for _ in range(connection_count):
                proxy = ('localhost', int(op._env['socksport']))
                tt.add_client(bind_to, proxy)
    return exit_path_node_count
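
# For example (illustrative, not a default network): with 2 clients, at
# least one exit, and connection_count = 1, each client opens one SOCKS
# connection to bind_to, and exit_path_node_count = 2 * 4 * 1 = 8.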


# The HS redirects .onion connections made to hs_hostname:HS_PORT
# to the Traffic Tester's CHUTNEY_LISTEN_ADDRESS:LISTEN_PORT
# an arbitrary client / bridge client transmits via 8 nodes
# (including the client and hs) to each hidden service
# Instead of binding directly to LISTEN_PORT via an Exit relay,
# we bind to hs_hostname:HS_PORT via a hidden service connection
def _configure_hs(tt, tmpdata, reps, client_list, hs_list, HS_PORT,
                  LISTEN_ADDR, LISTEN_PORT, connection_count, hs_multi_client):
    CLIENT_HS_PATH_NODES = 8
    hs_path_node_count = (len(hs_list) * CLIENT_HS_PATH_NODES *
                          connection_count)
    # Each client in hs_client_list connects to each hs
    if hs_multi_client:
        hs_client_list = client_list
        hs_path_node_count *= len(client_list)
    else:
        # only use the first client in the list
        hs_client_list = client_list[:1]
    # Set up the connections from each client in hs_client_list to each hs
    for hs in hs_list:
        hs_bind_to = (hs._env['hs_hostname'], HS_PORT)
        for client in hs_client_list:
            print("  HS to %s:%d (%s:%d) via client %s:%s"
                  % (hs._env['hs_hostname'], HS_PORT,
                     LISTEN_ADDR, LISTEN_PORT,
                     'localhost', client._env['socksport']))
            for _ in range(connection_count):
                proxy = ('localhost', int(client._env['socksport']))
                tt.add_client(hs_bind_to, proxy)
    return hs_path_node_count
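
# For example (illustrative, not a default network): with 3 clients, 2
# hidden services, and connection_count = 1, the base count is
# 2 * 8 * 1 = 16; with hs_multi_client set, every client connects to every
# hidden service and the count becomes 16 * 3 = 48; otherwise only the
# first client is used and the count stays at 16.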


# calculate the single stream bandwidth and overall tor bandwidth
# the single stream bandwidth is the bandwidth of the
# slowest stream of all the simultaneously transmitted streams
# the overall bandwidth estimates the simultaneous bandwidth between
# all tor nodes over all simultaneous streams, assuming:
# * minimum path lengths (no cannibalised circuits)
# * unlimited network bandwidth (that is, localhost)
# * tor performance is CPU-limited
# This can be used to estimate the bandwidth capacity of a CPU-bound
# tor relay running on this machine
def _report_bandwidth(data_length, total_path_node_count, start_time,
                      end_time):
    # only report bandwidth if we sent at least 5 MB cumulative total,
    # and it took us at least a second to send it
    MIN_BWDATA = 5 * 1024 * 1024  # Octets.
    MIN_ELAPSED_TIME = 1.0  # Seconds.
    cumulative_data_sent = total_path_node_count * data_length
    elapsed_time = end_time - start_time
    if (cumulative_data_sent >= MIN_BWDATA and
            elapsed_time >= MIN_ELAPSED_TIME):
        # Report megabytes per second
        BWDIVISOR = 1024 * 1024
        single_stream_bandwidth = (data_length / elapsed_time / BWDIVISOR)
        overall_bandwidth = (cumulative_data_sent / elapsed_time /
                             BWDIVISOR)
        print("Single Stream Bandwidth: %.2f MBytes/s"
              % single_stream_bandwidth)
        print("Overall tor Bandwidth: %.2f MBytes/s"
              % overall_bandwidth)
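
# For example (illustrative numbers): if data_length is 10 MiB and
# total_path_node_count is 8, cumulative_data_sent is 80 MiB, which clears
# MIN_BWDATA; if the transmission took 10 seconds, the report would show a
# single stream bandwidth of 1.00 MBytes/s and an overall tor bandwidth of
# 8.00 MBytes/s.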