verify.py 9.6 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219
  1. import time
  2. import chutney
  3. def run_test(network):
  4. wait_time = network._dfltEnv['bootstrap_time']
  5. start_time = time.time()
  6. end_time = start_time + wait_time
  7. print("Verifying data transmission: (retrying for up to %d seconds)"
  8. % wait_time)
  9. status = False
  10. # Keep on retrying the verify until it succeeds or times out
  11. while not status and time.time() < end_time:
  12. # TrafficTester connections time out after ~3 seconds
  13. # a TrafficTester times out after ~10 seconds if no data is being sent
  14. status = _verify_traffic(network)
  15. # Avoid madly spewing output if we fail immediately each time
  16. if not status:
  17. time.sleep(5)
  18. print("Transmission: %s" % ("Success" if status else "Failure"))
  19. if not status:
  20. print("Set CHUTNEY_DEBUG to diagnose.")
  21. return status
  22. def _verify_traffic(network):
  23. """Verify (parts of) the network by sending traffic through it
  24. and verify what is received."""
  25. # TODO: IPv6 SOCKSPorts, SOCKSPorts with IPv6Traffic, and IPv6 Exits
  26. LISTEN_ADDR = network._dfltEnv['ip']
  27. LISTEN_PORT = 4747 # FIXME: Do better! Note the default exit policy.
  28. # HSs must have a HiddenServiceDir with
  29. # "HiddenServicePort <HS_PORT> <CHUTNEY_LISTEN_ADDRESS>:<LISTEN_PORT>"
  30. # TODO: Test <CHUTNEY_LISTEN_ADDRESS_V6>:<LISTEN_PORT>
  31. HS_PORT = 5858
  32. # The amount of data to send between each source-sink pair,
  33. # each time the source connects.
  34. # We create a source-sink pair for each (bridge) client to an exit,
  35. # and a source-sink pair for a (bridge) client to each hidden service
  36. DATALEN = network._dfltEnv['data_bytes']
  37. # Print a dot each time a sink verifies this much data
  38. DOTDATALEN = 5 * 1024 * 1024 # Octets.
  39. TIMEOUT = 3 # Seconds.
  40. # Calculate the amount of random data we should use
  41. randomlen = _calculate_randomlen(DATALEN)
  42. reps = _calculate_reps(DATALEN, randomlen)
  43. connection_count = network._dfltEnv['connection_count']
  44. # sanity check
  45. if reps == 0:
  46. DATALEN = 0
  47. # Get the random data
  48. if randomlen > 0:
  49. # print a dot after every DOTDATALEN data is verified, rounding up
  50. dot_reps = _calculate_reps(DOTDATALEN, randomlen)
  51. # make sure we get at least one dot per transmission
  52. dot_reps = min(reps, dot_reps)
  53. with open('/dev/urandom', 'r') as randfp:
  54. tmpdata = randfp.read(randomlen)
  55. else:
  56. dot_reps = 0
  57. tmpdata = {}
  58. # now make the connections
  59. bind_to = (LISTEN_ADDR, LISTEN_PORT)
  60. tt = chutney.Traffic.TrafficTester(bind_to, tmpdata, TIMEOUT, reps,
  61. dot_reps)
  62. # _env does not implement get() due to its fallback to parent behaviour
  63. client_list = filter(lambda n:
  64. n._env['tag'].startswith('c') or
  65. n._env['tag'].startswith('bc') or
  66. ('client' in n._env.keys() and n._env['client'] == 1),
  67. network._nodes)
  68. exit_list = filter(lambda n:
  69. ('exit' in n._env.keys() and n._env['exit'] == 1),
  70. network._nodes)
  71. hs_list = filter(lambda n:
  72. n._env['tag'].startswith('h') or
  73. ('hs' in n._env.keys() and n._env['hs'] == 1),
  74. network._nodes)
  75. if len(client_list) == 0:
  76. print(" Unable to verify network: no client nodes available")
  77. return False
  78. if len(exit_list) == 0 and len(hs_list) == 0:
  79. print(" Unable to verify network: no exit/hs nodes available")
  80. print(" Exit nodes must be declared 'relay=1, exit=1'")
  81. print(" HS nodes must be declared 'tag=\"hs\"'")
  82. return False
  83. print("Connecting:")
  84. # the number of tor nodes in paths which will send DATALEN data
  85. # if a node is used in two paths, we count it twice
  86. # this is a lower bound, as cannabilised circuits are one node longer
  87. total_path_node_count = 0
  88. total_path_node_count += _configure_exits(tt, bind_to, tmpdata, reps,
  89. client_list, exit_list,
  90. LISTEN_ADDR, LISTEN_PORT,
  91. connection_count)
  92. total_path_node_count += _configure_hs(tt, tmpdata, reps, client_list,
  93. hs_list, HS_PORT, LISTEN_ADDR,
  94. LISTEN_PORT, connection_count,
  95. network._dfltEnv['hs_multi_client'])
  96. print("Transmitting Data:")
  97. start_time = time.time()
  98. status = tt.run()
  99. end_time = time.time()
  100. # if we fail, don't report the bandwidth
  101. if not status:
  102. return status
  103. # otherwise, report bandwidth used, if sufficient data was transmitted
  104. _report_bandwidth(DATALEN, total_path_node_count, start_time, end_time)
  105. return status
# In order to performance test a tor network, we need to transmit
# several hundred megabytes of data or more. Passing around this
# much data in Python has its own performance impacts, so we provide
# a smaller amount of random data instead, and repeat it to DATALEN
  110. def _calculate_randomlen(datalen):
  111. MAX_RANDOMLEN = 128 * 1024 # Octets.
  112. if datalen > MAX_RANDOMLEN:
  113. return MAX_RANDOMLEN
  114. else:
  115. return datalen
  116. def _calculate_reps(datalen, replen):
  117. # sanity checks
  118. if datalen == 0 or replen == 0:
  119. return 0
  120. # effectively rounds datalen up to the nearest replen
  121. if replen < datalen:
  122. return (datalen + replen - 1) / replen
  123. else:
  124. return 1
# if there are any exits, each client / bridge client transmits
# via 4 nodes (including the client) to an arbitrary exit
# Each client binds directly to <CHUTNEY_LISTEN_ADDRESS>:LISTEN_PORT
# via an Exit relay
  129. def _configure_exits(tt, bind_to, tmpdata, reps, client_list, exit_list,
  130. LISTEN_ADDR, LISTEN_PORT, connection_count):
  131. CLIENT_EXIT_PATH_NODES = 4
  132. exit_path_node_count = 0
  133. if len(exit_list) > 0:
  134. exit_path_node_count += (len(client_list) *
  135. CLIENT_EXIT_PATH_NODES *
  136. connection_count)
  137. for op in client_list:
  138. print(" Exit to %s:%d via client %s:%s"
  139. % (LISTEN_ADDR, LISTEN_PORT,
  140. 'localhost', op._env['socksport']))
  141. for _ in range(connection_count):
  142. proxy = ('localhost', int(op._env['socksport']))
  143. tt.add(chutney.Traffic.Source(tt, bind_to, tmpdata, proxy,
  144. reps))
  145. return exit_path_node_count
# The HS redirects .onion connections made to hs_hostname:HS_PORT
# to the Traffic Tester's CHUTNEY_LISTEN_ADDRESS:LISTEN_PORT
# an arbitrary client / bridge client transmits via 8 nodes
# (including the client and hs) to each hidden service
# Instead of binding directly to LISTEN_PORT via an Exit relay,
# we bind to hs_hostname:HS_PORT via a hidden service connection
  152. def _configure_hs(tt, tmpdata, reps, client_list, hs_list, HS_PORT,
  153. LISTEN_ADDR, LISTEN_PORT, connection_count, hs_multi_client):
  154. CLIENT_HS_PATH_NODES = 8
  155. hs_path_node_count = (len(hs_list) * CLIENT_HS_PATH_NODES *
  156. connection_count)
  157. # Each client in hs_client_list connects to each hs
  158. if hs_multi_client:
  159. hs_client_list = client_list
  160. hs_path_node_count *= len(client_list)
  161. else:
  162. # only use the first client in the list
  163. hs_client_list = client_list[:1]
  164. # Setup the connections from each client in hs_client_list to each hs
  165. for hs in hs_list:
  166. hs_bind_to = (hs._env['hs_hostname'], HS_PORT)
  167. for client in hs_client_list:
  168. print(" HS to %s:%d (%s:%d) via client %s:%s"
  169. % (hs._env['hs_hostname'], HS_PORT,
  170. LISTEN_ADDR, LISTEN_PORT,
  171. 'localhost', client._env['socksport']))
  172. for _ in range(connection_count):
  173. proxy = ('localhost', int(client._env['socksport']))
  174. tt.add(chutney.Traffic.Source(tt, hs_bind_to, tmpdata,
  175. proxy, reps))
  176. return hs_path_node_count
# calculate the single stream bandwidth and overall tor bandwidth
# the single stream bandwidth is the bandwidth of the
# slowest stream of all the simultaneously transmitted streams
# the overall bandwidth estimates the simultaneous bandwidth between
# all tor nodes over all simultaneous streams, assuming:
# * minimum path lengths (no cannibalized circuits)
# * unlimited network bandwidth (that is, localhost)
# * tor performance is CPU-limited
# This can be used to estimate the bandwidth capacity of a CPU-bound
# tor relay running on this machine
  187. def _report_bandwidth(data_length, total_path_node_count, start_time,
  188. end_time):
  189. # otherwise, if we sent at least 5 MB cumulative total, and
  190. # it took us at least a second to send, report bandwidth
  191. MIN_BWDATA = 5 * 1024 * 1024 # Octets.
  192. MIN_ELAPSED_TIME = 1.0 # Seconds.
  193. cumulative_data_sent = total_path_node_count * data_length
  194. elapsed_time = end_time - start_time
  195. if (cumulative_data_sent >= MIN_BWDATA and
  196. elapsed_time >= MIN_ELAPSED_TIME):
  197. # Report megabytes per second
  198. BWDIVISOR = 1024*1024
  199. single_stream_bandwidth = (data_length / elapsed_time / BWDIVISOR)
  200. overall_bandwidth = (cumulative_data_sent / elapsed_time /
  201. BWDIVISOR)
  202. print("Single Stream Bandwidth: %.2f MBytes/s"
  203. % single_stream_bandwidth)
  204. print("Overall tor Bandwidth: %.2f MBytes/s"
  205. % overall_bandwidth)