verify.py

import time

import chutney.Traffic


def run_test(network):
    wait_time = network._dfltEnv['bootstrap_time']
    start_time = time.time()
    end_time = start_time + wait_time
    print("Verifying data transmission: (retrying for up to %d seconds)"
          % wait_time)
    status = False
    # Keep on retrying the verify until it succeeds or times out
    while not status and time.time() < end_time:
        # TrafficTester connections time out after ~3 seconds
        # a TrafficTester times out after ~10 seconds if no data is being sent
        status = _verify_traffic(network)
        # Avoid madly spewing output if we fail immediately each time
        if not status:
            time.sleep(5)
    print("Transmission: %s" % ("Success" if status else "Failure"))
    if not status:
        # TODO: allow the debug flag to be passed as an argument to
        # src/test/test-network.sh and chutney
        print("Set 'debug_flag = True' in Traffic.py to diagnose.")
    return status
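
# Illustrative call-site sketch (not part of this module): chutney's test
# harness is expected to hand run_test() a configured, bootstrapped Network
# object, for example
#   status = run_test(network)
#   sys.exit(0 if status else 1)
# The exact glue lives in chutney / src/test/test-network.sh, not here.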


def _verify_traffic(network):
    """Verify (parts of) the network by sending traffic through it
    and verifying what is received."""
    # TODO: IPv6 SOCKSPorts, SOCKSPorts with IPv6Traffic, and IPv6 Exits
    LISTEN_ADDR = network._dfltEnv['ip']
    LISTEN_PORT = 4747  # FIXME: Do better! Note the default exit policy.
    # HSs must have a HiddenServiceDir with
    # "HiddenServicePort <HS_PORT> <CHUTNEY_LISTEN_ADDRESS>:<LISTEN_PORT>"
    # TODO: Test <CHUTNEY_LISTEN_ADDRESS_V6>:<LISTEN_PORT>
    HS_PORT = 5858
    # The amount of data to send between each source-sink pair,
    # each time the source connects.
    # We create a source-sink pair for each (bridge) client to an exit,
    # and a source-sink pair for a (bridge) client to each hidden service
    DATALEN = network._dfltEnv['data_bytes']
    # Print a dot each time a sink verifies this much data
    DOTDATALEN = 5 * 1024 * 1024  # Octets.
    TIMEOUT = 3  # Seconds.
    # Calculate the amount of random data we should use
    randomlen = _calculate_randomlen(DATALEN)
    reps = _calculate_reps(DATALEN, randomlen)
    connection_count = network._dfltEnv['connection_count']
    # sanity check
    if reps == 0:
        DATALEN = 0
    # Get the random data
    if randomlen > 0:
        # print a dot after every DOTDATALEN data is verified, rounding up
        dot_reps = _calculate_reps(DOTDATALEN, randomlen)
        # make sure we get at least one dot per transmission
        dot_reps = min(reps, dot_reps)
        # read in binary mode: /dev/urandom is not valid text
        with open('/dev/urandom', 'rb') as randfp:
            tmpdata = randfp.read(randomlen)
    else:
        dot_reps = 0
        tmpdata = {}
    # now make the connections
    bind_to = (LISTEN_ADDR, LISTEN_PORT)
    tt = chutney.Traffic.TrafficTester(bind_to, tmpdata, TIMEOUT, reps,
                                       dot_reps)
    # list() so that len() below also works under Python 3
    client_list = list(filter(lambda n:
                              n._env['tag'] == 'c' or n._env['tag'] == 'bc',
                              network._nodes))
    exit_list = list(filter(lambda n:
                            ('exit' in n._env.keys()) and n._env['exit'] == 1,
                            network._nodes))
    hs_list = list(filter(lambda n:
                          n._env['tag'] == 'h',
                          network._nodes))
    if len(client_list) == 0:
        print(" Unable to verify network: no client nodes available")
        return False
    if len(exit_list) == 0 and len(hs_list) == 0:
        print(" Unable to verify network: no exit/hs nodes available")
        print(" Exit nodes must be declared 'relay=1, exit=1'")
        print(" HS nodes must be declared 'tag=\"hs\"'")
        return False
    print("Connecting:")
    # the number of tor nodes in paths which will send DATALEN data
    # if a node is used in two paths, we count it twice
    # this is a lower bound, as cannibalised circuits are one node longer
    total_path_node_count = 0
    total_path_node_count += _configure_exits(tt, bind_to, tmpdata, reps,
                                              client_list, exit_list,
                                              LISTEN_ADDR, LISTEN_PORT,
                                              connection_count)
    total_path_node_count += _configure_hs(tt, tmpdata, reps, client_list,
                                           hs_list, HS_PORT, LISTEN_ADDR,
                                           LISTEN_PORT, connection_count,
                                           network._dfltEnv['hs_multi_client'])
    print("Transmitting Data:")
    start_time = time.time()
    status = tt.run()
    end_time = time.time()
    # if we fail, don't report the bandwidth
    if not status:
        return status
    # otherwise, report bandwidth used, if sufficient data was transmitted
    _report_bandwidth(DATALEN, total_path_node_count, start_time, end_time)
    return status


# In order to performance test a tor network, we need to transmit
# several hundred megabytes of data or more. Passing around this
# much data in Python has its own performance impacts, so we provide
# a smaller amount of random data instead, and repeat it to DATALEN
def _calculate_randomlen(datalen):
    MAX_RANDOMLEN = 128 * 1024  # Octets.
    if datalen > MAX_RANDOMLEN:
        return MAX_RANDOMLEN
    else:
        return datalen


def _calculate_reps(datalen, replen):
    # sanity checks
    if datalen == 0 or replen == 0:
        return 0
    # effectively rounds datalen up to the nearest replen
    if replen < datalen:
        # integer ceiling division, so the result stays an int on Python 3
        return (datalen + replen - 1) // replen
    else:
        return 1
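
# Worked example (illustrative numbers only): with data_bytes set to 10 MiB,
# _calculate_randomlen() caps the buffer at MAX_RANDOMLEN (128 KiB) and
# _calculate_reps() rounds up:
#   _calculate_randomlen(10 * 1024 * 1024)          # -> 131072
#   _calculate_reps(10 * 1024 * 1024, 128 * 1024)   # -> 80
# so each Source replays the same 128 KiB buffer 80 times to cover 10 MiB.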


# if there are any exits, each client / bridge client transmits
# via 4 nodes (including the client) to an arbitrary exit
# Each client binds directly to <CHUTNEY_LISTEN_ADDRESS>:LISTEN_PORT
# via an Exit relay
def _configure_exits(tt, bind_to, tmpdata, reps, client_list, exit_list,
                     LISTEN_ADDR, LISTEN_PORT, connection_count):
    CLIENT_EXIT_PATH_NODES = 4
    exit_path_node_count = 0
    if len(exit_list) > 0:
        exit_path_node_count += (len(client_list) *
                                 CLIENT_EXIT_PATH_NODES *
                                 connection_count)
        for op in client_list:
            print(" Exit to %s:%d via client %s:%s"
                  % (LISTEN_ADDR, LISTEN_PORT,
                     'localhost', op._env['socksport']))
            for _ in range(connection_count):
                proxy = ('localhost', int(op._env['socksport']))
                tt.add(chutney.Traffic.Source(tt, bind_to, tmpdata, proxy,
                                              reps))
    return exit_path_node_count
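
# Worked example (illustrative numbers only): with 2 clients, 3 connections
# each, and at least one exit node, _configure_exits() counts
#   2 clients * CLIENT_EXIT_PATH_NODES (4) * 3 connections = 24 path nodes,
# i.e. 24 tor instances that each carry DATALEN octets (a lower bound, since
# cannibalised circuits are one node longer).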


# The HS redirects .onion connections made to hs_hostname:HS_PORT
# to the Traffic Tester's CHUTNEY_LISTEN_ADDRESS:LISTEN_PORT
# an arbitrary client / bridge client transmits via 8 nodes
# (including the client and hs) to each hidden service
# Instead of binding directly to LISTEN_PORT via an Exit relay,
# we bind to hs_hostname:HS_PORT via a hidden service connection
def _configure_hs(tt, tmpdata, reps, client_list, hs_list, HS_PORT,
                  LISTEN_ADDR, LISTEN_PORT, connection_count, hs_multi_client):
    CLIENT_HS_PATH_NODES = 8
    hs_path_node_count = (len(hs_list) * CLIENT_HS_PATH_NODES *
                          connection_count)
    # Each client in hs_client_list connects to each hs
    if hs_multi_client:
        hs_client_list = client_list
        hs_path_node_count *= len(client_list)
    else:
        # only use the first client in the list
        hs_client_list = client_list[:1]
    # Set up the connections from each client in hs_client_list to each hs
    for hs in hs_list:
        hs_bind_to = (hs._env['hs_hostname'], HS_PORT)
        for client in hs_client_list:
            print(" HS to %s:%d (%s:%d) via client %s:%s"
                  % (hs._env['hs_hostname'], HS_PORT,
                     LISTEN_ADDR, LISTEN_PORT,
                     'localhost', client._env['socksport']))
            for _ in range(connection_count):
                proxy = ('localhost', int(client._env['socksport']))
                tt.add(chutney.Traffic.Source(tt, hs_bind_to, tmpdata,
                                              proxy, reps))
    return hs_path_node_count


# calculate the single stream bandwidth and overall tor bandwidth
# the single stream bandwidth is the bandwidth of the
# slowest stream of all the simultaneously transmitted streams
# the overall bandwidth estimates the simultaneous bandwidth between
# all tor nodes over all simultaneous streams, assuming:
# * minimum path lengths (no cannibalized circuits)
# * unlimited network bandwidth (that is, localhost)
# * tor performance is CPU-limited
# This can be used to estimate the bandwidth capacity of a CPU-bound
# tor relay running on this machine
def _report_bandwidth(data_length, total_path_node_count, start_time,
                      end_time):
    # if we sent at least 5 MB cumulative total, and it took us at least
    # a second to send, report the bandwidth
    MIN_BWDATA = 5 * 1024 * 1024  # Octets.
    MIN_ELAPSED_TIME = 1.0  # Seconds.
    cumulative_data_sent = total_path_node_count * data_length
    elapsed_time = end_time - start_time
    if (cumulative_data_sent >= MIN_BWDATA and
        elapsed_time >= MIN_ELAPSED_TIME):
        # Report megabytes per second
        BWDIVISOR = 1024 * 1024
        single_stream_bandwidth = (data_length / elapsed_time / BWDIVISOR)
        overall_bandwidth = (cumulative_data_sent / elapsed_time /
                             BWDIVISOR)
        print("Single Stream Bandwidth: %.2f MBytes/s"
              % single_stream_bandwidth)
        print("Overall tor Bandwidth: %.2f MBytes/s"
              % overall_bandwidth)
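
# Worked example (illustrative numbers only): with DATALEN = 10 MiB and the
# 24 path nodes from the exit example above,
#   cumulative_data_sent = 24 * 10 MiB = 240 MiB
# which clears MIN_BWDATA, so a 30 second run would report roughly
#   Single Stream Bandwidth: 0.33 MBytes/s   (10 MiB / 30 s)
#   Overall tor Bandwidth:   8.00 MBytes/s   (240 MiB / 30 s)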