Browse Source

Deleted some old unused files

Steven Engler 4 years ago
parent
commit
c5df0b28b0

+ 0 - 35
chutney.patch

@@ -1,35 +0,0 @@
-diff --git a/lib/chutney/TorNet.py b/lib/chutney/TorNet.py
-index bf26978..73f4e5f 100644
---- a/lib/chutney/TorNet.py
-+++ b/lib/chutney/TorNet.py
-@@ -682,7 +682,7 @@ class LocalNodeController(NodeController):
-     def waitOnLaunch(self):
-         """Check whether we can wait() for the tor process to launch"""
-         # TODO: is this the best place for this code?
--        # RunAsDaemon default is 0
-+        # RunAsDaemon default is 1
-         runAsDaemon = False
-         with open(self._getTorrcFname(), 'r') as f:
-             for line in f.readlines():
-@@ -691,7 +691,7 @@ class LocalNodeController(NodeController):
-                 if len(stline) > 0:
-                     splline = stline.split()
-                     # if the line has at least two tokens on it
--                    if (len(splline) > 0 and
-+                    if (len(splline) > 1 and
-                             splline[0].lower() == "RunAsDaemon".lower() and
-                             splline[1] == "1"):
-                         # use the RunAsDaemon value from the torrc
-@@ -874,9 +874,9 @@ class TorEnviron(chutney.Templating.Environ):
-         ocp_line = ('__OwningControllerProcess %d' % (cpid))
-         # if we want to leave the network running, or controlling_pid is 1
-         # (or invalid)
--        if (os.environ.get('CHUTNEY_START_TIME', 0) < 0 or
--            os.environ.get('CHUTNEY_BOOTSTRAP_TIME', 0) < 0 or
--            os.environ.get('CHUTNEY_STOP_TIME', 0) < 0 or
-+        if (int(os.environ.get('CHUTNEY_START_TIME', 0)) < 0 or
-+            int(os.environ.get('CHUTNEY_BOOTSTRAP_TIME', 0)) < 0 or
-+            int(os.environ.get('CHUTNEY_STOP_TIME', 0)) < 0 or
-             cpid <= 1):
-             return '#' + ocp_line
-         else:

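The deleted chutney.patch wrapped the os.environ lookups in int() because environment variables are always strings, so under Python 3 comparing a set variable directly against 0 raises a TypeError; it also tightened the torrc token check from len(splline) > 0 to > 1 before indexing splline[1]. A minimal sketch of the same coercion pattern; env_int() is an illustrative helper, not chutney code:

import os

def env_int(name, default=0):
    # environment variables arrive as strings; coerce before any numeric comparison
    return int(os.environ.get(name, default))

# grounded in the patched condition: leave the network running if any timing knob is negative
leave_running = (env_int('CHUTNEY_START_TIME') < 0 or
                 env_int('CHUTNEY_BOOTSTRAP_TIME') < 0 or
                 env_int('CHUTNEY_STOP_TIME') < 0)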
+ 0 - 302
src/experiment-errors

@@ -1,302 +0,0 @@
-DEBUG:root:Starting client protocol (id: 55, desc: ('127.0.0.1', 9118) -> 16: ['test003r', 'test030target', 'test048e'])
-DEBUG:root:Starting client protocol (id: 5, desc: ('127.0.0.1', 9068) -> 17: ['test006r', 'test030target', 'test047e'])
-DEBUG:root:Starting client protocol (id: 7, desc: ('127.0.0.1', 9070) -> 18: ['test003r', 'test030target', 'test048e'])
-DEBUG:root:Starting client protocol (id: 15, desc: ('127.0.0.1', 9078) -> 17: ['test024r', 'test030target', 'test059e'])
-
-
-DEBUG:root:Circuit 17 (CONTROLLER, controller=('127.0.0.1', 8068)) CLOSED: FINISHED; None
-
-
-
-ERROR:root:Client protocol id: 55 had an error (07:23:07.419891)
-ERROR:root:Client protocol id: 5 had an error (07:24:56.158961)
-ERROR:root:Client protocol id: 7 had an error (07:24:56.194669)
-ERROR:root:Client protocol id: 15 had an error (07:24:57.395246)
-
-
-
-DEBUG:root:Stream 74 (None, controller=('127.0.0.1', 8119)) FAILED: TIMEOUT; None
-Starting client protocol
-Socket 2565 connected
-
-
-ERROR:root:Client protocol id: 55 had an error (07:23:07.419891)
-
-
-
-Feb 12 07:23:07.184 [info] connection_edge_process_relay_cell_not_open(): 'connected' received for circid 2355205080 streamid 34331 after 8 seconds.
-Feb 12 07:23:07.184 [info] exit circ (length 3): $5AFB76E7105B036D4A5297712ED84F2B4D1D7AF0(open) $C0DE6D7DDCDB12B59FF33E123BA7AB3CA58D7CEA(open) $3A73C6B8BDC470EFBC0BF55D56B59001006E1483(open)
-Feb 12 07:23:07.416 [info] handle_relay_cell_command(): 34: end cell (connection reset) for stream 34331. Removing stream.
-
-
-Feb 12 07:23:40.710 [info] connection_edge_process_relay_cell_not_open(): 'connected' received for circid 2818801185 streamid 24523 after 3 seconds.
-Feb 12 07:23:40.710 [info] exit circ (length 3): $E4E39348FCB5CC9494CFA3FBD6386630B64662A6(open) $C0DE6D7DDCDB12B59FF33E123BA7AB3CA58D7CEA(open) $335338CD4C5E48852859E29D8371FACB4A2155DE(open)
-Feb 12 07:25:27.298 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 16 to 172.19.156.16:5021 (9AB7F01CA3AA2749B2115C49B5CE4596B9169E1B) after 6060 ms. Delta 3ms
-Feb 12 07:25:27.395 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 13 to 172.19.156.16:5009 (9B20E5578F75081B63326B7C8A762DBE081C8256) after 5716 ms. Delta 1ms
-Feb 12 07:25:28.742 [info] handle_relay_cell_command(): 37: end cell (connection reset) for stream 24523. Removing stream.
-Feb 12 07:25:29.558 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 8 to 172.19.156.16:5027 (03813EFF1620E9F45CAB94808011FA966C2A8869) after 6872 ms. Delta 1ms
-Feb 12 07:25:29.940 [info] connection_handle_listener_read(): New SOCKS connection opened from 127.0.0.1
-
-
-
-ERROR:root:Client protocol id: 514 had an error (07:25:28.743687)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 245, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/throughput_protocols.py", line 30, in _run_iteration
-    if self.sub_protocol.run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 434, in _run_iteration
-    if self.protocol_helper.recv(self.socket, response_size):
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 39, in recv
-    raise ProtocolException('The socket was closed.')
-basic_protocols.ProtocolException: The socket was closed.
-WARNING:root:Client protocol with error successfully added self to global queue
-
-ERROR:root:Client protocol id: 55 had an error (07:23:07.419891)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 245, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/throughput_protocols.py", line 30, in _run_iteration
-    if self.sub_protocol.run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 434, in _run_iteration
-    if self.protocol_helper.recv(self.socket, response_size):
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 39, in recv
-    raise ProtocolException('The socket was closed.')
-basic_protocols.ProtocolException: The socket was closed.
-WARNING:root:Client protocol with error successfully added self to global queue
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-DEBUG:root:Starting client protocol (id: 1379, desc: ('127.0.0.1', 9092) -> 26: ['test002r', 'test030target', 'test043e'])
-
-ERROR:root:Client protocol id: 1379 had an error (07:27:50.439308)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 245, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 172, in _run_iteration
-    raise ProtocolException('Could not connect to SOCKS proxy, msg: %x'%(response[1],))
-basic_protocols.ProtocolException: Could not connect to SOCKS proxy, msg: 5b
-
-Feb 12 07:27:50.386 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 12 07:27:50.414 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 12 07:27:50.414 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 12 07:27:50.438 [notice] Tried for 120 seconds to get a connection to 127.0.0.1:12353. Giving up. (waiting for controller)
-Feb 12 07:27:50.440 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 12 07:27:50.440 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 12 07:27:50.466 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-
-
-
-
-
-
-DEBUG:root:Starting client protocol (id: 545, desc: ('127.0.0.1', 9158) -> 20: ['test000a', 'test030target', 'test054e'])
-
-ERROR:root:Client protocol id: 545 had an error (07:25:41.147103)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 245, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 172, in _run_iteration
-    raise ProtocolException('Could not connect to SOCKS proxy, msg: %x'%(response[1],))
-basic_protocols.ProtocolException: Could not connect to SOCKS proxy, msg: 5b
-
-Feb 12 07:25:38.260 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 9 to 172.19.156.16:5017 (A1986143EF2C1F7EF93742DDBB0AD5119EC33156) after 5924 ms. Delta 0ms
-Feb 12 07:25:38.755 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 5 to 172.19.156.16:5009 (9B20E5578F75081B63326B7C8A762DBE081C8256) after 6728 ms. Delta 3ms
-Feb 12 07:25:41.146 [notice] Tried for 120 seconds to get a connection to 127.0.0.1:12353. Giving up. (waiting for controller)
-Feb 12 07:25:41.666 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 8 to 172.19.156.16:5006 (296F6955D60EBE2EEF910CED0C4F1F00D178951D) after 6848 ms. Delta 3ms
-Feb 12 07:25:41.846 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 12 to 172.19.156.16:5010 (EB9D945D3E554868A8E5F15B375D129D20449E77) after 6028 ms. Delta 4ms
-Feb 12 07:25:43.166 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 11 to 172.19.156.16:5000 (CC455FEC9679E29B17762238281AB07727FEBF47) after 6412 ms. Delta 3ms
-
-
-
-
-
-See: connection_ap_expire_beginning()
-
-
-
-
-
-
-Feb 12 22:28:22.429 [info] connection_handle_listener_read(): New SOCKS connection opened from 127.0.0.1.
-Feb 12 22:28:22.638 [info] connection_edge_process_inbuf(): data from edge while in 'waiting for controller' state. Leaving it on buffer.
-Feb 12 22:28:22.658 [info] rep_hist_note_used_port(): New port prediction added. Will continue predictive circ building for 3394 more seconds.
-Feb 12 22:28:22.658 [info] link_apconn_to_circ(): Looks like completed circuit to hidden service doesn't allow optimistic data for connection to 127.0.0.1
-Feb 12 22:28:22.658 [info] connection_ap_handshake_send_begin(): Sending relay cell 0 on circ 4294765208 to begin stream 5930.
-Feb 12 22:28:22.658 [info] connection_ap_handshake_send_begin(): Address/port sent, ap socket 40, n_circ_id 4294765208
-
-
-Feb 12 22:28:51.633 [info] circuit_mark_for_close_(): Circuit 4294765208 (id: 24) marked for close at src/core/or/circuituse.c:1507 (orig reason: 9, new reason: 0)
-
-DEBUG:root:Starting client protocol (id: 844, desc: ('127.0.0.1', 9157) -> 24: ['test028r', 'test030target', 'test036e'])
-
-ERROR:root:Client protocol id: 844 had an error (22:30:22.999643)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 246, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 172, in _run_iteration
-    raise ProtocolException('Could not connect to SOCKS proxy, msg: %x'%(response[1],))
-basic_protocols.ProtocolException: Could not connect to SOCKS proxy, msg: 5b
-
-
-
-
-
-DEBUG:root:Starting client protocol (id: 1245, desc: ('127.0.0.1', 9108) -> 24: ['test014r', 'test030target', 'test045e'])
-
-ERROR:root:Client protocol id: 1245 had an error (23:19:45.882900)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 246, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 172, in _run_iteration
-    raise ProtocolException('Could not connect to SOCKS proxy, msg: %x'%(response[1],))
-basic_protocols.ProtocolException: Could not connect to SOCKS proxy, msg: 5b
-
-Feb 12 23:17:45.684 [info] connection_ap_handshake_send_begin(): Sending relay cell 0 on circ 2533565025 to begin stream 56942.
-Feb 12 23:17:45.684 [info] connection_ap_handshake_send_begin(): Address/port sent, ap socket 15, n_circ_id 2533565025
-
-Feb 12 23:17:55.194 [info] update_consensus_networkstatus_downloads(): Launching microdesc standard networkstatus consensus download.
-Feb 12 23:17:55.194 [info] select_primary_guard_for_circuit(): Selected primary guard test006r ($C296AF35EAD984D5419333340E1DE30AE71D8E48) for circuit.
-Feb 12 23:17:55.194 [info] connection_ap_make_link(): Making internal direct tunnel to 172.19.156.16:5006 ...
-Feb 12 23:17:55.194 [info] connection_ap_make_link(): ... application connection created and linked.
-Feb 12 23:17:55.194 [info] directory_send_command(): Downloading consensus from 172.19.156.16:5006 using /tor/status-vote/current/consensus-microdesc/58BF94+BAF767.z
-Feb 12 23:17:55.194 [warn] connection_ap_expire_beginning(): Bug: circuit->purpose == CIRCUIT_PURPOSE_C_GENERAL failed. The purpose on the circuit was Circuit made by controller; it was in state open, path_state new. (on Tor 0.4.2.6 971a6beff5a53434)
-Feb 12 23:17:55.194 [info] connection_ap_expire_beginning(): We tried for 10 seconds to connect to '127.0.0.1' using exit $514B92EF502BA1FD644BBDEEDF35E2CC8F2EF5AA~test045e at 172.19.156.16. Retrying on a new circuit
-
-Feb 12 23:18:18.285 [info] circuit_expire_old_circuits_clientside(): Closing n_circ_id 2533565025 (dirty 633 sec ago, purpose 22)
-Feb 12 23:18:18.286 [info] circuit_mark_for_close_(): Circuit 2533565025 (id: 24) marked for close at src/core/or/circuituse.c:1507 (orig reason: 9, new reason: 0)
-
-Feb 12 23:19:45.881 [notice] Tried for 120 seconds to get a connection to 127.0.0.1:12353. Giving up. (waiting for controller)
-
-
-
-
-
-
-DEBUG:root:Starting client protocol (id: 336, desc: ('127.0.0.1', 9099) -> 18: ['test002r', 'test030target', 'test058e'])
-
-ERROR:root:Client protocol id: 336 had an error (01:05:33.670825)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 246, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 172, in _run_iteration
-    raise ProtocolException('Could not connect to SOCKS proxy, msg: %x'%(response[1],))
-basic_protocols.ProtocolException: Could not connect to SOCKS proxy, msg: 5b
-
-Feb 13 01:03:23.054 [info] connection_handle_listener_read(): New SOCKS connection opened from 127.0.0.1.
-Feb 13 01:03:23.132 [info] connection_edge_process_inbuf(): data from edge while in 'waiting for controller' state. Leaving it on buffer.
-Feb 13 01:03:23.146 [info] channelpadding_send_padding_cell_for_callback(): Sending netflow keepalive on 8 to 172.19.156.16:5015 (0ADE7252BDDA535282B306B0A82CBCC9FEDE2C50) after 5108 ms. Delta 2ms
-Feb 13 01:03:23.152 [info] rep_hist_note_used_port(): New port prediction added. Will continue predictive circ building for 1998 more seconds.
-Feb 13 01:03:23.152 [info] link_apconn_to_circ(): Looks like completed circuit to hidden service doesn't allow optimistic data for connection to 127.0.0.1
-Feb 13 01:03:23.152 [info] connection_ap_handshake_send_begin(): Sending relay cell 0 on circ 3126016833 to begin stream 24424.
-Feb 13 01:03:23.152 [info] connection_ap_handshake_send_begin(): Address/port sent, ap socket 36, n_circ_id 3126016833
-
-Feb 13 01:05:32.666 [info] connection_ap_expire_beginning(): Controller circuit has tried for 129 seconds to connect to '127.0.0.1' using exit $56D109A57085FEAF4B7EBC789C30E13EECEDBCC8~test058e at 17
-2.19.156.16. But not giving up!
-Feb 13 01:05:33.478 [info] connection_ap_process_end_not_open(): Address '127.0.0.1' refused due to 'misc error'. Considering retrying.
-Feb 13 01:05:33.478 [info] client_dns_incr_failures(): Address 127.0.0.1 now has 1 resolve failures.
-Feb 13 01:05:33.478 [info] exit circ (length 3): $610EC312C40076330EBB4C72522995C03D8C442A(open) $B8F98F5966E41AAB5DCDEED7ECEF637644ACF11C(open) $56D109A57085FEAF4B7EBC789C30E13EECEDBCC8(open)
-Feb 13 01:05:33.670 [notice] Tried for 130 seconds to get a connection to 127.0.0.1:12353. Giving up. (waiting for controller)
-
-Feb 13 01:06:01.782 [info] circuit_expire_old_circuits_clientside(): Closing n_circ_id 3126016833 (dirty 758 sec ago, purpose 22)
-Feb 13 01:06:01.782 [info] circuit_mark_for_close_(): Circuit 3126016833 (id: 18) marked for close at src/core/or/circuituse.c:1507 (orig reason: 9, new reason: 0)
-
-
-
-
-
-
-DEBUG:root:Starting client protocol (id: 1084, desc: ('127.0.0.1', 9097) -> 23: ['test021r', 'test030target', 'test045e'])
-
-ERROR:root:Client protocol id: 1084 had an error (22:13:40.143767)
-Traceback (most recent call last):
-  File "/home/sengler/code/working/tor-benchmarking/dev/experiment_client.py", line 257, in _run_client
-    protocol.run()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 130, in _run_iteration
-    if self.protocols[self.current_protocol] is None or self.protocols[self.current_protocol].run():
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 71, in run
-    finished = self._run_iteration()
-  File "/home/sengler/code/working/tor-benchmarking/dev/basic_protocols.py", line 172, in _run_iteration
-    raise ProtocolException('Could not connect to SOCKS proxy, msg: %x'%(response[1],))
-basic_protocols.ProtocolException: Could not connect to SOCKS proxy, msg: 5b
-
-
-Feb 15 22:11:40.074 [info] connection_handle_listener_read(): New SOCKS connection opened from 127.0.0.1.
-Feb 15 22:11:40.288 [info] connection_edge_process_inbuf(): data from edge while in 'waiting for controller' state. Leaving it on buffer.
-Feb 15 22:11:40.308 [info] rep_hist_note_used_port(): New port prediction added. Will continue predictive circ building for 3043 more seconds.
-Feb 15 22:11:40.308 [info] link_apconn_to_circ(): Looks like completed circuit to hidden service doesn't allow optimistic data for connection to 127.0.0.1
-Feb 15 22:11:40.308 [info] connection_ap_handshake_send_begin(): Sending relay cell 0 on circ 3202562382 to begin stream 29645.
-Feb 15 22:11:40.308 [info] connection_ap_handshake_send_begin(): Address/port sent, ap socket 4, n_circ_id 3202562382
-
-Feb 15 22:12:16.809 [info] circuit_expire_old_circuits_clientside(): Closing n_circ_id 3202562382 (dirty 636 sec ago, purpose 22)
-Feb 15 22:12:16.809 [info] circuit_mark_for_close_(): Circuit 3202562382 (id: 23) marked for close at src/core/or/circuituse.c:1507 (orig reason: 9, new reason: 0)
-Feb 15 22:12:16.810 [info] circuit_free_(): Circuit 0 (id: 23) has been freed.
-
-Feb 15 22:13:40.141 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 15 22:13:40.141 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 15 22:13:40.142 [notice] Tried for 120 seconds to get a connection to 127.0.0.1:12353. Giving up. (waiting for controller)
-Feb 15 22:13:40.147 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.
-Feb 15 22:13:40.147 [info] connection_edge_package_raw_inbuf(): called with package_window 0. Skipping.

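Most failures in the deleted experiment-errors log are either 'The socket was closed.' or 'Could not connect to SOCKS proxy, msg: 5b'. In SOCKS4 the second byte of the 8-byte reply is the status code: 0x5a means the request was granted and 0x5b means it was rejected or failed, which lines up with tor's 'Tried for 120 seconds to get a connection to 127.0.0.1:12353. Giving up.' notices. A minimal sketch of checking that reply byte on an already-connected client socket; read_socks4_reply() is an illustrative helper, not the code from basic_protocols.py:

import socket

SOCKS4_GRANTED = 0x5a   # request granted
SOCKS4_REJECTED = 0x5b  # request rejected or failed

def read_socks4_reply(sock: socket.socket) -> int:
    # the SOCKS4 reply is 8 bytes: version, status, then port and address fields
    reply = b''
    while len(reply) < 8:
        chunk = sock.recv(8 - len(reply))
        if not chunk:
            raise ConnectionError('The socket was closed.')
        reply += chunk
    status = reply[1]
    if status != SOCKS4_GRANTED:
        raise ConnectionError('Could not connect to SOCKS proxy, msg: %x' % status)
    return status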
+ 0 - 21
src/old-network-settings

@@ -1,21 +0,0 @@
-                    if host == 'cluck2':
-                        num_clients = 150
-                        num_guards = 28 # number of relays (including guards)
-                        num_authorities = 2 # will also act as a relay or guard
-                        num_exits = 32 # will be used only as an exit
-                        num_streams_per_client = 10
-                        num_bytes = 20*(2**20)
-                    elif host == 'sengler-rpi':
-                        num_clients = 24
-                        num_guards = 28 # number of relays (including guards)
-                        num_authorities = 2 # will also act as a relay or guard
-                        num_exits = 32 # will be used only as an exit
-                        num_streams_per_client = 8
-                        num_bytes = 10*(2**20)
-                    elif host is None:
-                        num_clients = 10
-                        num_guards = 10 # number of relays (including guards)
-                        num_authorities = 2 # will also act as a relay or guard
-                        num_exits = 12 # will be used only as an exit
-                        num_streams_per_client = 5
-                        num_bytes = 20*(2**20)

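The deleted old-network-settings snippet hard-coded one parameter profile per host in an if/elif chain. The same values as a lookup table, purely as an illustrative restatement (NETWORK_SETTINGS is not a name from this repository; host is the variable the deleted snippet switched on):

# per-host experiment parameters copied from the deleted snippet
NETWORK_SETTINGS = {
    'cluck2':      dict(num_clients=150, num_guards=28, num_authorities=2,
                        num_exits=32, num_streams_per_client=10, num_bytes=20*(2**20)),
    'sengler-rpi': dict(num_clients=24,  num_guards=28, num_authorities=2,
                        num_exits=32, num_streams_per_client=8,  num_bytes=10*(2**20)),
    None:          dict(num_clients=10,  num_guards=10, num_authorities=2,
                        num_exits=12, num_streams_per_client=5,  num_bytes=20*(2**20)),
}

settings = NETWORK_SETTINGS[host]  # host is None for the default profile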
+ 0 - 167
src/old/correctness_tester.py

@@ -1,167 +0,0 @@
-#!/usr/bin/python3
-#
-import basic_protocols
-import logging
-import enum
-import time
-import socket
-#
-class ClientConnectionProtocol(basic_protocols.Protocol):
-	def __init__(self, endpoint, data, proxy=None, username=None):
-		self.endpoint = endpoint
-		self.data = data
-		self.proxy = proxy
-		self.username = username
-		#
-		self.states = enum.Enum('CLIENT_CONN_STATES', 'READY_TO_BEGIN CONNECT_TO_PROXY SEND_DATA DONE')
-		self.state = self.states.READY_TO_BEGIN
-		#
-		self.socket = socket.socket()
-		self.sub_protocol = None
-		#
-		if self.proxy is None:
-			logging.debug('Socket %d connecting to endpoint %r...', self.socket.fileno(), self.endpoint)
-			self.socket.connect(self.endpoint)
-		else:
-			logging.debug('Socket %d connecting to proxy %r...', self.socket.fileno(), self.proxy)
-			self.socket.connect(self.proxy)
-		#
-	#
-	def _run_iteration(self, block=True):
-		if self.state is self.states.READY_TO_BEGIN:
-			if self.proxy is None:
-				self.sub_protocol = basic_protocols.SendDataProtocol(self.socket, self.data)
-				self.state = self.states.SEND_DATA
-			else:
-				#self.sub_protocol = basic_protocols.Socks4Protocol(self.socket, self.endpoint, username=self.username)
-				self.sub_protocol = basic_protocols.WeirdProtocol(self.socket, self.endpoint)
-				self.state = self.states.CONNECT_TO_PROXY
-			#
-		#
-		if self.state is self.states.CONNECT_TO_PROXY:
-			if self.sub_protocol.run(block=block):
-				self.sub_protocol = basic_protocols.SendDataProtocol(self.socket, self.data)
-				self.state = self.states.SEND_DATA
-			#
-		#
-		if self.state is self.states.SEND_DATA:
-			if self.sub_protocol.run(block=block):
-				self.state = self.states.DONE
-				return True
-			#
-		#
-		return False
-	#
-#
-class ServerConnectionProtocol(basic_protocols.Protocol):
-	def __init__(self, socket, conn_id, data_callback=None):
-		self.socket = socket
-		self.conn_id = conn_id
-		self.data_callback = data_callback
-		#
-		self.states = enum.Enum('SERVER_CONN_STATES', 'READY_TO_BEGIN RECV_DATA DONE')
-		self.state = self.states.READY_TO_BEGIN
-		#
-		self.sub_protocol = None
-	#
-	def _run_iteration(self, block=True):
-		if self.state is self.states.READY_TO_BEGIN:
-			self.sub_protocol = basic_protocols.ReceiveDataProtocol(self.socket)
-			self.state = self.states.RECV_DATA
-		#
-		if self.state is self.states.RECV_DATA:
-			if self.sub_protocol.run(block=block):
-				self.data_callback(self.conn_id, self.sub_protocol.received_data)
-				self.state = self.states.DONE
-				return True
-			#
-		#
-		return False
-	#
-#
-if __name__ == '__main__':
-	import sys
-	logging.basicConfig(level=logging.DEBUG)
-	#
-	import random
-	random.seed(10)
-	data_to_send = bytearray(random.getrandbits(8) for _ in range(1024*1024*100))
-	#
-	print('Generated bytes')
-	#
-	if sys.argv[1] == 'client':
-		import os
-		#
-		endpoint = ('127.0.0.1', 4747)
-		#endpoint = ('127.0.0.1', 8627)
-		#proxy = ('127.0.0.1', 9003+int(sys.argv[3])-1)
-		#proxy = ('127.0.0.1', 9003)
-		proxy = ('127.0.0.1', 12849)
-		#proxy = None
-		username = bytes([x for x in os.urandom(12) if x != 0])
-		#username = None
-		#
-		client = ClientConnectionProtocol(endpoint, data_to_send, proxy=proxy, username=username)
-		client.run()
-		#
-	elif sys.argv[1] == 'server':
-		import multiprocessing
-		import queue
-		#
-		endpoint = ('127.0.0.1', 4747)
-		processes = []
-		processes_map = {}
-		joinable_connections = multiprocessing.Queue()
-		conn_counter = [0]
-		group_queue = multiprocessing.Queue()
-		bw_queue = multiprocessing.Queue()
-		#
-		def data_callback(conn_id, data):
-			# check data here
-			print('Received {} MB'.format(len(data)/(1024**2)))
-			print('Data matches: {}'.format(data==data_to_send))
-		#
-		def start_server_conn(socket, conn_id):
-			server = ServerConnectionProtocol(socket, conn_id, data_callback=data_callback)
-			try:
-				server.run()
-			except KeyboardInterrupt:
-				socket.close()
-			finally:
-				joinable_connections.put(conn_id)
-			#
-		#
-		def accept_callback(socket):
-			conn_id = conn_counter[0]
-			conn_counter[0] += 1
-			#logging.debug('Adding connection %d', conn_id)
-			p = multiprocessing.Process(target=start_server_conn, args=(socket, conn_id))
-			processes.append(p)
-			processes_map[conn_id] = p
-			p.start()
-			socket.close()
-			# close this process' copy of the socket
-		#
-		l = basic_protocols.ServerListener(endpoint, accept_callback)
-		#
-		try:
-			while True:
-				l.accept()
-				try:
-					while True:
-						conn_id = joinable_connections.get(False)
-						p = processes_map[conn_id]
-						p.join()
-					#
-				except queue.Empty:
-					pass
-				#
-			#
-		except KeyboardInterrupt:
-			print()
-		#
-		for p in processes:
-			p.join()
-		#
-	#
-#

+ 0 - 22
src/old/start_clients.sh

@@ -1,22 +0,0 @@
-#!/bin/bash
-
-START_TIME="$(python3 -c 'import time; print(int(time.time())+5)')"
-
-for i in {1..30}; do
-	echo "Starting client ${i}..."
-	python3 throughput_client.py 127.0.0.1 4373 --wait "${START_TIME}" &
-	pids[${i}]=$!
-	python3 -c "import time; time.sleep(0.01)"
-done
-
-trap stop_everything INT
-
-function stop_everything(){
-	for pid in ${pids[*]}; do
-		kill $pid
-	done
-}
-
-for pid in ${pids[*]}; do
-	wait $pid
-done

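The deleted start_clients.sh picked a start time five seconds in the future and passed it to all 30 throughput_client.py instances via --wait, so the processes launch staggered but begin pushing data at the same epoch second. A minimal sketch of the waiting side, assuming the client simply sleeps until that timestamp (illustrative, not the code from throughput_client.py):

import time

def wait_until(start_time):
    # sleep until the agreed-upon epoch second so every client starts pushing together
    delay = start_time - time.time()
    if delay > 0:
        time.sleep(delay)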
+ 0 - 23
src/old/stem_test.py

@@ -1,23 +0,0 @@
-import stem.descriptor.remote
-
-
-#try:
-#  for desc in stem.descriptor.remote.get_consensus():
-#    print("found relay %s (%s)" % (desc.nickname, desc.fingerprint))
-#except Exception as exc:
-#  print("Unable to retrieve the consensus: %s" % exc
-
-endpoint = ('127.0.0.1', 8080)
-
-try:
-  consensus = stem.descriptor.remote.get_consensus(
-    endpoints = (stem.DirPort('127.0.0.1', 7000),)
-  )
-  print('got consensus: {}'.format(consensus))
-  for desc in consensus:
-    print("found relay %s (%s) - %s" % (desc.nickname, desc.fingerprint, desc.exit_policy))
-
-  for x in [desc for desc in consensus if desc.exit_policy.can_exit_to(*endpoint)]:
-    print('%s' % (x.nickname))
-except Exception as exc:
-  print("Unable to retrieve the consensus: %s" % exc)

+ 0 - 237
src/old/stress_tester.py

@@ -1,237 +0,0 @@
-#!/usr/bin/python3
-#
-import throughput_protocols
-import basic_protocols
-import useful
-import time
-import os
-import argparse
-import logging
-import socket
-import random
-import multiprocessing
-import stem.control
-import stem.descriptor.remote
-import base64
-import binascii
-#
-logging.getLogger('stem').setLevel(logging.WARNING)
-#
-def start_client_process(protocol, id_num, finished_queue):
-	p = multiprocessing.Process(target=run_client, args=(protocol, id_num, finished_queue))
-	p.start()
-	return p
-#
-def run_client(protocol, id_num, finished_queue):
-	try:
-		print('Starting protocol (id: {})'.format(id_num))
-		protocol.run()
-		print('Done protocol (id: {})'.format(id_num))
-	finally:
-		finished_queue.put(id_num)
-	#
-#
-def parse_range(range_str):
-	return tuple(int(x) for x in range_str.split('-'))
-#
-def get_socks_ports(control_ports):
-	ports = []
-	#
-	for x in control_ports:
-		#print(x)
-		with stem.control.Controller.from_port(port=x) as controller:
-			controller.authenticate()
-			#
-			socks_addresses = controller.get_listeners(stem.control.Listener.SOCKS)
-			#print(socks_addresses)
-			assert(len(socks_addresses) == 1)
-			assert(socks_addresses[0][0] == '127.0.0.1')
-			#
-			ports.append(socks_addresses[0][1])
-		#
-	#
-	return ports
-#
-if __name__ == '__main__':
-	logging.basicConfig(level=logging.DEBUG)
-	#
-	parser = argparse.ArgumentParser(description='Test the network throughput (optionally through a proxy).')
-	parser.add_argument('ip', type=str, help='destination ip address')
-	parser.add_argument('port', type=int, help='destination port')
-	parser.add_argument('num_bytes', type=useful.parse_bytes,
-	                    help='number of bytes to send (can also end with \'B\', \'KiB\', \'MiB\', or \'GiB\')', metavar='num-bytes')
-	parser.add_argument('proxy_control_port_range', type=parse_range, help='range of ports for the control ports')
-	#parser.add_argument('--proxy', type=str, help='proxy ip address and port', metavar=('ip','port'), nargs=2)
-	#parser.add_argument('--fake-proxy', action='store_true', help='connecting to a fake-tor proxy')
-	parser.add_argument('--wait', type=int,
-	                    help='wait until the given time before pushing data (time in seconds since epoch)', metavar='time')
-	parser.add_argument('--buffer-len', type=useful.parse_bytes,
-	                    help='size of the send and receive buffers (can also end with \'B\', \'KiB\', \'MiB\', or \'GiB\')', metavar='bytes')
-	parser.add_argument('--no-accel', action='store_true', help='don\'t use C acceleration (use pure Python)')
-	args = parser.parse_args()
-	#
-	endpoint = (args.ip, args.port)
-	proxy_control_ports = list(range(args.proxy_control_port_range[0], args.proxy_control_port_range[1]+1))
-	#
-	streams_per_client = 3
-	#
-	socks_ports = get_socks_ports(proxy_control_ports)
-	#
-	try:
-		consensus = stem.descriptor.remote.get_consensus(endpoints=(stem.DirPort('127.0.0.1', 7000),))
-		#
-		relay_fingerprints = [desc.fingerprint for desc in consensus]
-		exit_fingerprints = [desc.fingerprint for desc in consensus if desc.exit_policy.can_exit_to(*endpoint)]
-	except Exception as e:
-		raise Exception('Unable to retrieve the consensus') from e
-	#
-	print('Num socks ports: {}'.format(len(socks_ports)))
-	print('Num relays: {}'.format(len(relay_fingerprints)))
-	print('Num exits: {}'.format(len(exit_fingerprints)))
-	#
-	assert(len(relay_fingerprints) >= len(socks_ports)*3+1)
-	assert(len(exit_fingerprints) >= len(socks_ports)+1)
-	#
-	remaining_relays = list(relay_fingerprints)
-	#
-	target_relay = exit_fingerprints[0]
-	remaining_relays = list(set(remaining_relays)-set([target_relay]))
-	exit_relays = exit_fingerprints[1:1+len(socks_ports)]
-	remaining_relays = list(set(remaining_relays)-set(exit_fingerprints))
-	guard_relays = remaining_relays[:len(socks_ports)]
-	remaining_relays = list(set(remaining_relays)-set(guard_relays))
-	middle_relays = remaining_relays[:len(socks_ports)]
-	remaining_relays = list(set(remaining_relays)-set(middle_relays))
-	#
-	exit_relays = list(exit_relays)
-	guard_relays = list(guard_relays)
-	#
-	controllers = []
-	#
-	controller_circuits = {}
-	fraction_middle = 1
-	#
-	for x in range(len(proxy_control_ports)):
-		#with stem.control.Controller.from_port(port=x) as controller:
-		controller = stem.control.Controller.from_port(port=proxy_control_ports[x])
-		controller.authenticate()
-		#
-		controller_circuits[controller] = []
-		for y in range(streams_per_client):
-			if (x*streams_per_client+y)/(len(proxy_control_ports)*streams_per_client+y) < fraction_middle:
-				circuit = [random.choice(guard_relays), target_relay, random.choice(exit_relays)]
-			else:
-				circuit = [random.choice(guard_relays), random.choice(middle_relays), target_relay]
-			#
-			#circuit = [random.choice(guard_relays), random.choice(middle_relays), random.choice(exit_relays)]
-			#circuit = [middle_relay, random.choice(exit_relays), random.choice(guard_relays)]
-			#circuit = [random.choice(exit_relays), random.choice(guard_relays), middle_relay]
-			print('New circuit #{}'.format(y))
-			print(circuit)
-			circuit_id = controller.new_circuit(circuit, await_build=True)
-			controller_circuits[controller].append(circuit_id)
-		#
-		def attach_stream(stream, controller):
-			#print(stream)
-			#print(controller)
-			#print(circuit_id)
-			if stream.status == 'NEW' and stream.purpose == 'USER':
-				print('Attaching (num_remaining={})'.format(len(controller_circuits[controller])-1))
-				#controller.attach_stream(stream.id, circuit_id)
-				controller.attach_stream(stream.id, controller_circuits[controller][0])
-				controller_circuits[controller] = controller_circuits[controller][1:]
-			#
-		#
-		controller.add_event_listener(lambda x, controller=controller: attach_stream(x, controller), stem.control.EventType.STREAM)
-		controller.set_conf('__LeaveStreamsUnattached', '1')
-		controllers.append(controller)
-		#
-	#
-	processes = {}
-	process_counter = 0
-	finished_processes = multiprocessing.Queue()
-	#
-	for y in range(streams_per_client):
-		for x in socks_ports:
-			client_socket = socket.socket()
-			protocols = []
-			#
-			proxy_username = bytes([z for z in os.urandom(12) if z != 0])
-			proxy_endpoint = ('127.0.0.1', x)
-			#
-			logging.debug('Socket %d connecting to proxy %r...', client_socket.fileno(), proxy_endpoint)
-			client_socket.connect(proxy_endpoint)
-			logging.debug('Socket %d connected', client_socket.fileno())
-			#
-			proxy_protocol = basic_protocols.Socks4Protocol(client_socket, endpoint, username=proxy_username)
-			protocols.append(proxy_protocol)
-			#
-			throughput_protocol = throughput_protocols.ClientProtocol(client_socket, args.num_bytes,
-														 wait_until=args.wait,
-														 send_buffer_len=args.buffer_len,
-														 use_acceleration=(not args.no_accel))
-			protocols.append(throughput_protocol)
-			#
-			combined_protocol = basic_protocols.ChainedProtocol(protocols)
-			processes[process_counter] = start_client_process(combined_protocol, process_counter, finished_processes)
-			process_counter += 1
-			client_socket.close()
-			#
-			time.sleep(0.01)
-		#
-	#
-	print('Starting in {:.2f} seconds'.format(args.wait-time.time()))
-	#
-	try:
-		while len(processes) > 0:
-			print('Waiting for processes ({} left)'.format(len(processes)))
-			p_id = finished_processes.get()
-			p = processes[p_id]
-			p.join()
-			processes.pop(p_id)
-		#
-	except KeyboardInterrupt as e:
-		print()
-		for p_id in processes:
-			processes[p_id].terminate()
-		#
-	#
-	print('Processes finished')
-	#
-	for c in controllers:
-		c.close()
-	#
-#
-
-
-
-# old code, keeping just in case
-
-	'''
-	with stem.control.Controller.from_port(port=proxy_control_ports[0]) as controller:
-		controller.authenticate()
-		#print(controller.get_version())
-		#print(stem.version.Requirement.GETINFO_MICRODESCRIPTORS)
-		#print(controller.get_version() >= stem.version.Requirement.GETINFO_MICRODESCRIPTORS)
-		#print('-------')
-		#print([x.exit_policy for x in controller.get_network_statuses()])
-		relay_fingerprints = list(set([desc.fingerprint for desc in controller.get_network_statuses()]))
-		#print(relay_fingerprints)
-		relay_digest_map = {desc.digest: desc.fingerprint for desc in controller.get_network_statuses()}
-		print(relay_digest_map)
-		relay_exit_digests = list(set([desc.digest for desc in controller.get_microdescriptors() if desc.exit_policy.can_exit_to(*endpoint)]))
-		#print(relay_exit_digests)
-		print([desc.microdescriptor_digest for desc in controller.get_microdescriptors() if desc.exit_policy.can_exit_to(*endpoint)])
-		print([binascii.hexlify(base64.b64decode(digest()+'===')).decode('utf-8').upper()[:40] for digest in relay_exit_digests])
-		relay_exits = list(set([relay_digest_map[binascii.hexlify(base64.b64decode(digest()+'===')).decode('utf-8').upper()[:40]] for digest in relay_exit_digests]))
-		#print(relay_exits)
-		#
-		#print(dir(list(controller.get_network_statuses())[0]))
-		#print(type(list(controller.get_network_statuses())[0]))
-		#print([desc for desc in controller.get_microdescriptors()])
-		#print([desc.exit_policy for desc in controller.get_microdescriptors()])
-		#print([desc.exit_policy.can_exit_to(*endpoint) for desc in controller.get_microdescriptors()])
-		#print([desc.fingerprint for desc in controller.get_microdescriptors()])
-		#print([desc.flags for desc in controller.get_microdescriptors()])
-	#
-	'''

+ 0 - 274
src/throughput_server.old.py

@@ -1,274 +0,0 @@
-#!/usr/bin/python3
-#
-import throughput_protocols
-import basic_protocols
-import os
-import multiprocessing
-import threading
-import queue
-import logging
-import argparse
-#
-def overlap_byte_counters(byte_counters):
-	start_time = None
-	finish_time = None
-	for x in byte_counters:
-		if start_time is None or x['start_time'] < start_time:
-			start_time = x['start_time']
-		#
-		if finish_time is None or x['start_time']+len(x['history']) > finish_time:
-			finish_time = x['start_time']+len(x['history'])
-		#
-	#
-	total_history = [0]*(finish_time-start_time)
-	#
-	for x in byte_counters:
-		for y in range(len(x['history'])):
-			total_history[(x['start_time']-start_time)+y] += x['history'][y]
-		#
-	#
-	return total_history
-#
-if __name__ == '__main__':
-	logging.basicConfig(level=logging.DEBUG)
-	#
-	parser = argparse.ArgumentParser(description='Test the network throughput (optionally through a proxy).')
-	parser.add_argument('port', type=int, help='listen on port')
-	parser.add_argument('--no-accel', action='store_true', help='don\'t use C acceleration (use pure Python)')
-	parser.add_argument('--localhost', action='store_true', help='bind to 127.0.0.1 instead of 0.0.0.0')
-	args = parser.parse_args()
-	#
-	if args.localhost:
-		endpoint = ('127.0.0.1', args.port)
-	else:
-		endpoint = ('0.0.0.0', args.port)
-	#
-	processes = []
-	processes_map = {}
-	joinable_connections = multiprocessing.Queue()
-	joinable_connections_list = []
-	conn_counter = [0]
-	group_queue = multiprocessing.Queue()
-	group_queue_list = []
-	bw_queue = multiprocessing.Queue()
-	bw_queue_list = []
-	#
-	def group_id_callback(conn_id, group_id):
-		# put them in a queue to display later
-		#logging.debug('For conn %d Received group id: %d', conn_id, group_id)
-		group_queue.put({'conn_id':conn_id, 'group_id':group_id})
-	#
-	#def bw_callback(conn_id, data_size, time_first_byte, time_last_byte, transfer_rate, byte_counter, byte_counter_start_time):
-	def bw_callback(conn_id, custom_data, data_size, time_first_byte, time_last_byte, transfer_rate, deltas):
-		# put them in a queue to display later
-		#bw_queue.put({'conn_id':conn_id, 'data_size':data_size, 'time_of_first_byte':time_first_byte, 'time_of_last_byte':time_last_byte, 'transfer_rate':transfer_rate, 'byte_counter':byte_counter, 'byte_counter_start_time':byte_counter_start_time})
-		bw_queue.put({'conn_id':conn_id, 'custom_data':custom_data, 'data_size':data_size, 'time_of_first_byte':time_first_byte, 'time_of_last_byte':time_last_byte, 'transfer_rate':transfer_rate, 'deltas':deltas})
-	#
-	def start_server_conn(socket, conn_id):
-		server = throughput_protocols.ServerProtocol(socket, conn_id, group_id_callback=group_id_callback,
-		                                             bandwidth_callback=bw_callback, use_acceleration=(not args.no_accel))
-		try:
-			server.run()
-		except KeyboardInterrupt:
-			socket.close()
-		finally:
-			joinable_connections.put(conn_id)
-			'''
-			while True:
-				# while we're waiting to join, we might get a KeyboardInterrupt,
-				# in which case we cannot let the process end since it will kill
-				# the queue threads, which may be waiting to push data to the pipe
-				try:
-					joinable_connections.close()
-					group_queue.close()
-					bw_queue.close()
-					#
-					group_queue.join_thread()
-					bw_queue.join_thread()
-					joinable_connections.join_thread()
-					#
-					break
-				except KeyboardInterrupt:
-					pass
-				#
-			#
-			'''
-		#
-	#
-	def accept_callback(socket):
-		conn_id = conn_counter[0]
-		conn_counter[0] += 1
-		#logging.debug('Adding connection %d', conn_id)
-		p = multiprocessing.Process(target=start_server_conn, args=(socket, conn_id))
-		processes.append(p)
-		processes_map[conn_id] = p
-		p.start()
-		socket.close()
-		# close this process' copy of the socket
-	#
-	def unqueue(q, l, print_len=False):
-		while True:
-			val = q.get()
-			if val is None:
-				break
-			#
-			l.append(val)
-			if print_len:
-				print('Queue length: {}'.format(len(l)), end='\r')
-			#
-		#
-	#
-	l = basic_protocols.ServerListener(endpoint, accept_callback)
-	#
-	t_joinable_connections = threading.Thread(target=unqueue, args=(joinable_connections, joinable_connections_list))
-	t_group_queue = threading.Thread(target=unqueue, args=(group_queue, group_queue_list))
-	t_bw_queue = threading.Thread(target=unqueue, args=(bw_queue, bw_queue_list, True))
-	#
-	t_joinable_connections.start()
-	t_group_queue.start()
-	t_bw_queue.start()
-	#
-	try:
-		while True:
-			l.accept()
-			'''
-			try:
-				while True:
-					conn_id = joinable_connections.get(False)
-					p = processes_map[conn_id]
-					p.join()
-				#
-			except queue.Empty:
-				pass
-			#
-			'''
-		#
-	except KeyboardInterrupt:
-		print()
-		#
-		try:
-			for p in processes:
-				p.join()
-			#
-		except KeyboardInterrupt:
-			pass
-		#
-		joinable_connections.put(None)
-		group_queue.put(None)
-		bw_queue.put(None)
-		t_joinable_connections.join()
-		t_group_queue.join()
-		t_bw_queue.join()
-		#
-		bw_values = {}
-		group_values = {}
-		#
-		'''
-		logging.info('BW queue length: {}'.format(bw_queue.qsize()))
-		logging.info('Group queue length: {}'.format(group_queue.qsize()))
-		#
-		temp_counter = 0
-		try:
-			while True:
-				bw_val = bw_queue.get(False)
-				bw_values[bw_val['conn_id']] = bw_val
-				temp_counter += 1
-			#
-		except queue.Empty:
-			pass
-		#
-		logging.info('temp counter: {}'.format(temp_counter))
-		import time
-		time.sleep(2)
-		try:
-			while True:
-				bw_val = bw_queue.get(False)
-				bw_values[bw_val['conn_id']] = bw_val
-				temp_counter += 1
-			#
-		except queue.Empty:
-			pass
-		#
-		logging.info('temp counter: {}'.format(temp_counter))
-		
-		#
-		try:
-			while True:
-				group_val = group_queue.get(False)
-				group_values[group_val['conn_id']] = group_val
-			#
-		except queue.Empty:
-			pass
-		#
-		logging.info('bw_values length: {}'.format(len(bw_values)))
-		logging.info('group_values length: {}'.format(len(group_values)))
-		logging.info('group_values set: {}'.format(list(set([x['group_id'] for x in group_values.values()]))))
-		#
-		'''
-		#
-		#logging.info('BW list length: {}'.format(len(bw_queue_list)))
-		#logging.info('Group list length: {}'.format(len(group_queue_list)))
-		#
-		for x in bw_queue_list:
-			bw_values[x['conn_id']] = x
-		#
-		for x in group_queue_list:
-			group_values[x['conn_id']] = x
-		#
-		group_set = set([x['group_id'] for x in group_values.values()])
-		for group in group_set:
-			# doesn't handle group == None
-			conns_in_group = [x[0] for x in group_values.items() if x[1]['group_id'] == group]
-			in_group = [x for x in bw_values.values() if x['conn_id'] in conns_in_group]
-			if len(in_group) > 0:
-				avg_data_size = sum([x['data_size'] for x in in_group])/len(in_group)
-				avg_transfer_rate = sum([x['transfer_rate'] for x in in_group])/len(in_group)
-				total_transfer_rate = sum([x['data_size'] for x in in_group])/(max([x['time_of_last_byte'] for x in in_group])-min([x['time_of_first_byte'] for x in in_group]))
-				#
-				logging.info('Group size: %d', len(in_group))
-				logging.info('Avg Transferred (MiB): %.4f', avg_data_size/(1024**2))
-				logging.info('Avg Transfer rate (MiB/s): %.4f', avg_transfer_rate/(1024**2))
-				logging.info('Total Transfer rate (MiB/s): %.4f', total_transfer_rate/(1024**2))
-				#
-				'''
-				import math
-				histories = [{'start_time':x['byte_counter_start_time'], 'history':x['byte_counter']} for x in in_group]
-				total_history = overlap_byte_counters(histories)
-				#
-				logging.info('Max Transfer rate (MiB/s): %.4f', max(total_history)/(1024**2))
-				if sum(total_history) != sum([x['data_size'] for x in in_group]):
-					logging.warning('History doesn\'t add up ({} != {}).'.format(sum(total_history), sum([x['data_size'] for x in in_group])))
-				#
-				import json
-				with open('/tmp/group-{}.json'.format(group), 'w') as f:
-					json.dump({'id':group, 'history':total_history, 'individual_histories':histories, 'size':len(in_group), 'avg_transferred':avg_data_size,
-					           'avg_transfer_rate':avg_transfer_rate, 'total_transfer_rate':total_transfer_rate}, f)
-				#
-				'''
-				custom_data = [x['custom_data'].decode('utf-8') for x in in_group]
-				#
-				histories = [x['deltas'] for x in in_group]
-				combined_timestamps, combined_bytes = zip(*sorted(zip([x for y in histories for x in y['timestamps']],
-				                                                      [x for y in histories for x in y['bytes']])))
-				combined_history = {'bytes':combined_bytes, 'timestamps':combined_timestamps}
-				#combined_history = sorted([item for sublist in histories for item in sublist['deltas']], key=lambda x: x['timestamp'])
-				#
-				sum_history_bytes = sum(combined_history['bytes'])
-				sum_data_bytes = sum([x['data_size'] for x in in_group])
-				if sum_history_bytes != sum_data_bytes:
-					logging.warning('History doesn\'t add up ({} != {}).'.format(sum_history_bytes, sum_data_bytes))
-				#
-				import json
-				import gzip
-				with gzip.GzipFile('/tmp/group-{}.json.gz'.format(group), 'w') as f:
-					f.write(json.dumps({'id':group, 'history':combined_history, 'individual_histories':histories, 'size':len(in_group),
-					                    'avg_transferred':avg_data_size, 'avg_transfer_rate':avg_transfer_rate,
-					                    'total_transfer_rate':total_transfer_rate, 'custom_data':custom_data}, f).encode('utf-8'))
-				#
-			#
-		#
-	#
-	for p in processes:
-		p.join()
-	#
-#

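The deleted throughput_server.old.py summed per-connection byte-count histories by aligning them on their start seconds (overlap_byte_counters at the top of the file). A compact restatement with a worked example, for illustration only:

def overlap_byte_counters(byte_counters):
    # align each per-connection history on its start second and sum the overlap
    start = min(c['start_time'] for c in byte_counters)
    finish = max(c['start_time'] + len(c['history']) for c in byte_counters)
    total = [0] * (finish - start)
    for c in byte_counters:
        for i, n in enumerate(c['history']):
            total[c['start_time'] - start + i] += n
    return total

# two transfers overlapping by two seconds
print(overlap_byte_counters([
    {'start_time': 100, 'history': [10, 20, 30]},
    {'start_time': 101, 'history': [5, 5]},
]))  # -> [10, 25, 35]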
+ 0 - 258
tmp.patch

@@ -1,258 +0,0 @@
-diff --git a/src/basic_protocols.py b/src/basic_protocols.py
-index ba8b847..0dcecc8 100755
---- a/src/basic_protocols.py
-+++ b/src/basic_protocols.py
-@@ -123,26 +123,28 @@ class Socks4Protocol(Protocol):
- 	#
- #
- class PushDataProtocol(Protocol):
--	def __init__(self, socket, total_bytes, data_generator=None, send_max_bytes=1024*512, use_accelerated=True):
--		if data_generator is None:
--			data_generator = self._default_data_generator
-+	def __init__(self, socket, total_bytes, send_buffer_len=None, use_acceleration=None):
-+		if send_buffer_len is None:
-+			send_buffer_len = 1024*512
-+		#
-+		if use_acceleration is None:
-+			use_acceleration = True
- 		#
- 		self.socket = socket
--		self.data_generator = data_generator
- 		self.total_bytes = total_bytes
--		self.send_max_bytes = send_max_bytes
--		self.use_accelerated = use_accelerated
-+		self.use_acceleration = use_acceleration
- 		#
- 		self.states = enum.Enum('PUSH_DATA_STATES', 'READY_TO_BEGIN SEND_INFO PUSH_DATA RECV_CONFIRMATION DONE')
- 		self.state = self.states.READY_TO_BEGIN
- 		#
-+		self.byte_buffer = os.urandom(send_buffer_len)
- 		self.bytes_written = 0
- 		self.protocol_helper = None
- 	#
- 	def _run_iteration(self, block=True):
- 		if self.state is self.states.READY_TO_BEGIN:
- 			info = self.total_bytes.to_bytes(8, byteorder='big', signed=False)
--			info += self.send_max_bytes.to_bytes(8, byteorder='big', signed=False)
-+			info += len(self.byte_buffer).to_bytes(8, byteorder='big', signed=False)
- 			self.protocol_helper = ProtocolHelper()
- 			self.protocol_helper.set_buffer(info)
- 			self.state = self.states.SEND_INFO
-@@ -153,24 +155,28 @@ class PushDataProtocol(Protocol):
- 			#
- 		#
- 		if self.state is self.states.PUSH_DATA:
--			max_block_size = self.send_max_bytes
--			block_size = min(max_block_size, self.total_bytes-self.bytes_written)
--			data = self.data_generator(self.bytes_written, block_size)
--			#
--			if self.use_accelerated:
-+			if self.use_acceleration:
- 				if not block:
- 					logging.warning('Protocol set to non-blocking, but using the blocking accelerated function.')
- 				#
--				ret_val = accelerated_functions.push_data(self.socket.fileno(), self.total_bytes, data)
-+				ret_val = accelerated_functions.push_data(self.socket.fileno(), self.total_bytes, self.byte_buffer)
- 				if ret_val < 0:
- 					raise ProtocolException('Error while pushing data.')
- 				#
- 				self.bytes_written = self.total_bytes
- 			else:
-+				bytes_remaining = self.total_bytes-self.bytes_written
-+				data_size = min(len(self.byte_buffer), bytes_remaining)
-+				if data_size != len(self.byte_buffer):
-+					data = self.byte_buffer[:data_size]
-+				else:
-+					data = self.byte_buffer
-+					# don't make a copy of the byte string each time if we don't need to
-+				#
- 				n = self.socket.send(data)
- 				self.bytes_written += n
- 			#
--			if self.bytes_written >= self.total_bytes:
-+			if self.bytes_written == self.total_bytes:
- 				# finished sending the data
- 				logging.debug('Finished sending the data (%d bytes).', self.bytes_written)
- 				self.protocol_helper = ProtocolHelper()
-@@ -190,20 +196,20 @@ class PushDataProtocol(Protocol):
- 		#
- 		return False
- 	#
--	def _default_data_generator(self, index, bytes_needed):
--		return os.urandom(bytes_needed)
--	#
- #
- class PullDataProtocol(Protocol):
--	def __init__(self, socket, use_accelerated=True):
-+	def __init__(self, socket, use_acceleration=None):
-+		if use_acceleration is None:
-+			use_acceleration = True
-+		#
- 		self.socket = socket
--		self.use_accelerated = use_accelerated
-+		self.use_acceleration = use_acceleration
- 		#
- 		self.states = enum.Enum('PULL_DATA_STATES', 'READY_TO_BEGIN RECV_INFO PULL_DATA SEND_CONFIRMATION DONE')
- 		self.state = self.states.READY_TO_BEGIN
- 		#
- 		self.data_size = None
--		self.recv_max_bytes = None
-+		self.recv_buffer_len = None
- 		self.bytes_read = 0
- 		self.protocol_helper = None
- 		self._time_of_first_byte = None
-@@ -219,27 +225,28 @@ class PullDataProtocol(Protocol):
- 			if self.protocol_helper.recv(self.socket, info_size):
- 				response = self.protocol_helper.get_buffer()
- 				self.data_size = int.from_bytes(response[0:8], byteorder='big', signed=False)
--				self.recv_max_bytes = int.from_bytes(response[8:16], byteorder='big', signed=False)
-+				self.recv_buffer_len = int.from_bytes(response[8:16], byteorder='big', signed=False)
- 				self.state = self.states.PULL_DATA
- 			#
- 		#
- 		if self.state is self.states.PULL_DATA:
--			max_block_size = self.recv_max_bytes
--			block_size = min(max_block_size, self.data_size-self.bytes_read)
--			#
--			if self.use_accelerated:
-+			if self.use_acceleration:
- 				if not block:
- 					logging.warning('Protocol set to non-blocking, but using the blocking accelerated function.')
- 				#
--				(ret_val, elapsed_time) = accelerated_functions.pull_data(self.socket.fileno(), self.data_size, block_size)
-+				(ret_val, elapsed_time) = accelerated_functions.pull_data(self.socket.fileno(), self.data_size, self.recv_buffer_len)
- 				if ret_val < 0:
- 					raise ProtocolException('Error while pulling data.')
- 				#
- 				self.bytes_read = self.data_size
- 				self.elapsed_time = elapsed_time
- 			else:
-+				bytes_remaining = self.data_size-self.bytes_read
-+				block_size = min(self.recv_buffer_len, bytes_remaining)
-+				#
- 				data = self.socket.recv(block_size)
- 				self.bytes_read += len(data)
-+				#
- 				if self.bytes_read != 0 and self._time_of_first_byte is None:
- 					self._time_of_first_byte = time.time()
- 				#
-diff --git a/src/throughput_client.py b/src/throughput_client.py
-index d45dffe..0be8289 100644
---- a/src/throughput_client.py
-+++ b/src/throughput_client.py
-@@ -17,6 +17,8 @@ if __name__ == '__main__':
- 	parser.add_argument('--proxy', type=str, help='proxy ip address and port', metavar=('ip','port'), nargs=2)
- 	parser.add_argument('--wait', type=int,
- 	                    help='wait until the given time before pushing data (time in seconds since epoch)', metavar='time')
-+	parser.add_argument('--buffer-len', type=useful.parse_bytes, help='size of the send and receive buffers (can also end with \'B\', \'KiB\', \'MiB\', or \'GiB\')', metavar='bytes')
-+	parser.add_argument('--no-accel', action='store_true', help='don\'t use C acceleration (use pure Python)')
- 	args = parser.parse_args()
- 	#
- 	endpoint = (args.ip, args.port)
-@@ -27,7 +29,20 @@ if __name__ == '__main__':
- 	#
- 	username = bytes([x for x in os.urandom(12) if x != 0])
- 	#username = None
-+	'''
-+	data_MB = 200 #20000
-+	data_B = data_MB*2**20
- 	#
--	client = throughput_protocols.ClientProtocol(endpoint, args.num_bytes, proxy=proxy, username=username, wait_until=args.wait)
-+	if len(sys.argv) > 2:
-+		wait_until = int(sys.argv[2])
-+	else:
-+		wait_until = None
-+	#
-+	'''
-+	#
-+	client = throughput_protocols.ClientProtocol(endpoint, args.num_bytes, proxy=proxy,
-+	                                             username=username, wait_until=args.wait,
-+	                                             send_buffer_len=args.buffer_len,
-+	                                             use_acceleration=(not args.no_accel))
- 	client.run()
- #
-diff --git a/src/throughput_protocols.py b/src/throughput_protocols.py
-index 5dec4b6..3eb3d60 100755
---- a/src/throughput_protocols.py
-+++ b/src/throughput_protocols.py
-@@ -7,13 +7,14 @@ import time
- import socket
- #
- class ClientProtocol(basic_protocols.Protocol):
--	def __init__(self, endpoint, total_bytes, data_generator=None, proxy=None, username=None, wait_until=None):
-+	def __init__(self, endpoint, total_bytes, proxy=None, username=None, wait_until=None, send_buffer_len=None, use_acceleration=None):
- 		self.endpoint = endpoint
--		self.data_generator = data_generator
- 		self.total_bytes = total_bytes
- 		self.proxy = proxy
- 		self.username = username
- 		self.wait_until = wait_until
-+		self.send_buffer_len = send_buffer_len
-+		self.use_acceleration = use_acceleration
- 		#
- 		self.states = enum.Enum('CLIENT_CONN_STATES', 'READY_TO_BEGIN CONNECT_TO_PROXY SEND_GROUP_ID PUSH_DATA DONE')
- 		self.state = self.states.READY_TO_BEGIN
-@@ -47,7 +48,6 @@ class ClientProtocol(basic_protocols.Protocol):
- 				group_id_bytes = self.group_id.to_bytes(8, byteorder='big', signed=False)
- 				self.sub_protocol = basic_protocols.SendDataProtocol(self.socket, group_id_bytes)
- 				self.state = self.states.SEND_GROUP_ID
--				#logging.debug('Sent group ID.')
- 			#
- 		#
- 		if self.state is self.states.SEND_GROUP_ID:
-@@ -56,8 +56,8 @@ class ClientProtocol(basic_protocols.Protocol):
- 			#
- 			if (self.wait_until is None or time.time() >= self.wait_until) and self.sub_protocol.run(block=block):
- 				self.sub_protocol = basic_protocols.PushDataProtocol(self.socket, self.total_bytes,
--				                                                     data_generator=self.data_generator,
--				                                                     send_max_bytes=1024*512)
-+				                                                     send_buffer_len=self.send_buffer_len,
-+				                                                     use_acceleration=self.use_acceleration)
- 				self.state = self.states.PUSH_DATA
- 			#
- 		#
-@@ -71,11 +71,12 @@ class ClientProtocol(basic_protocols.Protocol):
- 	#
- #
- class ServerProtocol(basic_protocols.Protocol):
--	def __init__(self, socket, conn_id, group_id_callback=None, bandwidth_callback=None):
-+	def __init__(self, socket, conn_id, group_id_callback=None, bandwidth_callback=None, use_acceleration=None):
- 		self.socket = socket
- 		self.conn_id = conn_id
- 		self.group_id_callback = group_id_callback
- 		self.bandwidth_callback = bandwidth_callback
-+		self.use_acceleration = use_acceleration
- 		#
- 		self.states = enum.Enum('SERVER_CONN_STATES', 'READY_TO_BEGIN RECV_GROUP_ID PULL_DATA DONE')
- 		self.state = self.states.READY_TO_BEGIN
-@@ -95,7 +96,7 @@ class ServerProtocol(basic_protocols.Protocol):
- 					group_id = None
- 				#
- 				self.group_id_callback(self.conn_id, group_id)
--				self.sub_protocol = basic_protocols.PullDataProtocol(self.socket)
-+				self.sub_protocol = basic_protocols.PullDataProtocol(self.socket, use_acceleration=self.use_acceleration)
- 				self.state = self.states.PULL_DATA
- 			#
- 		#
-diff --git a/src/throughput_server.py b/src/throughput_server.py
-index a22ed8f..0217d14 100644
---- a/src/throughput_server.py
-+++ b/src/throughput_server.py
-@@ -13,6 +13,7 @@ if __name__ == '__main__':
- 	#
- 	parser = argparse.ArgumentParser(description='Test the network throughput (optionally through a proxy).')
- 	parser.add_argument('port', type=int, help='listen on port')
-+	parser.add_argument('--no-accel', action='store_true', help='don\'t use C acceleration (use pure Python)')
- 	args = parser.parse_args()
- 	#
- 	endpoint = ('127.0.0.1', args.port)
-@@ -34,7 +35,8 @@ if __name__ == '__main__':
- 		bw_queue.put({'conn_id':conn_id, 'data_size':data_size, 'transfer_rate':transfer_rate})
- 	#
- 	def start_server_conn(socket, conn_id):
--		server = throughput_protocols.ServerProtocol(socket, conn_id, group_id_callback=group_id_callback, bandwidth_callback=bw_callback)
-+		server = throughput_protocols.ServerProtocol(socket, conn_id, group_id_callback=group_id_callback,
-+		                                             bandwidth_callback=bw_callback, use_acceleration=(not args.no_accel))
- 		try:
- 			server.run()
- 		except KeyboardInterrupt: