Browse Source

Add bridge clients and hidden services to chutney verify

Chutney verify now verifies bridge clients and hidden services.

Bridge clients are verified just like clients, by using an exit to
connect to the TrafficTester's listening port.

Hidden services are verified by using a single client
(the first one in the list) to connect to the hidden service.
The hidden service is configured to redirect data to the
TrafficTester's listening host:port.

This works almost exactly like the standard client / bridge client tests.
The only difference is that the client connects via a hidden service port
pre-configured with a specific redirect, rather than via an exit which
could handle arbitrary hosts and ports.

Also review the number of relays/exits/authorities required for the 25%
guard changes. For 2 guards, 8 relays are required in total,
including authorities and exits.
teor 9 years ago
parent
commit
0d350720ff

+ 68 - 9
lib/chutney/TorNet.py

@@ -312,8 +312,8 @@ class LocalNodeBuilder(NodeBuilder):
         self._createTorrcFile(checkOnly=True)
 
     def preConfig(self, net):
-        """Called on all nodes before any nodes configure: generates keys as
-           needed.
+        """Called on all nodes before any nodes configure: generates keys and
+           hidden service directories as needed.
         """
         self._makeDataDir()
         if self._env['authority']:
@@ -666,6 +666,7 @@ DEFAULTS = {
     'bridge': False,
     'hs': False,
     'hs_directory': 'hidden_service',
+    'hs-hostname': None,
     'connlimit': 60,
     'net_base_dir': 'net',
     'tor': os.environ.get('CHUTNEY_TOR', 'tor'),
@@ -705,13 +706,19 @@ class TorEnviron(chutney.Templating.Environ):
           tor_gencert:
           auth_passphrase:
           torrc_template_path:
+          hs_hostname:
 
        Environment fields used:
           nodenum
           tag
           orport_base, controlport_base, socksport_base, dirport_base
+          tor-gencert (note hyphen)
           chutney_dir
           tor
+          dir
+          hs_directory
+          nick (debugging only)
+          hs-hostname (note hyphen)
 
        XXXX document the above.  Or document all fields in one place?
     """
@@ -752,6 +759,29 @@ class TorEnviron(chutney.Templating.Environ):
     def _get_lockfile(self, my):
         return os.path.join(self['dir'], 'lock')
 
+    # A hs generates its key on first run,
+    # so check for it at the last possible moment,
+    # but cache it in memory to avoid repeatedly reading the file
+    # XXXX - this is not like the other functions in this class,
+    # as it reads from a file created by the hidden service
+    def _get_hs_hostname(self, my):
+        if my['hs-hostname'] is None:
+            datadir = my['dir']
+            # a file containing a single line with the hs' .onion address
+            hs_hostname_file = os.path.join(datadir,
+                                            my['hs_directory'],
+                                            'hostname')
+            try:
+                with open(hs_hostname_file, 'r') as hostnamefp:
+                    hostname = hostnamefp.read()
+                # the hostname file ends with a newline
+                hostname = hostname.strip()
+                my['hs-hostname'] = hostname
+            except IOError as e:
+                print("Error: hs %r error %d: %r opening hostname file '%r'"
+                      %(my['nick'], e.errno, e.strerror, hs_hostname_file))
+        return my['hs-hostname']
+
 
 class Network(object):
 
@@ -866,28 +896,57 @@ class Network(object):
                 c.check(listNonRunning=False)
 
     def verify(self):
-        sys.stdout.write("Verifying data transmission: ")
-        sys.stdout.flush()
+        print("Verifying data transmission:")
         status = self._verify_traffic()
-        print("Success" if status else "Failure")
+        print("Transmission: %s" % ("Success" if status else "Failure"))
         return status
 
     def _verify_traffic(self):
         """Verify (parts of) the network by sending traffic through it
         and verify what is received."""
-        LISTEN_PORT = 4747  # FIXME: Do better! Note the default exit policy.
+        LISTEN_PORT = 4747 # FIXME: Do better! Note the default exit policy.
+        # HSs must have a HiddenServiceDir with
+        # "HiddenServicePort <HS_PORT> 127.0.0.1:<LISTEN_PORT>"
+        HS_PORT = 5858
         DATALEN = 10 * 1024               # Octets.
         TIMEOUT = 3                     # Seconds.
         with open('/dev/urandom', 'r') as randfp:
             tmpdata = randfp.read(DATALEN)
         bind_to = ('127.0.0.1', LISTEN_PORT)
         tt = chutney.Traffic.TrafficTester(bind_to, tmpdata, TIMEOUT)
-        for op in filter(lambda n:
-                         n._env['tag'] == 'c' or n._env['tag'] == 'bc',
-                         self._nodes):
+        client_list = filter(lambda n:
+                             n._env['tag'] == 'c' or n._env['tag'] == 'bc',
+                             self._nodes)
+        if len(client_list) == 0:
+            print("  Unable to verify network: no client nodes available")
+            return False
+        # Each client binds directly to 127.0.0.1:LISTEN_PORT via an Exit relay
+        for op in client_list:
+            print("  Exit to %s:%d via client %s:%s"
+                   % ('127.0.0.1', LISTEN_PORT,
+                      'localhost', op._env['socksport']))
             tt.add(chutney.Traffic.Source(tt, bind_to, tmpdata,
                                           ('localhost',
                                            int(op._env['socksport']))))
+        # The HS redirects .onion connections made to hs_hostname:HS_PORT
+        # to the Traffic Tester's 127.0.0.1:LISTEN_PORT
+        # We must have at least one working client for the hs test to succeed
+        for hs in filter(lambda n:
+                         n._env['tag'] == 'h',
+                         self._nodes):
+            # Instead of binding directly to LISTEN_PORT via an Exit relay,
+            # we bind to hs_hostname:HS_PORT via a hidden service connection
+            # through the first available client
+            bind_to = (hs._env['hs_hostname'], HS_PORT)
+            # Just choose the first client
+            client = client_list[0]
+            print("  HS to %s:%d (%s:%d) via client %s:%s"
+                  % (hs._env['hs_hostname'], HS_PORT,
+                     '127.0.0.1', LISTEN_PORT,
+                     'localhost', client._env['socksport']))
+            tt.add(chutney.Traffic.Source(tt, bind_to, tmpdata,
+                                          ('localhost',
+                                           int(client._env['socksport']))))
         return tt.run()
 
 

+ 1 - 0
lib/chutney/Traffic.py

@@ -56,6 +56,7 @@ def socks_cmd(addr_port):
     except socket.error:
         addr = '\x00\x00\x00\x01'
         dnsname = '%s\x00' % host
+    debug("Socks 4a request to %s:%d" % (host, port))
     return struct.pack('!BBH', ver, cmd, port) + addr + user + dnsname
 
 

+ 2 - 1
networks/basic

@@ -2,6 +2,7 @@ Authority = Node(tag="a", authority=1, relay=1, torrc="authority.tmpl")
 Relay = Node(tag="r", relay=1, torrc="relay.tmpl")
 Client = Node(tag="c", torrc="client.tmpl")
 
-NODES = Authority.getN(3) + Relay.getN(8) + Client.getN(2)
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+NODES = Authority.getN(3) + Relay.getN(5) + Client.getN(2)
 
 ConfigureNodes(NODES)

+ 3 - 0
networks/basic-min

@@ -2,6 +2,9 @@ Authority = Node(tag="a", authority=1, relay=1, torrc="authority.tmpl")
 Relay = Node(tag="r", relay=1, torrc="relay.tmpl")
 Client = Node(tag="c", torrc="client.tmpl")
 
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+# Since basic-min only has 4, at least 1 of these should still get the flag.
+# Otherwise, TestingDirAuthVoteGuard * may need to be used.
 NODES = Authority.getN(3) + Relay.getN(1) + Client.getN(1)
 
 ConfigureNodes(NODES)

+ 3 - 2
networks/bridges

@@ -7,7 +7,8 @@ BridgeAuthority = Node(tag="ba", authority=1, bridgeauthority=1,
 Bridge = Node(tag="br", bridge=1, relay=1, torrc="bridge.tmpl")
 BridgeClient = Node(tag="bc", torrc="bridgeclient.tmpl")
 
-NODES = Authority.getN(3) + BridgeAuthority.getN(1) + Relay.getN(8) + \
-    Bridge.getN(2) + Client.getN(2) + BridgeClient.getN(1)
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+NODES = Authority.getN(3) + BridgeAuthority.getN(1) + Relay.getN(4) + \
+    Bridge.getN(1) + Client.getN(1) + BridgeClient.getN(1)
 
 ConfigureNodes(NODES)

+ 8 - 3
networks/bridges+ipv6

@@ -8,8 +8,13 @@ Bridge = Node(tag="br", bridge=1, relay=1, torrc="bridge.tmpl")
 BridgeIPv6 = Node(tag="br", bridge=1, relay=1, ipv6_addr="[::1]", torrc="bridge-v6.tmpl")
 BridgeClient = Node(tag="bc", torrc="bridgeclient.tmpl")
 
-NODES = Authority.getN(3) + BridgeAuthority.getN(1) + Relay.getN(8) + \
-    Bridge.getN(1) + BridgeIPv6.getN(1) + \
-    Client.getN(2) + BridgeClient.getN(1)
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+# We put the IPv6 bridge first to ensure that clients try IPv6 before IPv4
+# Unfortunately, this does not prevent clients falling back to IPv4
+# TODO: create test for IPv6 only
+NODES = Authority.getN(3) + BridgeAuthority.getN(1) + \
+    Relay.getN(4) + \
+    BridgeIPv6.getN(1) + Bridge.getN(1) + \
+    Client.getN(1) + BridgeClient.getN(1)
 
 ConfigureNodes(NODES)

+ 23 - 0
networks/bridges+ipv6+hs

@@ -0,0 +1,23 @@
+Authority = Node(tag="a", authority=1, relay=1, torrc="authority.tmpl")
+Relay = Node(tag="r", relay=1, torrc="relay.tmpl")
+Client = Node(tag="c", torrc="client.tmpl")
+
+BridgeAuthority = Node(tag="ba", authority=1, bridgeauthority=1,
+                       relay=1, torrc="bridgeauthority.tmpl")
+Bridge = Node(tag="br", bridge=1, relay=1, torrc="bridge.tmpl")
+BridgeIPv6 = Node(tag="br", bridge=1, relay=1, ipv6_addr="[::1]", torrc="bridge-v6.tmpl")
+BridgeClient = Node(tag="bc", torrc="bridgeclient.tmpl")
+HS = Node(tag="h", hs=1, torrc="hs.tmpl")
+
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+# We need 6 (or 7?) authorities/relays/exits to ensure we can build HS connections
+# We put the IPv6 bridge first to ensure that clients try IPv6 before IPv4
+# Unfortunately, this does not prevent clients falling back to IPv4
+# TODO: create test for IPv6 only
+NODES = Authority.getN(3) + BridgeAuthority.getN(1) + \
+    Relay.getN(4) + \
+    BridgeIPv6.getN(1) + Bridge.getN(1) + \
+    Client.getN(1) + BridgeClient.getN(1) + \
+    HS.getN(1)
+
+ConfigureNodes(NODES)

+ 3 - 2
networks/hs

@@ -1,9 +1,10 @@
 Authority = Node(tag="a", authority=1, relay=1, torrc="authority.tmpl")
-Middle = Node(tag="m", relay=1, torrc="relay-non-exit.tmpl")
 Relay = Node(tag="r", relay=1, torrc="relay.tmpl")
 Client = Node(tag="c", torrc="client.tmpl")
 HS = Node(tag="h", hs=1, torrc="hs.tmpl")
 
-NODES = Authority.getN(4) + Middle.getN(10) + Relay.getN(6) + Client.getN(5) + HS.getN(1)
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+# We need 6 (or 7?) authorities/relays/exits to ensure we can build HS connections
+NODES = Authority.getN(3) + Relay.getN(4) + Client.getN(1) + HS.getN(1)
 
 ConfigureNodes(NODES)

+ 2 - 1
networks/middle

@@ -3,6 +3,7 @@ Middle = Node(tag="m", relay=1, torrc="relay-non-exit.tmpl")
 Relay = Node(tag="r", relay=1, torrc="relay.tmpl")
 Client = Node(tag="c", torrc="client.tmpl")
 
-NODES = Authority.getN(3) + Middle.getN(4) + Relay.getN(4) + Client.getN(2)
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+NODES = Authority.getN(3) + Middle.getN(2) + Relay.getN(3) + Client.getN(2)
 
 ConfigureNodes(NODES)

+ 3 - 2
networks/mixed

@@ -8,7 +8,8 @@ OldRelay = Node(tag="rOLD", relay=1, torrc="relay.tmpl", tor=OLD_TOR)
 Client = Node(tag="c", torrc="client.tmpl")
 OldClient = Node(tag="cOLD", torrc="client.tmpl", tor=OLD_TOR)
 
-NODES = (Authority.getN(2) + OldAuthority.getN(2) + OldRelay.getN(4) +
-      Relay.getN(4) + Client.getN(2) + OldClient.getN(2))
+# We need 8 authorities/relays/exits to ensure at least 2 get the guard flag in 0.2.6
+NODES = (Authority.getN(2) + OldAuthority.getN(2) + OldRelay.getN(2) +
+      Relay.getN(2) + Client.getN(2) + OldClient.getN(2))
 
 ConfigureNodes(NODES)

+ 8 - 5
torrc_templates/authority.tmpl

@@ -23,13 +23,14 @@ V3AuthDistDelay 2
 # This is autoconfigured by chutney, so you probably don't want to use it
 #TestingV3AuthVotingStartOffset 0
 
-# Work around situations where the Exit and Guard flags aren't being set
-# These flags are set eventually, but it takes ~30 minutes
+# Work around situations where the Exit, Guard and HSDir flags aren't being set
+# These flags are all set eventually, but it takes Guard up to ~30 minutes
 # We could be more precise here, but it's easiest just to vote everything
 # Clients are sensible enough to filter out Exits without any exit ports,
-# and Guards without ORPorts
-# If your tor doesn't recognise TestingDirAuthVoteExit, update your chutney
-# to a version that includes the issue-13161-check-torrc-options features
+# and Guards and HSDirs without ORPorts
+# If your tor doesn't recognise TestingDirAuthVoteExit/HSDir,
+# either update your chutney to a 2015 version,
+# or update your tor to a later version, most likely 0.2.6.2-final
 
 # These are all set in common.i in the Comprehensive/Rapid sections
 # Work around Exit requirements
@@ -38,3 +39,5 @@ V3AuthDistDelay 2
 #TestingMinExitFlagThreshold 0
 # Work around Guard uptime requirements
 #TestingDirAuthVoteGuard *
+# Work around HSDir uptime and ORPort connectivity requirements
+#TestingDirAuthVoteHSDir *

+ 6 - 1
torrc_templates/common.i

@@ -1,11 +1,13 @@
 TestingTorNetwork 1
 
 ## Comprehensive Bootstrap Testing Options ##
-# These typically launch a working minimal Tor network in 25s-30s
+# These typically launch a working minimal Tor network in 25s-30s,
+# and a working HS Tor network in 40-45s.
 # See authority.tmpl for a partial explanation
 #AssumeReachable 0
 #Default PathsNeededToBuildCircuits 0.6
 #Disable TestingDirAuthVoteExit
+#Disable TestingDirAuthVoteHSDir
 #Default V3AuthNIntervalsValid 3
 
 ## Rapid Bootstrap Testing Options ##
@@ -16,6 +18,7 @@ TestingTorNetwork 1
 AssumeReachable 1
 PathsNeededToBuildCircuits 0.25
 TestingDirAuthVoteExit *
+TestingDirAuthVoteHSDir *
 V3AuthNIntervalsValid 2
 
 ## Always On Testing Options ##
@@ -23,6 +26,8 @@ V3AuthNIntervalsValid 2
 TestingDirAuthVoteGuard *
 # We set TestingMinExitFlagThreshold to 0 to avoid Exit bandwidth requirements
 TestingMinExitFlagThreshold 0
+# VoteOnHidServDirectoriesV2 needs to be set for HSDirs to get the HSDir flag
+#Default VoteOnHidServDirectoriesV2 1
 
 DataDirectory $dir
 RunAsDaemon 1

+ 4 - 1
torrc_templates/hs.tmpl

@@ -5,4 +5,7 @@ Address $ip
 HiddenServiceDir ${dir}/hidden_service
 # SSH is usually a popular service that is running. This is really just to make
 # a quick way to test the HS with torsocks and have an app at the other end.
-HiddenServicePort 6000 127.0.0.1:22
+#HiddenServicePort 6000 127.0.0.1:22
+
+# Redirect requests to the port used by chutney verify
+HiddenServicePort 5858 127.0.0.1:4747