
initial commit

Justin Tracey 2 months ago
commit
6f9b7cd0ee
8 changed files with 1393 additions and 0 deletions
  1. README.txt (+2 -0)
  2. gen-onions.sh (+27 -0)
  3. mnettools.py (+347 -0)
  4. mnettortools.py (+324 -0)
  5. patch-atlas.py (+47 -0)
  6. plot_mgen.py (+597 -0)
  7. server.crt (+21 -0)
  8. server.key (+28 -0)

+ 2 - 0
README.txt

@@ -0,0 +1,2 @@
+dependencies:
+pyyaml (https://github.com/yaml/pyyaml, python3-yaml in apt)
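+(some scripts here also import stem, networkx, matplotlib, numpy, and tornettools)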

+ 27 - 0
gen-onions.sh

@@ -0,0 +1,27 @@
+#!/bin/bash
+
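+# usage: ./gen-onions.sh <hosts_dir>
+# e.g., with a hypothetical path: ./gen-onions.sh /path/to/shadow.data.template/hosts
+# <hosts_dir> should contain the *client*/ host directories and the 'hosts' file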
+hosts_dir="$1"
+for i in {0..10}; do
+    for ClientDir in "$hosts_dir"/*client*/; do
+        if [ -f "$ClientDir/$i.torrc" ] ; then
+            DataDirectory="$ClientDir/tor-$i"
+            chmod 700 "$ClientDir"/*.tor
+            chmod 700 "$DataDirectory"
+            tor --hush -f "$ClientDir/$i.torrc" --ControlPort 0 --DisableNetwork 1 --DataDirectory "$DataDirectory" &
+        fi
+    done
+    echo "terminating set $i"
+    pkill -P $$
+    wait
+done
+
+echo "replacing hosts file"
+for ClientDir in "$hosts_dir"/*client*/; do
+    for UserDir in "$ClientDir"/user*.tor; do
+        if [ -d "$UserDir" ] ; then
+            user=$(basename "$UserDir" .tor)
+            onion=$(cat "$UserDir"/hostname)
+            sed -i "s/^$user:/$onion:/g" "$hosts_dir/hosts"
+        fi
+    done
+done

+ 347 - 0
mnettools.py

@@ -0,0 +1,347 @@
+#!/usr/bin/env python3
+
+import argparse
+import glob
+import os
+import random
+import shutil
+
+from yaml import load, dump
+try:
+    from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+    from yaml import Loader, Dumper
+
+SECONDS_IN_HOUR = 60.0 * 60.0
+
+class Conversation:
+    def __init__(self, size, users, waits):
+        self.size = size
+        self.users = users
+        self.waits = waits
+
+    def merge(self, other):
+        self.users.extend(other.users)
+        self.waits.extend(other.waits)
+        return
+
+    @staticmethod
+    def merge_slice(conversations):
+        first = conversations.pop()
+        for o in conversations:
+            first.merge(o)
+        return first
+
+    def add_silent_members(self, users):
+        self.users.extend(users)
+        self.waits.extend([SECONDS_IN_HOUR] * len(users))
+
+class User:
+    def __init__(self, name, dists_path, client, tor_process):
+        self.name = name
+        self.dists_path = dists_path
+        self.client = client
+        self.tor_process = tor_process
+        self.conversations = []
+
+    def socks_port(self):
+        # default tor socks port is 9050, default tor control port is 9051
+        # each additional process needs both of those, so socks port goes up by 2
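+        # e.g. tor_process=0 -> 9050/9051 (the tor defaults); tor_process=2 -> 9054/9055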
+        return 9050 + self.tor_process * 2
+
+    def control_port(self):
+        return self.socks_port() + 1
+
+    def save(self, config):
+        assert(config['hosts'] is not None)
+        client_path = '~/.cargo/bin/mgen-client'
+        mgen_config_path = self.client + self.name + '.yaml'
+        host_name = self.client.split('/')[-2]
+        print("saving: ", self.name, flush=True)
+        host = config['hosts'][host_name]
+        process = next((p for p in host['processes'] if p['path'] == client_path), None)
+
+        tors = [p for p in host['processes'] if p['path'] == '~/.local/bin/tor']
+        torrc = '{}.torrc'.format(self.tor_process)
+        tor_datadir = "tor-{}".format(self.tor_process)
+        tor_start = tors[0]['start_time']
+        if process is None:
+            if len(tors) == 0:
+                print('Error: No tor process for client {} in shadow config.'.format(self.client))
+                exit(1)
+            proc = {
+                'path': client_path,
+                'args': 'user*.yaml',
+                'start_time': 1170,  # tor_start + 60
+                'expected_final_state': 'running'
+            }
+            host['processes'].append(proc)
+        if self.tor_process != 0 and not any('-f {}'.format(torrc) in tor['args'] for tor in tors):
+            # we haven't set up this tor client yet, handle that first
+            tor_proc = {
+                'path': tors[0]['path'],
+                'args': '--defaults-torrc torrc-defaults -f {} --DataDirectory ./{}'.format(torrc, tor_datadir),
+                'start_time': tor_start + self.tor_process,
+                'expected_final_state': 'running',
+                'environment': {'OPENBLAS_NUM_THREADS': '1'}
+            }
+            host['processes'].append(tor_proc)
+            torrc_path = self.client + torrc
+            torrc_contents = "SocksPort {}\n".format(self.socks_port())
+            torrc_contents += "ControlPort {}\n".format(self.control_port())
+            with open(torrc_path, 'w') as f:
+                f.write(torrc_contents)
+            os.mkdir(self.client + tor_datadir)
+
+        yaml_str = 'user: "{}"\n'.format(self.name)
+        # proxy starts commented out for baseline testing,
+        # a simple sed replacement can enable it
+        yaml_str += '#socks: "127.0.0.1:{}"\n'.format(self.socks_port())
+        # defaults
+        yaml_str += 'message_server: "1.1.1.2:6397"\n'
+        yaml_str += 'web_server: "1.1.1.3:6398"\n'
+        yaml_str += 'bootstrap: 5.0\n'
+        yaml_str += 'retry: 5.0\n'
+        yaml_str += 'distributions:\n'
+        with open(self.dists_path + '/S.dat') as f:
+            s = f.read().strip()
+        yaml_str += '  s: {}\n'.format(s)
+        with open(self.dists_path + '/R.dat') as f:
+            r = f.read().strip()
+        yaml_str += '  r: {}\n'.format(r)
+        weighted_format = '  {}: {{ distribution: "Weighted", weights_file: "' + self.dists_path + '/{}.dat" }}\n'
+        yaml_str += weighted_format.format('m', 'sizes')
+        yaml_str += weighted_format.format('i', 'I')
+        yaml_str += weighted_format.format('w', 'W')
+        yaml_str += weighted_format.format('a_s', 'As')
+        yaml_str += weighted_format.format('a_r', 'Ar')
+        yaml_str += 'conversations:\n'
+        for group in self.conversations:
+            yaml_str += '  - group: "{}"\n'.format(group[0].name)
+            yaml_str += '    bootstrap: {}\n'.format(group[1])
+            yaml_str += '    message_server: "{}:6397"\n'.format(group[0].server_ip)
+            yaml_str += '    web_server: "{}:6398"\n'.format(group[0].web_ip)
+        with open(mgen_config_path, 'w') as f:
+            f.write(yaml_str)
+
+def normalize_weights(weights):
+    """ Normalize weights so they sum to 1 """
+    tot = sum(weights)
+    return [w/tot for w in weights]
+
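+# a .dat distribution file holds exactly two comma-separated lines: weights first, values second,
+# e.g. (made-up numbers):
+#   0.25,0.5,0.25
+#   1,2,3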
+def read_dist_file(path):
+    with open(path) as f:
+        (weights, vals) = f.readlines()
+        vals = list(map(int, vals.split(',')))
+        weights = normalize_weights(list(map(float, weights.split(','))))
+        return vals, weights
+
+def read_dist_file_float(path):
+    with open(path) as f:
+        (weights, vals) = f.readlines()
+        vals = list(map(float, vals.split(',')))
+        weights = normalize_weights(list(map(float, weights.split(','))))
+        return vals, weights
+
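+# example invocation (hypothetical paths):
+#   ./mnettools.py --dyadic dyadic.dat --group group.dat --participants participants.dat \
+#       --config shadow.config.yaml --clients '/path/to/shadow.data.template/hosts/*client*/' \
+#       --empirical empirical/ --users 1000 --seed 1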
+def main():
+    parser = argparse.ArgumentParser(
+        description="Generate messenger clients for use with mgen and shadow.")
+    parser.add_argument('--dyadic', type=str, help='File containing the weighted distribution of the number of dyadic (1-on-1) conversations a user may have.', required=True)
+    parser.add_argument('--group', type=str, help='File containing the weighted distribution of the number of group conversations a user may have.', required=True)
+    parser.add_argument('--participants', type=str, help='File containing the weighted distribution of the number of participants in a group conversation.', required=True)
+    parser.add_argument('--config', type=str, help='The original shadow.config.yaml file; a modified copy will be placed in the same directory as mnet.shadow.config.yaml', required=True)
+    parser.add_argument('--clients', type=str, help='Glob specifying the paths to shadow host template directories where users will be assigned uniformly at random. This will also determine where the server and web server directories are created.', required=True)
+    parser.add_argument('--empirical', type=str, help='Path of directory containing the directories for each empirical user distribution data.', required=True)
+    parser.add_argument('--users', type=int, help='Number of concurrent simulated users to generate.', required=True)
+    parser.add_argument('--servers', type=int, default=1, help='Number of message and web servers to run (defaults to 1).')
+    parser.add_argument('--tors', type=int, default=0, help='Number of additional tor processes to run (if 0 or unset, clients share the original tor process; otherwise clients only use the new processes).')
+    parser.add_argument('--seed', type=int, help='RNG seed, if deterministic config generation is desired.')
+    args = parser.parse_args()
+
+    random.seed(args.seed, version=2)
+
+    print("loading config...", flush=True)
+    with open(args.config) as f:
+        config = load(f, Loader=Loader)
+    assert(config['hosts'] is not None)
+
+    print("adding servers to config...", flush=True)
+    taken_ips = {host['ip_addr'] for host in config['hosts'].values() if 'ip_addr' in host}
+    servers = []
+    webs = []
+    server_start = 300
+    server_idle = 900
+    # bump the bootstrap time to account for our bootstrap as well
+    config['general']['bootstrap_end_time'] = server_start + server_idle
+    for server_num in range(args.servers):
+        server_ip = get_free_ip(2, taken_ips)
+        servers.append(server_ip)
+        web_ip = get_free_ip(int(server_ip.split('.')[-1]) + 1, taken_ips)
+        webs.append(web_ip)
+        taken_ips |= {server_ip, web_ip}
+
+        server_name = 'server{}'.format(server_num)
+        web_name = 'web{}'.format(server_num)
+
+        config['hosts'][server_name] = {
+            'network_node_id': 2521, # FIXME: is this sufficiently stable?
+            'ip_addr': server_ip,
+            'bandwidth_down': '10000000 kilobit',
+            'bandwidth_up': '10000000 kilobit',
+            'processes': [{
+                'path': '~/.cargo/bin/mgen-server',
+                'args': ['server.crt', 'server.key', '{}:6397'.format(server_ip), str(server_idle)],
+                'start_time': str(server_start),
+                'expected_final_state': 'running'
+            }]
+        }
+        config['hosts'][web_name] = {
+            'network_node_id': 2521, # FIXME: is this sufficiently stable?
+            'ip_addr': web_ip,
+            'bandwidth_down': '10000000 kilobit',
+            'bandwidth_up': '10000000 kilobit',
+            'processes': [{
+                'path': '~/.cargo/bin/mgen-web',
+                'args': ['server.crt', 'server.key', '{}:6398'.format(web_ip)],
+                'start_time': str(server_start),
+                'expected_final_state': 'running'
+            }]
+        }
+
+    dyadic_dist_vals, dyadic_dist_weights = read_dist_file(args.dyadic)
+    group_dist_vals, group_dist_weights = read_dist_file(args.group)
+    participants_dist_vals, participants_dist_weights = read_dist_file(args.participants)
+
+    client_paths = glob.glob(args.clients)
+    empirical_users = [args.empirical + '/' + f for f in os.listdir(args.empirical)]
+
+    print("caching idle distributions...", flush=True)
+    idles = { path: read_dist_file_float(path + '/I.dat') for path in empirical_users }
+
+    conversations = {2: []}
+    users = set()
+    print("sampling users...", flush=True)
+    for i in range(args.users):
+        user = sample_user(i, empirical_users, client_paths, args.tors)
+
+        num_dyadic = sample_dyadic_conversation_count(dyadic_dist_vals, dyadic_dist_weights)
+        num_group_conversations = sample_group_conversation_count(group_dist_vals, group_dist_weights)
+
+        idle_dist_vals, idle_dist_weights = idles[user.dists_path]
+        initial_waits = sample_initial_idle(idle_dist_vals, idle_dist_weights, num_dyadic + num_group_conversations)
+
+        conversations[2].extend([Conversation(2, [user], [initial_waits.pop()]) for _ in range(num_dyadic)])
+        for c in range(num_group_conversations):
+            num_participants = sample_participant_count(participants_dist_vals, participants_dist_weights)
+            if num_participants not in conversations:
+                conversations[num_participants] = []
+            conversations[num_participants].append(Conversation(num_participants, [user], [initial_waits.pop()]))
+        users.add(user)
+
+    group_count = 0
+    for size in sorted(conversations):
+        print("creating groups of size {}...".format(size), flush=True)
+        remaining = conversations[size]
+        grouped = []
+        group = Conversation(size, [], [])
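+        # greedily merge conversations whose (single) user is not yet in the group,
+        # until the group has `size` distinct members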
+        while len(remaining) > 0:
+            if len(group.users) == size:
+                grouped.append(group)
+                group = Conversation(size, [], [])
+            for i in reversed(range(len(remaining))):
+                if remaining[i].users[0] not in group.users:
+                    group.merge(remaining.pop(i))
+                    break
+            else:
+                # no remaining users not already in the group, we have to move on
+                # (n.b. this is a python for/else, not an if/else)
+                grouped.append(group)
+                group = Conversation(size, [], [])
+                break
+        for group in grouped:
+            group.name = "group" + str(group_count)
+            server_num = random.randint(0, args.servers - 1)
+            group.server_ip = servers[server_num]
+            group.web_ip = webs[server_num]
+            #print("creating group {} (size: {}, active members: {})".format(group.name, group.size, len(group.users)))
+            if group.size == len(group.users):
+                create_group(group)
+            else:
+                # add silent members to pad out group
+                sample_from = list(users - set(group.users))
+                sample_count = group.size - len(group.users)
+                if len(sample_from) < sample_count:
+                    print("Error: trying to sample {} users from {} users not already in the group; try increasing the --users count.".format(
+                        sample_count, len(sample_from)))
+                    exit(1)
+                silent = random.sample(sample_from, sample_count)
+                group.add_silent_members(silent)
+                create_group(group, set(silent))
+            group_count += 1
+
+    print("saving groups to disk...", flush=True)
+    for user in users:
+        user.save(config)
+
+    print("saving config...", flush=True)
+    new_config = os.path.dirname(args.config) + '/mnet.shadow.config.yaml'
+    with open(new_config, 'w') as f:
+        dump(config, f, Dumper=Dumper)
+
+    print("copying cert and key...", flush=True)
+    cert = os.path.dirname(__file__) + '/server.crt'
+    key = os.path.dirname(__file__) + '/server.key'
+    split_glob = [s for s in args.clients.split('/') if s != '']
+    shadow_config_path = '/'+'/'.join(split_glob[:-1])
+    for server_num in range(args.servers):
+        server_dir = '{}/server{}'.format(shadow_config_path, server_num)
+        web_dir = '{}/web{}'.format(shadow_config_path, server_num)
+        os.makedirs(server_dir, exist_ok=True)
+        os.makedirs(web_dir, exist_ok=True)
+        shutil.copy(cert, server_dir)
+        shutil.copy(cert, web_dir)
+        shutil.copy(key, server_dir)
+        shutil.copy(key, web_dir)
+
+    print("done!")
+
+def create_group(group, silent=set()):
+    if all(n >= SECONDS_IN_HOUR for n in group.waits):
+        # every group member is going to do nothing, just drop it
+        return
+    for i, user in enumerate(group.users):
+        if user not in silent:
+            user.conversations.append((group, group.waits[i]))
+    for user in silent:
+        user.conversations.append((group, SECONDS_IN_HOUR))
+
+def sample_user(id_number, empirical_users, client_paths, tor_processes):
+    name = "user{}".format(id_number)
+    dists_path = random.choice(empirical_users)
+    client = random.choice(client_paths)
+    tor_process = (id_number % tor_processes) + 1 if tor_processes > 0 else 0
+    return User(name, dists_path, client, tor_process)
+
+def sample_participant_count(participants_dist_vals, participants_dist_weights):
+    return random.choices(participants_dist_vals, weights=participants_dist_weights)[0]
+
+def sample_dyadic_conversation_count(dyadic_dist_vals, dyadic_dist_weights):
+    return random.choices(dyadic_dist_vals, dyadic_dist_weights)[0]
+
+def sample_group_conversation_count(group_dist_vals, group_dist_weights):
+    return random.choices(group_dist_vals, group_dist_weights)[0]
+
+# takes the I (idle) distribution, scales it, and returns a list of samples
+def sample_initial_idle(idle_dist_vals, idle_dist_weights, n_samples):
+    real_bootstrap = 30
+    scaled_weights = [real_bootstrap + idle_dist_vals[i] * idle_dist_weights[i] for i in range(len(idle_dist_vals))]
+    if sum(scaled_weights) == 0.0:
+        # edge case where user always idled 0 seconds; say they were always idle instead
+        return [SECONDS_IN_HOUR] * max(1, n_samples)
+    return random.choices(idle_dist_vals, scaled_weights, k=n_samples)
+
+def get_free_ip(start, taken_ips):
+    for i in range(start, 256):
+        ip = "1.1.1.{}".format(i)
+        if ip not in taken_ips:
+            return ip
+    print("Error: no IPs remaining in 1.1.1.0/24, modify source to use a different unused block.")
+    exit(1)
+
+if __name__ == '__main__':
+    main()

+ 324 - 0
mnettortools.py

@@ -0,0 +1,324 @@
+#!/usr/bin/env python3
+
+import argparse
+import glob
+import logging
+import os
+import random
+import shutil
+import tempfile
+
+import stem.connection
+import stem.process
+
+from yaml import load, dump
+try:
+    from yaml import CLoader as Loader, CDumper as Dumper
+except ImportError:
+    from yaml import Loader, Dumper
+
+SECONDS_IN_HOUR = 60.0 * 60.0
+
+# modified from tornettools/generate_tgen.py
+def generate_onion_service_keys(tor_cmd, n):
+    with tempfile.TemporaryDirectory(prefix='tornettools-hs-keygen-') as dir_name:
+        config = {'DisableNetwork': '1', 'DataDirectory': dir_name, 'ControlPort': '9030'}
+        tor_process = stem.process.launch_tor_with_config(config,
+                                                          tor_cmd=tor_cmd,
+                                                          init_msg_handler=logging.debug,
+                                                          take_ownership=True,
+                                                          completion_percent=0)
+        controller = stem.connection.connect(control_port=('127.0.0.1', 9030))
+
+        keys = []
+
+        for x in range(n):
+            hs = controller.create_ephemeral_hidden_service(80)
+            assert hs.private_key_type == 'ED25519-V3'
+
+            keys.append((hs.private_key, hs.service_id + '.onion'))
+
+        controller.close()
+
+        # must make sure process ends before the temporary directory is removed,
+        # otherwise there's a race condition
+        tor_process.kill()
+        tor_process.wait()
+
+        return keys
+
+class Conversation:
+    def __init__(self, size, users, waits):
+        self.size = size
+        self.users = users
+        self.waits = waits
+
+    def merge(self, other):
+        self.users.extend(other.users)
+        self.waits.extend(other.waits)
+        return
+
+    @staticmethod
+    def merge_slice(conversations):
+        first = conversations.pop()
+        for o in conversations:
+            first.merge(o)
+        return first
+
+    def add_silent_members(self, users):
+        self.users.extend(users)
+        self.waits.extend([SECONDS_IN_HOUR] * len(users))
+
+class User:
+    def __init__(self, name, dists_path, client, tor_process, onion_port):
+        self.name = name
+        self.dists_path = dists_path
+        self.client = client
+        self.tor_process = tor_process
+        self.onion_port = onion_port
+        self.conversations = []
+
+    def socks_port(self):
+        # default tor socks port is 9050, default tor control port is 9051
+        # each additional process needs both of those, so socks port goes up by 2
+        return 9050 + self.tor_process * 2
+
+    def control_port(self):
+        return self.socks_port() + 1
+
+    def save(self, config):
+        assert(config['hosts'] is not None)
+        client_path = '~/.cargo/bin/mgen-peer'
+        mgen_config_path = self.client + self.name + '.yaml'
+        onion_service_path = self.client + self.name + '.tor'
+        host_name = self.client.split('/')[-2]
+        print("saving: ", self.name, flush=True)
+        host = config['hosts'][host_name]
+        process = next((p for p in host['processes'] if p['path'] == client_path), None)
+
+        tors = [p for p in host['processes'] if p['path'] == '~/.local/bin/tor']
+        torrc = '{}.torrc'.format(self.tor_process)
+        tor_datadir = "tor-{}".format(self.tor_process)
+        torrc_path = self.client + torrc
+        tor_start = tors[0]['start_time']
+        if process is None:
+            if len(tors) == 0:
+                print('Error: No tor process for client {} in shadow config.'.format(self.client))
+                exit(1)
+            proc = {
+                'path': client_path,
+                'args': '../hosts user*.yaml',
+                'start_time': tor_start + 60,
+                'expected_final_state': 'running'
+            }
+            host['processes'].append(proc)
+        if self.tor_process != 0 and not any('-f {}'.format(torrc) in tor['args'] for tor in tors):
+            # we haven't set up this tor client yet, handle that first
+            tor_proc = {
+                'path': tors[0]['path'],
+                'args': '--defaults-torrc torrc-defaults -f {} --DataDirectory ./{}'.format(torrc, tor_datadir),
+                'start_time': tor_start,
+                'expected_final_state': 'running',
+                'environment': {'OPENBLAS_NUM_THREADS': '1'}
+            }
+            host['processes'].append(tor_proc)
+            torrc_contents = "SocksPort {}\n".format(self.socks_port())
+            torrc_contents += "ControlPort {}\n".format(self.control_port())
+            with open(torrc_path, 'w') as f:
+                f.write(torrc_contents)
+            os.mkdir(self.client + tor_datadir)
+
+        with open(torrc_path, 'a') as f:
+            torrc_contents = "HiddenServiceDir {}\n".format(onion_service_path)
+            torrc_contents += "HiddenServicePort {} 127.0.0.1:{}\n".format(self.onion_port, self.onion_port)
+            f.write(torrc_contents)
+        os.makedirs(onion_service_path)
+
+        yaml_str = 'user: "{}"\n'.format(self.name)
+        yaml_str += 'socks: "127.0.0.1:{}"\n'.format(self.socks_port())
+        yaml_str += 'listen: "127.0.0.1:{}"\n'.format(self.onion_port)
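+        # the peer listens locally on onion_port; the HiddenServicePort line written above
+        # maps the onion service's public port to this local listener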
+        # defaults
+        yaml_str += 'bootstrap: 5.0\n'
+        yaml_str += 'retry: 5.0\n'
+        yaml_str += 'distributions:\n'
+        with open(self.dists_path + '/S.dat') as f:
+            s = f.read().strip()
+        yaml_str += '  s: {}\n'.format(s)
+        with open(self.dists_path + '/R.dat') as f:
+            r = f.read().strip()
+        yaml_str += '  r: {}\n'.format(r)
+        weighted_format = '  {}: {{ distribution: "Weighted", weights_file: "' + self.dists_path + '/{}.dat" }}\n'
+        yaml_str += weighted_format.format('m', 'sizes')
+        yaml_str += weighted_format.format('i', 'I')
+        yaml_str += weighted_format.format('w', 'W')
+        yaml_str += weighted_format.format('a_s', 'As')
+        yaml_str += weighted_format.format('a_r', 'Ar')
+        yaml_str += 'conversations:\n'
+        for group in self.conversations:
+            yaml_str += '  - group: "{}"\n'.format(group[0].name)
+            yaml_str += '    bootstrap: {}\n'.format(group[1])
+            yaml_str += '    recipients: {}\n'.format([user.name for user in group[0].users])
+        with open(mgen_config_path, 'w') as f:
+            f.write(yaml_str)
+
+def normalize_weights(weights):
+    """ Normalize weights so they sum to 1 """
+    tot = sum(weights)
+    return [w/tot for w in weights]
+
+def read_dist_file(path):
+    with open(path) as f:
+        (weights, vals) = f.readlines()
+        vals = list(map(int, vals.split(',')))
+        weights = normalize_weights(list(map(float, weights.split(','))))
+        return vals, weights
+
+def read_dist_file_float(path):
+    with open(path) as f:
+        (weights, vals) = f.readlines()
+        vals = list(map(float, vals.split(',')))
+        weights = normalize_weights(list(map(float, weights.split(','))))
+        return vals, weights
+
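+# example invocation (hypothetical paths):
+#   ./mnettortools.py --dyadic dyadic.dat --group group.dat --participants participants.dat \
+#       --config shadow.config.yaml --clients '/path/to/shadow.data.template/hosts/*client*/' \
+#       --empirical empirical/ --users 1000 --tors 2 --seed 1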
+def main():
+    parser = argparse.ArgumentParser(
+        description="Generate messenger clients for use with mgen and shadow.")
+    parser.add_argument('--dyadic', type=str, help='File containing the weighted distribution of the number of dyadic (1-on-1) conversations a user may have.', required=True)
+    parser.add_argument('--group', type=str, help='File containing the weighted distribution of the number of group conversations a user may have.', required=True)
+    parser.add_argument('--participants', type=str, help='File containing the weighted distribution of the number of participants in a group conversation.', required=True)
+    parser.add_argument('--config', type=str, help='The original shadow.config.yaml file; a modified copy will be placed in the same directory as mnet.shadow.config.yaml', required=True)
+    parser.add_argument('--clients', type=str, help='Glob specifying the paths to shadow host template directories where users will be assigned uniformly at random.', required=True)
+    parser.add_argument('--empirical', type=str, help='Path of directory containing the directories for each empirical user distribution data.', required=True)
+    parser.add_argument('--users', type=int, help='Number of concurrent simulated users to generate.', required=True)
+    parser.add_argument('--tors', type=int, default=0, help='Number of additional tor processes to run (if 0 or unset, clients share the original tor process; otherwise clients only use the new processes).')
+    parser.add_argument('--seed', type=int, help='RNG seed, if deterministic config generation is desired.')
+    args = parser.parse_args()
+
+    random.seed(args.seed, version=2)
+
+    print("loading config...", flush=True)
+    with open(args.config) as f:
+        config = load(f, Loader=Loader)
+    assert(config['hosts'] is not None)
+
+    dyadic_dist_vals, dyadic_dist_weights = read_dist_file(args.dyadic)
+    group_dist_vals, group_dist_weights = read_dist_file(args.group)
+    participants_dist_vals, participants_dist_weights = read_dist_file(args.participants)
+
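+    # pair each client path with a per-host onion-port counter; sample_user decrements
+    # it, so users on the same host get unique ports counting down from 65534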
+    client_paths = [[65535, g] for g in glob.glob(args.clients)]
+    empirical_users = [args.empirical + '/' + f for f in os.listdir(args.empirical)]
+
+    print("caching idle distributions...", flush=True)
+    idles = { path: read_dist_file_float(path + '/I.dat') for path in empirical_users }
+
+    conversations = {2: []}
+    users = set()
+    print("sampling users...", flush=True)
+    for i in range(args.users):
+        user = sample_user(i, empirical_users, client_paths, args.tors)
+
+        num_dyadic = sample_dyadic_conversation_count(dyadic_dist_vals, dyadic_dist_weights)
+        num_group_conversations = sample_group_conversation_count(group_dist_vals, group_dist_weights)
+
+        idle_dist_vals, idle_dist_weights = idles[user.dists_path]
+        initial_waits = sample_initial_idle(idle_dist_vals, idle_dist_weights, num_dyadic + num_group_conversations)
+
+        conversations[2].extend([Conversation(2, [user], [initial_waits.pop()]) for _ in range(num_dyadic)])
+        for c in range(num_group_conversations):
+            num_participants = sample_participant_count(participants_dist_vals, participants_dist_weights)
+            if num_participants not in conversations:
+                conversations[num_participants] = []
+            conversations[num_participants].append(Conversation(num_participants, [user], [initial_waits.pop()]))
+        users.add(user)
+
+    group_count = 0
+    for size in sorted(conversations):
+        print("creating groups of size {}...".format(size), flush=True)
+        remaining = conversations[size]
+        grouped = []
+        group = Conversation(size, [], [])
+        while len(remaining) > 0:
+            if len(group.users) == size:
+                grouped.append(group)
+                group = Conversation(size, [], [])
+            for i in reversed(range(len(remaining))):
+                if remaining[i].users[0] not in group.users:
+                    group.merge(remaining.pop(i))
+                    break
+            else:
+                # no remaining users not already in the group, we have to move on
+                # (n.b. this is a python for/else, not an if/else)
+                grouped.append(group)
+                group = Conversation(size, [], [])
+                break
+        for group in grouped:
+            group.name = "group" + str(group_count)
+            if group.size == len(group.users):
+                create_group(group)
+            else:
+                # add silent members to pad out group
+                sample_from = list(users - set(group.users))
+                sample_count = group.size - len(group.users)
+                if len(sample_from) < sample_count:
+                    print("Error: trying to sample {} users from {} users not already in the group; try increasing the --users count.".format(
+                        sample_count, len(sample_from)))
+                    exit(1)
+                silent = random.sample(sample_from, sample_count)
+                group.add_silent_members(silent)
+                create_group(group, set(silent))
+            group_count += 1
+
+    hosts_lines = ""
+    print("saving groups to disk...", flush=True)
+    for user in users:
+        user.save(config)
+        # structured for easy sed replacement with onion address generated later
+        hosts_lines += "{}:{} {}\n".format(user.name, user.onion_port, user.name)
+
+    split_glob = [s for s in args.clients.split('/') if s != '']
+    shadow_config_path = '/'+'/'.join(split_glob[:-1])
+    shadow_hosts_file = shadow_config_path + '/hosts'
+    with open(shadow_hosts_file, 'w') as f:
+        f.write(hosts_lines)
+
+    print("saving config...", flush=True)
+    new_config = os.path.dirname(args.config) + '/mnet.shadow.config.yaml'
+    with open(new_config, 'w') as f:
+        dump(config, f, Dumper=Dumper)
+
+    print("done!")
+
+def create_group(group, silent=set()):
+    if all(n >= SECONDS_IN_HOUR for n in group.waits):
+        # every group member is going to do nothing, just drop it
+        return
+    for i, user in enumerate(group.users):
+        if user not in silent:
+            user.conversations.append((group, group.waits[i]))
+    for user in silent:
+        user.conversations.append((group, SECONDS_IN_HOUR))
+
+def sample_user(id_number, empirical_users, client_paths, tor_processes):
+    name = "user{}".format(id_number)
+    dists_path = random.choice(empirical_users)
+    client = random.choice(client_paths)
+    client[0] -= 1
+    tor_process = (id_number % tor_processes) + 1 if tor_processes > 0 else 0
+    return User(name, dists_path, client[1], tor_process, client[0])
+
+def sample_participant_count(participants_dist_vals, participants_dist_weights):
+    return random.choices(participants_dist_vals, weights=participants_dist_weights)[0]
+
+def sample_dyadic_conversation_count(dyadic_dist_vals, dyadic_dist_weights):
+    return random.choices(dyadic_dist_vals, dyadic_dist_weights)[0]
+
+def sample_group_conversation_count(group_dist_vals, group_dist_weights):
+    return random.choices(group_dist_vals, group_dist_weights)[0]
+
+# takes the I (idle) distribution, scales it, and returns a list of samples
+def sample_initial_idle(idle_dist_vals, idle_dist_weights, n_samples):
+    real_bootstrap = 30
+    scaled_weights = [real_bootstrap + idle_dist_vals[i] * idle_dist_weights[i] for i in range(len(idle_dist_vals))]
+    if sum(scaled_weights) == 0.0:
+        # edge case where user always idled 0 seconds; say they were always idle instead
+        return [SECONDS_IN_HOUR] * max(1, n_samples)
+    return random.choices(idle_dist_vals, scaled_weights, k=n_samples)
+
+def get_free_ip(start, taken_ips):
+    for i in range(start, 256):
+        ip = "1.1.1.{}".format(i)
+        if ip not in taken_ips:
+            return ip
+    print("Error: no IPs remaining in 1.1.1.0/24, modify source to use a different unused block.")
+    exit(1)
+
+if __name__ == '__main__':
+    main()

+ 47 - 0
patch-atlas.py

@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+
+import sys
+import networkx as nx
+import copy
+
+if len(sys.argv) != 3:
+    print("Usage: {} atlas.gml output.gml".format(sys.argv[0]))
+    exit(1)
+
+# read first arg as a graph
+g = nx.read_gml(sys.argv[1])
+
+# add our 10gig node at a valid IP we know won't be in an atlas probe
+new_node = "node at 1.1.1.1"
+g.add_node(new_node,
+           label=new_node,
+           ip_address="1.1.1.1",
+           city_code="4744870",
+           country_code="US",
+           bandwidth_down="10000000 Kibit",
+           bandwidth_up="10000000 Kibit"
+           )
+
+# then duplicate all edges with source or dest of the node we're imitating,
+imitated_node = "node at 76.104.11.141"
+edges_to_copy = g.edges(imitated_node, data=True)
+
+# but remapped to our new node
+def remap_edge(e):
+    if e[0] == imitated_node:
+        e[0] = new_node
+    if e[1] == imitated_node:
+        e[1] = new_node
+    return e
+# (list(e) also gives us a shallow copy of e, good enough for us)
+copied_edges = map(remap_edge, [list(e) for e in edges_to_copy])
+g.add_edges_from(copied_edges)
+
+# create an edge between the new and old node
+edge = copy.deepcopy(g.get_edge_data(imitated_node, imitated_node))
+edge['label'] = "path from 1.1.1.1 to 76.104.11.141"
+g.add_edge(new_node, imitated_node)
+g[new_node][imitated_node].update(edge)
+
+# finally, save to the second arg
+nx.write_gml(g, sys.argv[2])

+ 597 - 0
plot_mgen.py

@@ -0,0 +1,597 @@
+import sys
+import os
+import argparse
+import logging
+
+from datetime import datetime
+from random import randint
+from random import seed as stdseed
+from numpy.random import seed as numpyseed
+from multiprocessing import cpu_count
+from platform import platform, uname
+
+from tornettools.util import which, make_directories
+from tornettools._version import __version__
+
+import re
+
+from itertools import cycle
+import matplotlib.pyplot as pyplot
+from matplotlib.ticker import FuncFormatter
+from matplotlib.backends.backend_pdf import PdfPages
+
+from tornettools.util import load_json_data, find_matching_files_in_dir
+
+from tornettools.plot_common import (DEFAULT_COLORS, DEFAULT_LINESTYLES, draw_cdf, draw_cdf_ci,
+                                     draw_line, draw_line_ci, quantile, set_plot_options)
+from tornettools.plot_tgen import plot_tgen
+from tornettools.plot_oniontrace import plot_oniontrace
+
+
+
+HELP_MAIN = """
+Use 'tornettools <subcommand> --help' for more info
+"""
+DESC_MAIN = """
+tornettools is a utility to guide you through the Tor network
+experimentation process using Shadow. tornettools must be run with a
+subcommand to specify a mode of operation.
+
+For more information, see https://github.com/shadow/tornettools.
+"""
+
+HELP_STAGE = """
+Process Tor metrics data for staging network generation
+"""
+DESC_STAGE = """
+Process Tor network consensuses, relay descriptors, and user files
+from Tor metrics to stage TorNet network generation.
+
+This command should be used before running generate. This command
+produces staging files that will be required for the generate
+command to succeed.
+"""
+
+HELP_GENERATE = """
+Generate TorNet network configurations
+"""
+DESC_GENERATE = """
+Loads the TorNet staging files produced with the stage command
+and uses them to generate a valid TorNet network configuration.
+
+This command should be used after running stage.
+"""
+
+HELP_SIMULATE = """
+Run a TorNet simulation in Shadow
+"""
+DESC_SIMULATE = """
+Runs a Tor simulation using Shadow and the TorNet network
+configurations files generated with the generate command.
+
+This command should be used after running generate.
+"""
+
+HELP_PARSE = """
+Parse useful data from simulation log files
+"""
+DESC_PARSE = """
+Parses log files created by simulations run with the simulate
+command; extracts and stores various useful performance metrics.
+
+This command should be used after running simulate.
+"""
+
+HELP_PLOT = """
+Plot previously parsed data to visualize results
+"""
+DESC_PLOT = """
+Visualizes various performance metrics that were extracted and
+stored with the parse command by producing graphical plots.
+
+This command should be used after running parse.
+"""
+
+HELP_ARCHIVE = """
+Cleanup and compress Shadow simulation data
+"""
+DESC_ARCHIVE = """
+Prepares a Shadow simulation directory for archival by compressing
+simulation output log files and data directories.
+
+This command can be used any time after running simulate, but
+ideally after parsing and plotting is also completed.
+"""
+
+def __setup_logging_helper(logfilename=None):
+    my_handlers = []
+
+    stdout_handler = logging.StreamHandler(sys.stdout)
+    my_handlers.append(stdout_handler)
+
+    if logfilename is not None:
+        make_directories(logfilename)
+        file_handler = logging.FileHandler(filename=logfilename)
+        my_handlers.append(file_handler)
+
+    logging.basicConfig(
+        level=logging.INFO,
+        format='%(asctime)s %(created)f [tornettools] [%(levelname)s] %(message)s',
+        datefmt='%Y-%m-%d %H:%M:%S',
+        handlers=my_handlers,
+    )
+
+    msg = "Logging system initialized! Logging events to stdout"
+    if logfilename is not None:
+        msg += " and to '{}'".format(logfilename)
+    logging.info(msg)
+
+def __setup_logging(args):
+    if args.quiet <= 1:
+        logfilename = None
+        if args.quiet == 0 and hasattr(args, 'prefix'):
+            # log to a file too
+            prefixstr = str(args.prefix)
+            funcstr = str(args.command) if args.command is not None else "none"
+            datestr = datetime.now().strftime("%Y-%m-%d.%H.%M.%S")
+            logfilename = "{}/tornettools.{}.{}.log".format(prefixstr, funcstr, datestr)
+        __setup_logging_helper(logfilename)
+    else:
+        pass # no logging
+
+def run(args):
+    logging.info("Plotting simulation results now")
+    set_plot_options()
+
+    logging.info("Plotting mgen comparisons")
+    __plot_mnet(args)
+
+    logging.info(f"Done plotting! PDF files are saved to {args.prefix}")
+
+def __pattern_for_basename(circuittype, basename):
+    if circuittype == 'exit':
+        # Data files without a circuittype contain exit circuits (from legacy
+        # tornettools runs).
+        s = basename + r'(\.' + circuittype + r')?\.json'
+    else:
+        s = basename + r'\.' + circuittype + r'\.json'
+    return re.compile(s)
+
+def __plot_mnet(args):
+    args.pdfpages = PdfPages(f"{args.prefix}/tornet.plot.pages.pdf")
+
+    net_scale = __get_simulated_network_scale(args)
+
+    logging.info("Loading mgen rtt_all data")
+    dbs = __load_tornet_datasets(args, "rtt_all.mgen.json")
+    logging.info("Plotting mgen rtt_all")
+    __plot_mgen_rtt_all(args, dbs, net_scale)
+
+    logging.info("Loading mgen rtt_timeout data")
+    dbs = __load_tornet_datasets(args, "rtt_timeout.mgen.json")
+    logging.info("Plotting mgen rtt_timeout")
+    __plot_mgen_rtt_timeout(args, dbs, net_scale)
+
+    logging.info("Loading mgen timeout_by_send data")
+    dbs = __load_tornet_datasets(args, "timeout_by_send.mgen.json")
+    logging.info("Plotting mgen rtt_by_send")
+    __plot_mgen_timeout_by_send(args, dbs, net_scale)
+
+    logging.info("Loading mgen timeout_by_receive data")
+    dbs = __load_tornet_datasets(args, "timeout_by_receive.mgen.json")
+    logging.info("Plotting mgen rtt_by_receive")
+    __plot_mgen_timeout_by_receive(args, dbs, net_scale)
+
+    logging.info("Loading mgen rtt_counts data")
+    dbs = __load_tornet_datasets(args, "counts.mgen.json")
+    logging.info("Plotting mgen rtt_counts")
+    __plot_mgen_count(args, dbs, net_scale)
+
+    args.pdfpages.close()
+
+
+def __plot_mgen_rtt_all(args, rtt_dbs, net_scale):
+    # cache the corresponding data in the 'data' keyword for __plot_cdf_figure
+    for rtt_db in rtt_dbs:
+        rtt_db['data'] = rtt_db['dataset']
+    __plot_cdf_figure(args, rtt_dbs, 'rtt_all.mgen', yscale='taillog',
+                      xscale='log',
+                      xlabel="Time (s)")
+
+def __plot_mgen_rtt_timeout(args, rtt_dbs, net_scale):
+    # cache the corresponding data in the 'data' keyword for __plot_cdf_figure
+    for rtt_db in rtt_dbs:
+        rtt_db['data'] = rtt_db['dataset']
+    __plot_cdf_figure(args, rtt_dbs, 'rtt_timeout.mgen', yscale='taillog',
+                      xlabel="Time (s)")
+
+
+def __plot_mgen_timeout_by_send(args, rtt_dbs, net_scale):
+    # cache the corresponding data in the 'data' keyword for __plot_cdf_figure
+    for rtt_db in rtt_dbs:
+        rtt_db['data'] = rtt_db['dataset']
+    __plot_cdf_figure(args, rtt_dbs, 'timeout_by_send.mgen', yscale='taillog',
+                      xscale='log',
+                      xlabel="Fraction of (user, group)'s expected receipts")
+
+def __plot_mgen_timeout_by_receive(args, rtt_dbs, net_scale):
+    # cache the corresponding data in the 'data' keyword for __plot_cdf_figure
+    for rtt_db in rtt_dbs:
+        rtt_db['data'] = rtt_db['dataset']
+    __plot_cdf_figure(args, rtt_dbs, 'timeout_by_receive.mgen', yscale='taillog',
+                      xscale='log',
+                      xlabel="Fraction of (user, group)'s receipts")
+
+
+def __plot_mgen_count(args, count_dbs, net_scale):
+    # cache the corresponding data in the 'data' keyword for __plot_cdf_figure
+    for count_db in count_dbs:
+        count_db['data'] = count_db['dataset']
+    __plot_cdf_figure(args, count_dbs, 'count.mgen',
+                      xlabel="Messages sent per user")
+
+def __plot_cdf_figure(args, dbs, filename, xscale=None, yscale=None, xlabel=None, ylabel="CDF"):
+    color_cycle = cycle(DEFAULT_COLORS)
+    linestyle_cycle = cycle(DEFAULT_LINESTYLES)
+
+    pyplot.figure()
+    lines, labels = [], []
+
+    for db in dbs:
+        if 'data' not in db or len(db['data']) < 1:
+            continue
+        elif len(db['data']) == 1:
+            (plot_func, d) = draw_cdf, db['data'][0]
+        else:
+            (plot_func, d) = draw_cdf_ci, db['data']
+
+        if len(d) < 1:
+            continue
+
+        line = plot_func(pyplot, d,
+                         yscale=yscale,
+                         label=db['label'],
+                         color=db['color'] or next(color_cycle),
+                         linestyle=next(linestyle_cycle))
+
+        lines.append(line)
+        labels.append(db['label'])
+
+    if xscale is not None:
+        pyplot.xscale(xscale)
+        if xlabel is not None:
+            xlabel += __get_scale_suffix(xscale)
+    if yscale is not None:
+        pyplot.yscale(yscale)
+        if ylabel is not None:
+            ylabel += __get_scale_suffix(yscale)
+    if xlabel is not None:
+        pyplot.xlabel(xlabel, fontsize=14)
+    if ylabel is not None:
+        pyplot.ylabel(ylabel, fontsize=14)
+
+    m = 0.025
+    pyplot.margins(m)
+
+    # the plot will exit the visible space at the 99th percentile,
+    # so make sure the x-axis is centered correctly
+    # (this is usually only a problem if using the 'taillog' yscale)
+    x_visible_max = None
+    for db in dbs:
+        if len(db['data']) >= 1 and len(db['data'][0]) >= 1:
+            q = quantile(db['data'][0], 0.99)
+            x_visible_max = q if x_visible_max is None else max(x_visible_max, q)
+    if x_visible_max is not None:
+        pyplot.xlim(xmin=max(0, -m * x_visible_max), xmax=(m + 1) * x_visible_max)
+
+    __plot_finish(args, lines, labels, filename)
+
+def __plot_finish(args, lines, labels, filename):
+    pyplot.tick_params(axis='y', which='major', labelsize=12)
+    pyplot.tick_params(axis='x', which='major', labelsize=14)
+    pyplot.tick_params(axis='both', which='minor', labelsize=8)
+    pyplot.grid(True, axis='both', which='minor', color='0.1', linestyle=':', linewidth='0.5')
+    pyplot.grid(True, axis='both', which='major', color='0.1', linestyle=':', linewidth='1.0')
+
+    pyplot.legend(lines, labels, loc='lower right', fontsize=14)
+    pyplot.tight_layout(pad=0.3)
+    pyplot.savefig(f"{args.prefix}/{filename}.{'png' if args.plot_pngs else 'pdf'}")
+    args.pdfpages.savefig()
+
+def __get_scale_suffix(scale):
+    if scale == 'taillog':
+        return " (tail log scale)"
+    elif scale == 'log':
+        return " (log scale)"
+    else:
+        return ""
+
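+# format a number of seconds as H:MM:SS, e.g. 3661 -> "1:01:01"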
+def __time_format_func(x, pos):
+    hours = int(x // 3600)
+    minutes = int((x % 3600) // 60)
+    seconds = int(x % 60)
+    return "{:d}:{:02d}:{:02d}".format(hours, minutes, seconds)
+
+def __load_tornet_datasets(args, filepattern):
+    tornet_dbs = []
+
+    label_cycle = cycle(args.labels) if args.labels is not None else None
+    color_cycle = cycle(args.colors) if args.colors is not None else None
+
+    if args.tornet_collection_path is not None:
+        for collection_dir in args.tornet_collection_path:
+            tornet_db = {
+                'dataset': [load_json_data(p) for p in find_matching_files_in_dir(collection_dir, filepattern)],
+                'label': next(label_cycle) if label_cycle is not None else os.path.basename(collection_dir),
+                'color': next(color_cycle) if color_cycle is not None else None,
+            }
+            tornet_dbs.append(tornet_db)
+
+    return tornet_dbs
+
+def __load_torperf_datasets(torperf_argset):
+    torperf_dbs = []
+
+    if torperf_argset is not None:
+        for torperf_args in torperf_argset:
+            torperf_db = {
+                'dataset': load_json_data(torperf_args[0]) if torperf_args[0] is not None else None,
+                'label': torperf_args[1] if torperf_args[1] is not None else "Public Tor",
+                'color': torperf_args[2],
+            }
+            torperf_dbs.append(torperf_db)
+
+    return torperf_dbs
+
+def __get_simulated_network_scale(args):
+    sim_info = __load_tornet_datasets(args, "simulation_info.json")
+
+    net_scale = 0.0
+    for db in sim_info:
+        for i, d in enumerate(db['dataset']):
+            if 'net_scale' in d:
+                if net_scale == 0.0:
+                    net_scale = float(d['net_scale'])
+                    logging.info(f"Found simulated network scale {net_scale}")
+                else:
+                    if float(d['net_scale']) != net_scale:
+                        logging.warning("Some of your tornet data is from networks of different scale")
+                        logging.critical(f"Found network scales {net_scale} and {float(d['net_scale'])} and they don't match")
+
+    return net_scale
+
+def __compute_torperf_error_rates(daily_counts):
+    err_rates = []
+    for day in daily_counts:
+        total = int(daily_counts[day]['requests'])
+        if total <= 0:
+            continue
+
+        timeouts = int(daily_counts[day]['timeouts'])
+        failures = int(daily_counts[day]['failures'])
+
+        err_rates.append((timeouts + failures) / float(total) * 100.0)
+    return err_rates
+
+
+def main():
+    my_formatter_class = CustomHelpFormatter
+
+    # construct the options
+    main_parser = argparse.ArgumentParser(description=DESC_MAIN, formatter_class=my_formatter_class)
+
+    main_parser.add_argument('-v', '--version',
+        help="""Prints the version of the toolkit and exits.""",
+        action="store_true", dest="do_version",
+        default=False)
+
+    main_parser.add_argument('-q', '--quiet',
+        help="""Do not write log messages to file. Use twice to also not write to stdout.""",
+        action="count", dest="quiet",
+        default=0)
+
+    main_parser.add_argument('-s', '--seed',
+        help="""Initialize tornettools' PRNGs with a seed to allow for
+            deterministic behavior. This does not affect the seed for the Shadow
+            simulation.""",
+        action="store", type=int, dest="seed", metavar="N",
+        default=None)
+
+    sub_parser = main_parser.add_subparsers(help=HELP_MAIN, dest='command')
+
+    plot_parser = sub_parser.add_parser('plot',
+        description=DESC_PLOT,
+        help=HELP_PLOT,
+        formatter_class=my_formatter_class)
+    plot_parser.set_defaults(func=run, formatter_class=my_formatter_class)
+
+    plot_parser.add_argument('tornet_collection_path',
+        help="""Path to a directory containing one or more subdirectories of parsed
+            tornet results from the 'parse' command. Confidence intervals are drawn
+            when this path contains plot data from multiple simulations.""",
+        action='store',
+        type=__type_str_dir_path_in,
+        nargs='+')
+
+    plot_parser.add_argument('-t', '--tor_metrics_path',
+        help="""Path to a tor_metrics.json file that was created by the 'stage' command,
+            which we be compared against the tornet collections. The label and color
+            to use in the graphs that we create are optional.""",
+        action=PathStringArgsAction,
+        nargs='+',
+        metavar="PATH [LABEL [COLOR]]")
+
+    plot_parser.add_argument('--prefix',
+        help="""A directory PATH prefix where the graphs generated by this script
+            will be written.""",
+        action="store",
+        type=__type_str_dir_path_out,
+        dest="prefix",
+        default=os.getcwd(),
+        metavar="PATH")
+
+    plot_parser.add_argument('-l', '--labels',
+        help="""Labels for the tornet collections to be used in the graph legends.""",
+        action='store',
+        type=str,
+        dest="labels",
+        nargs='+',
+        metavar='LABEL')
+
+    plot_parser.add_argument('-c', '--colors',
+        help="""Colors for the tornet collections to be used in the graph plots.""",
+        action='store',
+        type=str,
+        dest="colors",
+        nargs='+',
+        metavar='COLOR')
+
+    plot_parser.add_argument('-a', '--all',
+        help="""Also generate individual tgentools and oniontracetools plots for each simulation.""",
+        action="store_true",
+        dest="plot_all",
+        default=False)
+
+    plot_parser.add_argument('--pngs',
+        help="""Save individual plot images in png instead of pdf format.""",
+        action="store_true",
+        dest="plot_pngs",
+        default=False)
+
+    # get args and call the command handler for the chosen mode
+    args = main_parser.parse_args()
+
+    if not hasattr(args, "prefix") and hasattr(args, "tornet_config_path"):
+        args.prefix = args.tornet_config_path
+    if hasattr(args, "nprocesses"):
+        args.nprocesses = args.nprocesses if args.nprocesses > 0 else cpu_count()
+
+    # check if it's just a version check and we should short circuit
+    if args.do_version:
+        __setup_logging(args)
+        logging.info("tornettools version {}".format(__version__))
+        return
+
+    # if it's anything other than version, we need a subcommand
+    if args.command is None:
+        main_parser.print_usage()
+        return
+
+    # now we know we can start
+    __setup_logging(args)
+
+    # seed the pseudo-random generators
+    # if we don't have a seed, choose one and make sure we log it for reproducibility
+    if args.seed is None:
+        args.seed = randint(0, 2**31)
+    stdseed(args.seed)
+    numpyseed(args.seed)
+    logging.info("Seeded standard and numpy PRNGs with seed={}".format(args.seed))
+
+    logging.info("The argument namespace is: {}".format(str(args)))
+    logging.info("The platform is: {}".format(str(platform())))
+    logging.info("System info: {}".format(str(uname())))
+
+    # now run the configured mode
+    rv = run(args)
+
+    if rv == 0 or rv is None:
+        return 0
+    elif isinstance(rv, int):
+        return rv
+    else:
+        logging.warning(f"Unknown return value: {rv}")
+        return 1
+
+
+def __type_nonnegative_integer(value):
+    i = int(value)
+    if i < 0:
+        raise argparse.ArgumentTypeError("'%s' is an invalid non-negative int value" % value)
+    return i
+
+def __type_nonnegative_float(value):
+    i = float(value)
+    if i < 0.0:
+        raise argparse.ArgumentTypeError("'%s' is an invalid non-negative flat value" % value)
+    return i
+
+def __type_fractional_float(value):
+    i = float(value)
+    if i <= 0.0 or i > 1.0:
+        raise argparse.ArgumentTypeError("'%s' is an invalid fractional float value" % value)
+    return i
+
+def __type_str_file_path_out(value):
+    s = str(value)
+    if s == "-":
+        return s
+    p = os.path.abspath(os.path.expanduser(s))
+    make_directories(p)
+    return p
+
+def __type_str_dir_path_out(value):
+    s = str(value)
+    p = os.path.abspath(os.path.expanduser(s))
+    make_directories(p)
+    return p
+
+def __type_str_file_path_in(value):
+    s = str(value)
+    if s == "-":
+        return s
+    p = os.path.abspath(os.path.expanduser(s))
+    if not os.path.exists(p):
+        raise argparse.ArgumentTypeError(f"Path does not exist: {p}")
+    elif not os.path.isfile(p):
+        raise argparse.ArgumentTypeError(f"Path is not a file: {p}")
+    return p
+
+def __type_str_dir_path_in(value):
+    s = str(value)
+    p = os.path.abspath(os.path.expanduser(s))
+    if not os.path.exists(p):
+        raise argparse.ArgumentTypeError(f"Path does not exist: {p}")
+    elif not os.path.isdir(p):
+        raise argparse.ArgumentTypeError(f"Path is not a directory: {p}")
+    return p
+
+def type_str_file_path_in(p):
+    return __type_str_file_path_in(p)
+
+# adds the 'RawDescriptionHelpFormatter' to the ArgsDefault one
+class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+    def _fill_text(self, text, width, indent):
+        return ''.join([indent + line for line in text.splitlines(True)])
+
+# a custom action for passing in experimental data directories when plotting
+class PathStringArgsAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+        if len(values) == 0:
+            raise argparse.ArgumentError(self, "A path is required.")
+        elif len(values) > 3:
+            raise argparse.ArgumentError(self, "Must specify 3 or fewer strings.")
+
+        # get the values
+        path = values[0]
+        label = values[1] if len(values) > 1 else None
+        color = values[2] if len(values) > 2 else None
+
+        # extract and validate the path
+        path = type_str_file_path_in(path)
+
+        # remove the default
+        if "_didremovedefault" not in namespace:
+            setattr(namespace, self.dest, [])
+            setattr(namespace, "_didremovedefault", True)
+
+        # append our new arg set
+        dest = getattr(namespace, self.dest)
+        dest.append([path, label, color])
+
+if __name__ == '__main__':
+    sys.exit(main())

+ 21 - 0
server.crt

@@ -0,0 +1,21 @@
+-----BEGIN CERTIFICATE-----
+MIIDazCCAlOgAwIBAgIUZ++oU4ax9bOfOI5c+TfysW8P0UowDQYJKoZIhvcNAQEL
+BQAwRTELMAkGA1UEBhMCVVMxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
+GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMzA3MTUyMjM5NTdaFw0yMzA4
+MTQyMjM5NTdaMEUxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
+HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
+AQUAA4IBDwAwggEKAoIBAQDGE1VtF1/YTjm/yxeRcWdy/Tt0zybPUJn7VAn78T0u
+Xckr2s8L71yC513dA9pVcTUGcSqaL7JUQe1tpq1vIdK6EHKWGhz8Uzbn+RSd8gfV
+NM5MH28jb9dnKBfY7y0AcNBnWkSrJaHx4OHbgNbMaYqJmVN9YOBSrqYBXL9prtGt
+e1TpPPGAWHu5K8nb6WCd7/8g3ih6LQc2FAjcMdm0AWIAm1gGmxqu1i4swjyB4ECF
+VE7H1QUA4qL1rNLYz9boLqSEXMjIaF9nSMZ1y43yWPk8+6gYvvmvqIFj/t1ArY0C
+l2dePm+2C/lyv/XmyRNNpaZxbR4q1RRT2PdTU2vgV1SxAgMBAAGjUzBRMB0GA1Ud
+DgQWBBQ9ZhuVtY4lwGK+TuROxVVddW19rTAfBgNVHSMEGDAWgBQ9ZhuVtY4lwGK+
+TuROxVVddW19rTAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQA4
++D344kBAGa2lirPjXKMk8AbIcGOI12/g47juTJ2njj0IOEBjJKj6Sd+DonBF0ZFf
+ENiHjXXLAoQuhEbtgfUcqfOFqHUcA4rNIb9FwVVkElSNcl173JuBguv9oxO4cLsE
+a/8xMKH3pEBM/jONXSj899X7Psf9XEnOX6SOwIzvcP+9zlCHZ8I17EK1AXJnLRap
+uJy2WZONkUcEtCi7mij3Y7JCkFHYMKM6R2IEJnktfczyC/EQ4pTFJwsLPyqyb1q8
+R7I8Ea5fN95tzuB8Et6ke9Zz/UwmwPVGwhXg3ieEz5rSAYVVwrDJeYEGzADErGpf
+ZS5uF8f3OfmcADWdPRuD
+-----END CERTIFICATE-----

+ 28 - 0
server.key

@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDGE1VtF1/YTjm/
+yxeRcWdy/Tt0zybPUJn7VAn78T0uXckr2s8L71yC513dA9pVcTUGcSqaL7JUQe1t
+pq1vIdK6EHKWGhz8Uzbn+RSd8gfVNM5MH28jb9dnKBfY7y0AcNBnWkSrJaHx4OHb
+gNbMaYqJmVN9YOBSrqYBXL9prtGte1TpPPGAWHu5K8nb6WCd7/8g3ih6LQc2FAjc
+Mdm0AWIAm1gGmxqu1i4swjyB4ECFVE7H1QUA4qL1rNLYz9boLqSEXMjIaF9nSMZ1
+y43yWPk8+6gYvvmvqIFj/t1ArY0Cl2dePm+2C/lyv/XmyRNNpaZxbR4q1RRT2PdT
+U2vgV1SxAgMBAAECggEAA7+CqLBuKr2LM/UDvoew85D1Zq/TTg26RjJYSIVPeTDC
+4WKv84u9WkhGw0uC/oYogNVUHywLIbNIKwCiDEXtcwIj6vF2TjOEaNYSpOz7JzaL
+N09KdvcTMkNk1SDsfvNDjEsd3Me25WjyGSlaVy6hlZo6RVd3kzT1FPZEdHtfghr4
+cVv+fm18AxF5LF+wKJ9XnKKt1N8j+yoIJGBjn8CfUC9Za5Vi7tMvtixadg0F1UDC
+lG6vSdeW+uXsxiMBh+c42Qhhk8B321FDVdF3jw7Yn8AHpFLnOcm1UAT5fDMcpQ4a
+TnA9sTepEuXNtKZdY3u2c7FNHXzG79rvYyi34ox5QQKBgQDbkWtVZN0a6bKK+DUT
+0Fcc4RgTQRYBdEZ9aiTPm+scLGSOe5hvuvkabB/1OoVB/vOI6/mmC84B3TTzm2zo
+itMteFXXuCnOpxq+5kpjsMBmKXPorXqXOtpcVXCPziQhG/KYNNJE7qbqpoYApBH6
+WGjCD7SPUewbxI3qhd2Hms/goQKBgQDm8PpH+iXZ0dMl5CvrgpXQ/a6BR3FQk7BS
+uMXYs7yQpfc8ifDVfDz064hEJoKtwOhhfmRybwdnHegb6u/c6goNvY/cKSLAlWyF
+T89TEqB1bv+7bH8T/7iT6jDJ1m3lJQMgBccSqS7cd9+FIG7j78W3ruZL2j+O+AgX
+Lp2yrE8qEQKBgCG+f5hoH/L6542kB8Q7yKePkHulDRS8Ifk0TuP5OnDiAbJEHHFP
+cuk0pNSzYbd6z0LDwWJbfhWbQYAO6vXyH/JlBAxbKVGxLNMZ4WTgzTDmPgIMZ0LG
+sLhwCRSQwcy01tu9gnNFmjGF1iJTFNA8thzc/QrptDewRX89g4ZLrJcBAoGBAI2c
+uSyH1MwDoWF7z/7DfZDA7k/x+ic52QZwrUlbtcZRLxEdWOPgIhThlRaNMtbPEvAt
+q/SL5tMxgJIV923UycNxORT82IWVWw1ISk6bfm9kWEaamjYuOgXhtnceGRdJIehy
+AoeL3ONuUk70+2qkLe6bvjZHJ3BI4dUtTaAxjv2xAoGBALkRxCE9Pw3oaSSGmJtc
+/JnXjE8mwDQxk+wKYFBPy08F8vltTAISZeqKx2X+RZeQ+zMa8+vT7QMMeYFw1NEE
+bzyo8KWv4Vx1vD2WL5dDnY6cAPdcjzTkRletvqBno2E0kyLeexkv1hnPhGarZNsM
+eaGu3kM8xsKbjqJ7D6xFVWz0
+-----END PRIVATE KEY-----