|
@@ -0,0 +1,182 @@
|
|
|
+#!/usr/bin/env python3
|
|
|
+#
|
|
|
+import subprocess
|
|
|
+import logging
|
|
|
+import os
|
|
|
+import sys
|
|
|
+#
|
|
|
def start_chutney_network(chutney_path, tor_path, network_file, controlling_pid=None):
    """Launch a Chutney test network using tools/test-network.sh.

    Args:
        chutney_path: path to the Chutney checkout.
        tor_path: path to the Tor build to run.
        network_file: Chutney network definition file.
        controlling_pid: if not None, passed as --controlling-pid so the
            network is torn down when that process exits.

    Raises:
        subprocess.CalledProcessError: if the script exits non-zero; the
            captured output is logged before re-raising.
    """
    cmd = [
        os.path.join(chutney_path, 'tools/test-network.sh'),
        '--chutney-path', chutney_path,
        '--tor-path', tor_path,
        '--stop-time', '-1',
        '--network', network_file,
    ]
    if controlling_pid is not None:
        cmd += ['--controlling-pid', str(controlling_pid)]
    try:
        # Capture stdout+stderr together so a failure can be logged verbatim.
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        logging.error('Chutney error:\n' + err.output.decode(sys.stdout.encoding))
        raise
|
|
|
+#
|
|
|
def stop_chutney_network(chutney_path, network_file):
    """Stop a running Chutney network via the 'chutney stop' subcommand.

    Args:
        chutney_path: path to the Chutney checkout.
        network_file: the network definition file the network was started with.

    Raises:
        subprocess.CalledProcessError: if Chutney exits non-zero; the
            captured output is logged before re-raising.
    """
    cmd = [os.path.join(chutney_path, 'chutney'), 'stop', network_file]
    try:
        # Merge stderr into stdout so the whole transcript is available on error.
        subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        logging.error('Chutney error:\n' + err.output.decode(sys.stdout.encoding))
        raise
|
|
|
+#
|
|
|
class ChutneyNetwork:
    """Context-manager wrapper around a running Chutney network.

    The network is started as a side effect of construction and stopped
    either explicitly via stop() or automatically on context exit.
    """

    def __init__(self, chutney_path, tor_path, network_file, controlling_pid=None):
        # Remember only what stop() needs later.
        self.chutney_path = chutney_path
        self.network_file = network_file
        start_chutney_network(chutney_path, tor_path, network_file,
                              controlling_pid=controlling_pid)

    def stop(self):
        """Shut down the network started by this object."""
        stop_chutney_network(self.chutney_path, self.network_file)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always stop the network, even if the body raised.
        self.stop()
|
|
|
+#
|
|
|
class Node:
    """A single Chutney node description built from arbitrary keyword options.

    Options are stored verbatim and rendered by __str__ as a
    'Node(key=value, ...)' expression suitable for a Chutney network file.
    """

    def __init__(self, **kwargs):
        # Keep every keyword as-is; insertion order is preserved for __str__.
        self.options = kwargs

    def guess_nickname(self, index):
        """
        This guesses the nickname based on the format Chutney uses. There is
        no good way to get the actual value.
        """
        # Chutney names nodes as a zero-padded index followed by the tag.
        return '{:03}{}'.format(index, self.options['tag'])

    def _value_formatter(self, value):
        """Render an option value as Python source (strings get quoted)."""
        # isinstance (not type ==) so str subclasses are also quoted.
        if isinstance(value, str):
            return "'{}'".format(value)
        return value

    def __str__(self):
        arg_value_pairs = ['{}={}'.format(x, self._value_formatter(self.options[x]))
                           for x in self.options]
        return 'Node({})'.format(', '.join(arg_value_pairs))
|
|
|
+#
|
|
|
def create_compact_chutney_config(nodes):
    """Render (name, count, options) triples as a compact Chutney config.

    Each triple becomes a 'name = <options>' line; the NODES expression
    multiplies each definition via getN(count). Returns None for an
    empty node list.
    """
    if not nodes:
        return None
    definitions = ['{} = {}\n'.format(name, str(options))
                   for (name, count, options) in nodes]
    node_exprs = ['{}.getN({})'.format(name, count)
                  for (name, count, options) in nodes]
    parts = [
        ''.join(definitions),
        '\n',
        'NODES = {}\n'.format(' + '.join(node_exprs)),
        '\n',
        'ConfigureNodes(NODES)',
    ]
    return ''.join(parts)
|
|
|
+#
|
|
|
def create_chutney_config(nodes):
    """Render an explicit per-node Chutney config from a list of nodes.

    Each node is stringified into the NODES list literal, one per line.
    Returns None for an empty node list.
    """
    if not nodes:
        return None
    node_list = ', \n'.join(str(node) for node in nodes)
    return 'NODES = [{}]\n\nConfigureNodes(NODES)'.format(node_list)
|
|
|
+#
|
|
|
def read_fingerprint(nickname, chutney_path):
    """Return the fingerprint of the Chutney node with the given nickname.

    Reads <chutney_path>/net/nodes/<nickname>/fingerprint, whose content
    is '<nickname> <fingerprint>', and returns the second field.

    Returns:
        The fingerprint string, or None if the file cannot be read (for
        example because the node has not finished bootstrapping yet).
    """
    fingerprint_path = os.path.join(chutney_path, 'net', 'nodes',
                                    nickname, 'fingerprint')
    try:
        with open(fingerprint_path, 'r') as f:
            return f.read().strip().split(' ')[1]
    except IOError:
        # Missing/unreadable file is expected early in bootstrap; signal
        # with None rather than raising.
        return None
|
|
|
+#
|
|
|
def numa_scheduler(num_processors_needed, numa_nodes):
    """
    Finds the numa node with the most physical cores remaining and
    assigns physical cores (typically 2 virtual processors) until
    the process has enough processors.

    Mutates numa_nodes: assigned cores are removed from the chosen
    node's 'physical_cores' list.
    """
    core_counts = {node: len(info['physical_cores'])
                   for node, info in numa_nodes.items()}
    # Most free cores wins; ties break toward the lowest node id.
    best_node = max(core_counts, key=lambda node: (core_counts.get(node), -node))
    assigned = []
    while len(assigned) < num_processors_needed:
        remaining = numa_nodes[best_node]['physical_cores']
        # Take the whole first core, so this may hand out more
        # processors than were requested.
        assigned.extend(remaining[0])
        # Rebind to a new list (rather than pop) to match the original
        # slicing semantics exactly.
        numa_nodes[best_node]['physical_cores'] = remaining[1:]
    return (best_node, assigned)
|
|
|
+#
|
|
|
if __name__ == '__main__':
    import time
    import tempfile
    import numa  # project-local module — provides NUMA topology helpers; not stdlib
    #
    logging.basicConfig(level=logging.DEBUG)
    #
    # NOTE(review): hard-coded developer paths — parameterize before wider use.
    chutney_path = '/home/sengler/code/measureme/chutney'
    tor_path = '/home/sengler/code/measureme/tor'
    #
    # Earlier compact-config experiments, kept for reference:
    #nodes = [('authority', 2, Node(tag='a', relay=1, authority=1, torrc='authority.tmpl')),
    #         ('other_relay', 14, Node(tag='r', relay=1, torrc='relay-non-exit.tmpl')),
    #         ('exit_relay', 1, Node(tag='r', exit=1, torrc='relay.tmpl')),
    #         ('client', 16, Node(tag='c', client=1, torrc='client.tmpl'))]
    #nodes = [('authority', 2, Node(tag='a', relay=1, num_cpus=2, authority=1, torrc='authority.tmpl')),
    #         ('other_relay', 2, Node(tag='r', relay=1, num_cpus=2, torrc='relay-non-exit.tmpl')),
    #         ('exit_relay', 1, Node(tag='r', exit=1, num_cpus=2, torrc='relay.tmpl')),
    #         ('client', 2, Node(tag='c', client=1, num_cpus=1, torrc='client.tmpl'))]
    #
    # Network layout: 2 authorities, 2 non-exit relays, 1 exit, 2 clients.
    nodes = [Node(tag='a', relay=1, num_cpus=2, authority=1, torrc='authority.tmpl') for _ in range(2)] + \
            [Node(tag='r', relay=1, num_cpus=2, torrc='relay-non-exit.tmpl') for _ in range(2)] + \
            [Node(tag='e', exit=1, num_cpus=2, torrc='relay.tmpl') for _ in range(1)] + \
            [Node(tag='c', client=1, num_cpus=1, torrc='client.tmpl') for _ in range(2)]
    #
    # Assign each node a NUMA node and processor set, consuming cores
    # from numa_remaining as we go.
    numa_remaining = numa.get_numa_overview()
    numa_sets = []
    for node in nodes:
        num_cpus = node.options['num_cpus']
        # Round odd requests up: the scheduler hands out whole physical
        # cores (typically two virtual processors each).
        if num_cpus%2 != 0:
            num_cpus += 1
        #
        (numa_node, processors) = numa_scheduler(num_cpus, numa_remaining)
        node.options['numa_settings'] = (numa_node, processors)
        numa_sets.append((numa_node, processors))
    #
    print(numa_sets)
    # Whatever cores were not assigned above, flattened into ranges.
    unused_processors = numa.generate_range_list([z for node in numa_remaining for y in numa_remaining[node]['physical_cores'] for z in y])
    print(unused_processors)
    #
    # Nicknames are guessed (see Node.guess_nickname) — Chutney does not
    # expose the real values.
    nicknames = [nodes[x].guess_nickname(x) for x in range(len(nodes))]
    print(nicknames)
    #
    # Write the generated network file to a temp path, run the network,
    # and always clean the temp file up afterwards.
    (fd, tmp_network_file) = tempfile.mkstemp(prefix='chutney-network-')
    try:
        with os.fdopen(fd, mode='w') as f:
            #f.write(create_compact_chutney_config(nodes))
            f.write(create_chutney_config(nodes))
        #
        with ChutneyNetwork(chutney_path, tor_path, tmp_network_file) as net:
            # do stuff here
            fingerprints = []
            for nick in nicknames:
                fingerprints.append(read_fingerprint(nick, chutney_path))
            #
            print(fingerprints)
            time.sleep(5)
        #
    finally:
        os.remove(tmp_network_file)
    #
#
|
|
|
+#
|