@@ -1,2383 +0,0 @@
-
-import StringIO
-import string
-import re
-import datetime
-import gzip
-import os.path
-import json
-import math
-import sys
-import urllib
-import urllib2
-import hashlib
-import dateutil.parser
-import copy
-import re
-
-from stem.descriptor import DocumentHandler
-from stem.descriptor.remote import get_consensus, get_server_descriptors, MAX_FINGERPRINTS
-
-import logging
-logging.root.name = ''
-
-HAVE_IPADDRESS = False
-try:
-  import ipaddress
-  HAVE_IPADDRESS = True
-except ImportError:
-  logging.warning('Unable to import ipaddress, please install py2-ipaddress.' +
-                  ' A fallback list will be created, but optional netblock' +
-                  ' analysis will not be performed.')
-
-FALLBACK_FORMAT_VERSION = '2.0.0'
-SECTION_SEPARATOR_BASE = '====='
-SECTION_SEPARATOR_COMMENT = '/* ' + SECTION_SEPARATOR_BASE + ' */'
-
-OUTPUT_CANDIDATES = False
-
-PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True
-
-PERFORM_IPV6_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else False
-
-MUST_BE_RUNNING_NOW = (PERFORM_IPV4_DIRPORT_CHECKS
-                       or PERFORM_IPV6_DIRPORT_CHECKS)
-
-DOWNLOAD_MICRODESC_CONSENSUS = True
-
-REASONABLY_LIVE_TIME = 24*60*60
-
-OUTPUT_COMMENTS = True if OUTPUT_CANDIDATES else False
-
-CONTACT_COUNT = True if OUTPUT_CANDIDATES else False
-
-OUTPUT_SORT_FIELD = 'contact' if OUTPUT_CANDIDATES else 'fingerprint'
-
-ONIONOO = 'https://onionoo.torproject.org/'
-
-LOCAL_FILES_ONLY = False
-
-INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False
-
-WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
-FALLBACK_FILE_NAME = 'src/app/config/fallback_dirs.inc'
-
-MAX_LIST_FILE_SIZE = 1024 * 1024
-
-ADDRESS_AND_PORT_STABLE_DAYS = 90
-
-MAX_DOWNTIME_DAYS = 0 if MUST_BE_RUNNING_NOW else 7
-
-CUTOFF_RUNNING = .50
-CUTOFF_V2DIR = .50
-
-CUTOFF_GUARD = .00
-
-PERMITTED_BADEXIT = .00
-
-AGE_ALPHA = 0.99
-
-ONIONOO_SCALE_ONE = 999.
-
-_FB_POG = 0.2
-FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG
-
-MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 200
-
-MIN_FALLBACK_COUNT = 0 if OUTPUT_CANDIDATES else MAX_FALLBACK_COUNT*0.5
-
-MAX_FALLBACKS_PER_IP = 1
-MAX_FALLBACKS_PER_IPV4 = MAX_FALLBACKS_PER_IP
-MAX_FALLBACKS_PER_IPV6 = MAX_FALLBACKS_PER_IP
-MAX_FALLBACKS_PER_CONTACT = 7
-MAX_FALLBACKS_PER_FAMILY = 7
-
-EXIT_BANDWIDTH_FRACTION = 1.0
-
-MIN_BANDWIDTH = 50.0 * 10.0 * 1024.0
-
-CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0
-
-CONSENSUS_DOWNLOAD_RETRY = True
-
-def parse_ts(t):
-  return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S")
-
-def remove_bad_chars(raw_string, bad_char_list):
-  cleansed_string = raw_string
-  for c in bad_char_list:
-    cleansed_string = cleansed_string.replace(c, '')
-  return cleansed_string
-
-def cleanse_unprintable(raw_string):
-  cleansed_string = ''
-  for c in raw_string:
-    if c in string.printable:
-      cleansed_string += c
-  return cleansed_string
-
-def cleanse_whitespace(raw_string):
-  cleansed_string = raw_string
-  for c in string.whitespace:
-    cleansed_string = cleansed_string.replace(c, ' ')
-  return cleansed_string
-
-def cleanse_c_multiline_comment(raw_string):
-  cleansed_string = raw_string
-  cleansed_string = cleanse_whitespace(cleansed_string)
-  cleansed_string = cleanse_unprintable(cleansed_string)
-  bad_char_list = '*/'
-  bad_char_list += '\0'
-  bad_char_list += ','
-  bad_char_list += '='
-  cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
-  return cleansed_string
-
-def cleanse_c_string(raw_string):
-  cleansed_string = raw_string
-  cleansed_string = cleanse_whitespace(cleansed_string)
-  cleansed_string = cleanse_unprintable(cleansed_string)
-  bad_char_list = '"'
-  bad_char_list += '\\'
-  bad_char_list += '\0'
-  bad_char_list += ','
-  bad_char_list += '='
-  cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
-  return cleansed_string
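
These cleansers exist because relay-supplied fields (nicknames, contact
lines) end up inside a generated C source file: '*/' would terminate the
surrounding comment early, and ',' or '=' would corrupt the list format.
A standalone sketch of the same idea, with a made-up hostile string (not
part of the original script):

# Minimal sketch: remove each disallowed character, as remove_bad_chars()
# above does, after collapsing whitespace to single spaces.
def strip_chars(s, bad):
  for c in bad:
    s = s.replace(c, '')
  return s

hostile = 'admin */ , = end-comment-early'
safe = strip_chars(' '.join(hostile.split()), '*/\0,=')
print(safe)  # the comment terminator and the separators are gone
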
-
-fetch_source = {}
-
-def register_fetch_source(what, url, relays_published, version):
-  fetch_source[what] = {}
-  fetch_source[what]['url'] = url
-  fetch_source[what]['relays_published'] = relays_published
-  fetch_source[what]['version'] = version
-
-def fetch_source_list():
-  return sorted(fetch_source.keys())
-
-def describe_fetch_source(what):
-  desc = '/*'
-  desc += '\n'
-  desc += 'Onionoo Source: '
-  desc += cleanse_c_multiline_comment(what)
-  desc += ' Date: '
-  desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published'])
-  desc += ' Version: '
-  desc += cleanse_c_multiline_comment(fetch_source[what]['version'])
-  desc += '\n'
-  desc += 'URL: '
-  desc += cleanse_c_multiline_comment(fetch_source[what]['url'])
-  desc += '\n'
-  desc += '*/'
-  return desc
-
-def write_to_file(str, file_name, max_len):
-  try:
-    with open(file_name, 'w') as f:
-      f.write(str[0:max_len])
-  except EnvironmentError, error:
-    logging.error('Writing file %s failed: %d: %s'%
-                  (file_name,
-                   error.errno,
-                   error.strerror)
-                  )
-
-def read_from_file(file_name, max_len):
-  try:
-    if os.path.isfile(file_name):
-      with open(file_name, 'r') as f:
-        return f.read(max_len)
-  except EnvironmentError, error:
-    logging.info('Loading file %s failed: %d: %s'%
-                 (file_name,
-                  error.errno,
-                  error.strerror)
-                 )
-  return None
-
-def parse_fallback_file(file_name):
-  file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE)
-  file_data = cleanse_unprintable(file_data)
-  file_data = remove_bad_chars(file_data, '\n"\0')
-  file_data = re.sub('/\*.*?\*/', '', file_data)
-  file_data = file_data.replace(',', '\n')
-  file_data = file_data.replace(' weight=10', '')
-  return file_data
-
-def load_possibly_compressed_response_json(response):
-  if response.info().get('Content-Encoding') == 'gzip':
-    buf = StringIO.StringIO( response.read() )
-    f = gzip.GzipFile(fileobj=buf)
-    return json.load(f)
-  else:
-    return json.load(response)
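
The helper above inflates the body by hand because urllib2 does not decode
gzip content encoding itself. A rough Python 3 equivalent of the same
check-the-header-then-decompress pattern (the fetch_json wrapper is
illustrative, not part of this script):

import gzip
import io
import json
import urllib.request

def fetch_json(url):
  request = urllib.request.Request(url, headers={'Accept-Encoding': 'gzip'})
  with urllib.request.urlopen(request) as response:
    body = response.read()
    # Only gunzip when the server actually compressed the body.
    if response.headers.get('Content-Encoding') == 'gzip':
      body = gzip.GzipFile(fileobj=io.BytesIO(body)).read()
    return json.loads(body)
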
-
-def load_json_from_file(json_file_name):
-  try:
-    with open(json_file_name, 'r') as f:
-      return json.load(f)
-  except EnvironmentError, error:
-    raise Exception('Reading not-modified json file %s failed: %d: %s'%
-                    (json_file_name,
-                     error.errno,
-                     error.strerror)
-                    )
-
-def datestr_to_datetime(datestr):
-  if datestr is not None:
-    dt = dateutil.parser.parse(datestr)
-  else:
-    dt = datetime.datetime.utcfromtimestamp(0)
-  dt = dt.replace(tzinfo=None)
-  return dt
-
-def onionoo_fetch(what, **kwargs):
-  params = kwargs
-  params['type'] = 'relay'
-  params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS)
-  params['last_seen_days'] = '-%d'%(MAX_DOWNTIME_DAYS)
-  params['flag'] = 'V2Dir'
-  url = ONIONOO + what + '?' + urllib.urlencode(params)
-
-  base_file_name = what + '-' + hashlib.sha1(url).hexdigest()
-
-  full_url_file_name = base_file_name + '.full_url'
-  MAX_FULL_URL_LENGTH = 1024
-
-  last_modified_file_name = base_file_name + '.last_modified'
-  MAX_LAST_MODIFIED_LENGTH = 64
-
-  json_file_name = base_file_name + '.json'
-
-  if LOCAL_FILES_ONLY:
-    response_json = load_json_from_file(json_file_name)
-  else:
-    write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH)
-
-    request = urllib2.Request(url)
-    request.add_header('Accept-encoding', 'gzip')
-
-    last_mod_date = read_from_file(last_modified_file_name,
-                                   MAX_LAST_MODIFIED_LENGTH)
-    if last_mod_date is not None:
-      request.add_header('If-modified-since', last_mod_date)
-
-    last_mod = datestr_to_datetime(last_mod_date)
-
-    required_freshness = datetime.datetime.utcnow()
-    required_freshness = required_freshness.replace(tzinfo=None)
-    required_freshness -= datetime.timedelta(hours=24)
-
-    response_code = 0
-    try:
-      response = urllib2.urlopen(request)
-      response_code = response.getcode()
-    except urllib2.HTTPError, error:
-      response_code = error.code
-      if response_code == 304:
-        pass
-      else:
-        raise Exception("Could not get " + url + ": "
-                        + str(error.code) + ": " + error.reason)
-
-    if response_code == 200:
-      last_mod = datestr_to_datetime(response.info().get('Last-Modified'))
-
-    if last_mod < required_freshness:
-      if last_mod_date is not None:
-        date_message = "Outdated data: last updated " + last_mod_date
-      else:
-        date_message = "No data: never downloaded "
-      raise Exception(date_message + " from " + url)
-
-    if response_code == 200:
-      response_json = load_possibly_compressed_response_json(response)
-
-      with open(json_file_name, 'w') as f:
-        json.dump(response_json, f, separators=(',',':'))
-
-      if response.info().get('Last-modified') is not None:
-        write_to_file(response.info().get('Last-Modified'),
-                      last_modified_file_name,
-                      MAX_LAST_MODIFIED_LENGTH)
-
-    elif response_code == 304:
-      response_json = load_json_from_file(json_file_name)
-
-    else:
-      raise Exception("Unexpected HTTP response code to " + url + ": "
-                      + str(response_code))
-
-  register_fetch_source(what,
-                        url,
-                        response_json['relays_published'],
-                        response_json['version'])
-
-  return response_json
-
-def fetch(what, **kwargs):
-  return onionoo_fetch(what, **kwargs)
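
onionoo_fetch() above implements a conditional-GET cache: it stores the
Last-Modified header beside the downloaded JSON, replays it as
If-Modified-Since on the next run, and reuses the on-disk copy when the
server answers 304. A compressed Python 3 sketch of just that scheme
(cached_fetch, its file handling, and its error handling are simplified
illustrations, not this script's API):

import json
import urllib.error
import urllib.request

def cached_fetch(url, cache_path, last_modified=None):
  request = urllib.request.Request(url)
  if last_modified is not None:
    request.add_header('If-Modified-Since', last_modified)
  try:
    with urllib.request.urlopen(request) as response:
      data = json.loads(response.read())
      with open(cache_path, 'w') as f:      # refresh the cache on 200
        json.dump(data, f, separators=(',', ':'))
      return data, response.headers.get('Last-Modified')
  except urllib.error.HTTPError as error:
    if error.code != 304:
      raise
  with open(cache_path) as f:               # 304: cached copy is current
    return json.load(f), last_modified
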
-
-class Candidate(object):
-  CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow()
-                            - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))
-
-  def __init__(self, details):
-    for f in ['fingerprint', 'nickname', 'last_changed_address_or_port',
-              'consensus_weight', 'or_addresses', 'dir_address']:
-      if not f in details: raise Exception("Document has no %s field."%(f,))
-
-    if not 'contact' in details:
-      details['contact'] = None
-    if not 'flags' in details or details['flags'] is None:
-      details['flags'] = []
-    if (not 'advertised_bandwidth' in details
-        or details['advertised_bandwidth'] is None):
-      details['advertised_bandwidth'] = 0
-    if (not 'effective_family' in details
-        or details['effective_family'] is None):
-      details['effective_family'] = []
-    if not 'platform' in details:
-      details['platform'] = None
-    details['last_changed_address_or_port'] = parse_ts(
-                                      details['last_changed_address_or_port'])
-    self._data = details
-    self._stable_sort_or_addresses()
-
-    self._fpr = self._data['fingerprint']
-    self._running = self._guard = self._v2dir = 0.
-    self._split_dirport()
-    self._compute_orport()
-    if self.orport is None:
-      raise Exception("Failed to get an orport for %s."%(self._fpr,))
-    self._compute_ipv6addr()
-    if not self.has_ipv6():
-      logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,))
-    self._compute_version()
-    self._extra_info_cache = None
-
-  def _stable_sort_or_addresses(self):
-    self._data['or_addresses_raw'] = self._data['or_addresses']
-    or_address_primary = self._data['or_addresses'][:1]
-    or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:])
-    or_addresses_stable = or_address_primary + or_addresses_secondaries_stable
-    self._data['or_addresses'] = or_addresses_stable
-
-  def get_fingerprint(self):
-    return self._fpr
-
-  @staticmethod
-  def is_valid_ipv4_address(address):
-    if not isinstance(address, (str, unicode)):
-      return False
-
-    if address.count(".") != 3:
-      return False
-
-    for entry in address.split("."):
-      if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
-        return False
-      elif entry[0] == "0" and len(entry) > 1:
-        return False
-
-    return True
-
-  @staticmethod
-  def is_valid_ipv6_address(address):
-    if not isinstance(address, (str, unicode)):
-      return False
-
-    address = address[1:-1]
-
-    colon_count = address.count(":")
-
-    if colon_count > 7:
-      return False
-    elif colon_count != 7 and not "::" in address:
-      return False
-    elif address.count("::") > 1 or ":::" in address:
-      return False
-
-    found_ipv4_on_previous_entry = False
-    for entry in address.split(":"):
-      if found_ipv4_on_previous_entry:
-        return False
-      if not re.match("^[0-9a-fA-F]{0,4}$", entry):
-        if not Candidate.is_valid_ipv4_address(entry):
-          return False
-        else:
-          found_ipv4_on_previous_entry = True
-
-    return True
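
The hand-rolled validators above exist because the ipaddress module may be
missing (see the HAVE_IPADDRESS fallback at the top of the file). Where
the module is available, the same checks collapse to a try/except, as in
this Python 3 sketch (is_valid_ip and the documentation addresses are
illustrative; py2-ipaddress additionally wants unicode input):

import ipaddress

def is_valid_ip(addr, version):
  # Onionoo wraps IPv6 addresses in square brackets; strip them first.
  if addr.startswith('[') and addr.endswith(']'):
    addr = addr[1:-1]
  try:
    return ipaddress.ip_address(addr).version == version
  except ValueError:
    return False

assert is_valid_ip('203.0.113.1', 4)
assert is_valid_ip('[2001:db8::1]', 6)
assert not is_valid_ip('203.0.113.256', 4)
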
-
-  def _split_dirport(self):
-    (self.dirip, _dirport) = self._data['dir_address'].split(':', 2)
-    self.dirport = int(_dirport)
-
-  def _compute_orport(self):
-    self._split_dirport()
-    self.orport = None
-    for i in self._data['or_addresses']:
-      if i != self._data['or_addresses'][0]:
-        logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i))
-      (ipaddr, port) = i.rsplit(':', 1)
-      if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr):
-        self.orport = int(port)
-        return
-
-  def _compute_ipv6addr(self):
-    self.ipv6addr = None
-    self.ipv6orport = None
-
-    for i in self._data['or_addresses']:
-      (ipaddr, port) = i.rsplit(':', 1)
-      if (port == self.orport) and Candidate.is_valid_ipv6_address(ipaddr):
-        self.ipv6addr = ipaddr
-        self.ipv6orport = int(port)
-        return
-
-    for i in self._data['or_addresses']:
-      (ipaddr, port) = i.rsplit(':', 1)
-      if Candidate.is_valid_ipv6_address(ipaddr):
-        self.ipv6addr = ipaddr
-        self.ipv6orport = int(port)
-        return
-
-  def _compute_version(self):
-    self._data['version'] = None
-    if self._data['platform'] is None:
-      return
-
-    tokens = self._data['platform'].split()
-    for token in tokens:
-      vnums = token.split('.')
-      if (len(vnums) >= 4 and vnums[0].isdigit() and vnums[1].isdigit() and
-          vnums[2].isdigit()):
-        self._data['version'] = token
-        return
-
-  STALE_CONSENSUS_VERSIONS = ['0.2.9.1-alpha-dev',
-                              '0.2.9.2-alpha',
-                              '0.2.9.2-alpha-dev',
-                              '0.2.9.3-alpha',
-                              '0.2.9.3-alpha-dev',
-                              '0.2.9.4-alpha',
-                              '0.2.9.4-alpha-dev',
-                              '0.3.0.0-alpha-dev'
-                              ]
-
-  def is_valid_version(self):
-    if not self._data.has_key('recommended_version'):
-      log_excluded('%s not a candidate: no recommended_version field',
-                   self._fpr)
-      return False
-    if not self._data['recommended_version']:
-      log_excluded('%s not a candidate: version not recommended', self._fpr)
-      return False
-
-    if not self._data.has_key('version'):
-      log_excluded('%s not a candidate: no version field', self._fpr)
-      return False
-    if self._data['version'] in Candidate.STALE_CONSENSUS_VERSIONS:
-      logging.warning('%s not a candidate: version delivers stale consensuses',
-                      self._fpr)
-      return False
-    return True
-
-  @staticmethod
-  def _extract_generic_history(history, which='unknown'):
-    generic_history = []
-
-    periods = history.keys()
-    periods.sort(key = lambda x: history[x]['interval'])
-    now = datetime.datetime.utcnow()
-    newest = now
-    for p in periods:
-      h = history[p]
-      interval = datetime.timedelta(seconds = h['interval'])
-      this_ts = parse_ts(h['last'])
-
-      if (len(h['values']) != h['count']):
-        logging.warning('Inconsistent value count in %s document for %s'
-                        %(p, which))
-      for v in reversed(h['values']):
-        if (this_ts <= newest):
-          agt1 = now - this_ts
-          agt2 = interval
-          agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600)
-                     * 10**6) / 10**6
-          agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600)
-                     * 10**6) / 10**6
-          generic_history.append(
-            { 'age': agetmp1,
-              'length': agetmp2,
-              'value': v
-            })
-          newest = this_ts
-        this_ts -= interval
-
-      if (this_ts + interval != parse_ts(h['first'])):
-        logging.warning('Inconsistent time information in %s document for %s'
-                        %(p, which))
-
-    return generic_history
-
-  @staticmethod
-  def _avg_generic_history(generic_history):
-    a = []
-    for i in generic_history:
-      if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600):
-        continue
-      if (i['length'] is not None
-          and i['age'] is not None
-          and i['value'] is not None):
-        w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24))
-        a.append( (i['value'] * w, w) )
-
-    sv = math.fsum(map(lambda x: x[0], a))
-    sw = math.fsum(map(lambda x: x[1], a))
-
-    if sw == 0.0:
-      svw = 0.0
-    else:
-      svw = sv/sw
-    return svw
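
_avg_generic_history() computes a decayed weighted mean: each sample is
weighted by the length of its measurement interval, discounted by
AGE_ALPHA raised to the sample's age in days, so recent history dominates.
A worked example with two made-up samples:

import math

AGE_ALPHA = 0.99
samples = [                    # (value, age_seconds, length_seconds)
  (999, 0 * 86400, 86400),     # today: weight factor 0.99**0 = 1.0
  (500, 30 * 86400, 86400),    # 30 days old: 0.99**30 is about 0.74
]

def decayed_weight(age_seconds, length_seconds):
  return length_seconds * math.pow(AGE_ALPHA, age_seconds / 86400.0)

sv = math.fsum(v * decayed_weight(age, l) for v, age, l in samples)
sw = math.fsum(decayed_weight(age, l) for _, age, l in samples)
print(sv / sw if sw else 0.0)  # about 787: the fresh sample dominates
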
-
-  def _add_generic_history(self, history):
-    periods = r['read_history'].keys()
-    periods.sort(key = lambda x: r['read_history'][x]['interval'] )
-    print periods
-
-  def add_running_history(self, history):
-    pass
-
-  def add_uptime(self, uptime):
-    logging.debug('Adding uptime %s.'%(self._fpr,))
-
-    if not 'flags' in uptime:
-      logging.debug('No flags in document for %s.'%(self._fpr,))
-      return
-
-    for f in ['Running', 'Guard', 'V2Dir']:
-      if not f in uptime['flags']:
-        logging.debug('No %s in flags for %s.'%(f, self._fpr,))
-        return
-
-    running = self._extract_generic_history(uptime['flags']['Running'],
-                                            '%s-Running'%(self._fpr))
-    guard = self._extract_generic_history(uptime['flags']['Guard'],
-                                          '%s-Guard'%(self._fpr))
-    v2dir = self._extract_generic_history(uptime['flags']['V2Dir'],
-                                          '%s-V2Dir'%(self._fpr))
-    if 'BadExit' in uptime['flags']:
-      badexit = self._extract_generic_history(uptime['flags']['BadExit'],
-                                              '%s-BadExit'%(self._fpr))
-
-    self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE
-    self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE
-    self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE
-    self._badexit = None
-    if 'BadExit' in uptime['flags']:
-      self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE
-
-  def is_candidate(self):
-    try:
-      if (MUST_BE_RUNNING_NOW and not self.is_running()):
-        log_excluded('%s not a candidate: not running now, unable to check ' +
-                     'DirPort consensus download', self._fpr)
-        return False
-      if (self._data['last_changed_address_or_port'] >
-          self.CUTOFF_ADDRESS_AND_PORT_STABLE):
-        log_excluded('%s not a candidate: changed address/port recently (%s)',
-                     self._fpr, self._data['last_changed_address_or_port'])
-        return False
-      if self._running < CUTOFF_RUNNING:
-        log_excluded('%s not a candidate: running avg too low (%lf)',
-                     self._fpr, self._running)
-        return False
-      if self._v2dir < CUTOFF_V2DIR:
-        log_excluded('%s not a candidate: v2dir avg too low (%lf)',
-                     self._fpr, self._v2dir)
-        return False
-      if self._badexit is not None and self._badexit > PERMITTED_BADEXIT:
-        log_excluded('%s not a candidate: badexit avg too high (%lf)',
-                     self._fpr, self._badexit)
-        return False
-
-      if not self.is_valid_version():
-        return False
-      if self._guard < CUTOFF_GUARD:
-        log_excluded('%s not a candidate: guard avg too low (%lf)',
-                     self._fpr, self._guard)
-        return False
-      if (not self._data.has_key('consensus_weight')
-          or self._data['consensus_weight'] < 1):
-        log_excluded('%s not a candidate: consensus weight invalid', self._fpr)
-        return False
-    except BaseException as e:
-      logging.warning("Exception %s when checking if fallback is a candidate",
-                      str(e))
-      return False
-    return True
-
-  def id_matches(self, id, exact=False):
-    """ Does this fallback's id match id?
-        exact is ignored. """
-    return self._fpr == id
-
-  def ipv4_addr_matches(self, ipv4_addr, exact=False):
-    """ Does this fallback's IPv4 address match ipv4_addr?
-        exact is ignored. """
-    return self.dirip == ipv4_addr
-
-  def ipv4_dirport_matches(self, ipv4_dirport, exact=False):
-    """ Does this fallback's IPv4 dirport match ipv4_dirport?
-        If exact is False, always return True. """
-    if exact:
-      return self.dirport == int(ipv4_dirport)
-    else:
-      return True
-
-  def ipv4_and_dirport_matches(self, ipv4_addr, ipv4_dirport, exact=False):
-    """ Does this fallback's IPv4 address match ipv4_addr?
-        If exact is True, also check ipv4_dirport. """
-    ipv4_match = self.ipv4_addr_matches(ipv4_addr, exact=exact)
-    if exact:
-      return ipv4_match and self.ipv4_dirport_matches(ipv4_dirport,
-                                                      exact=exact)
-    else:
-      return ipv4_match
-
-  def ipv4_orport_matches(self, ipv4_orport, exact=False):
-    """ Does this fallback's IPv4 orport match ipv4_orport?
-        If exact is False, always return True. """
-    if exact:
-      return self.orport == int(ipv4_orport)
-    else:
-      return True
-
-  def ipv4_and_orport_matches(self, ipv4_addr, ipv4_orport, exact=False):
-    """ Does this fallback's IPv4 address match ipv4_addr?
-        If exact is True, also check ipv4_orport. """
-    ipv4_match = self.ipv4_addr_matches(ipv4_addr, exact=exact)
-    if exact:
-      return ipv4_match and self.ipv4_orport_matches(ipv4_orport,
-                                                     exact=exact)
-    else:
-      return ipv4_match
-
-  def ipv6_addr_matches(self, ipv6_addr, exact=False):
-    """ Does this fallback's IPv6 address match ipv6_addr?
-        Both addresses must be present to match.
-        exact is ignored. """
-    if self.has_ipv6() and ipv6_addr is not None:
-      assert(ipv6_addr.startswith('[') and ipv6_addr.endswith(']'))
-      return self.ipv6addr == ipv6_addr
-    else:
-      return False
-
-  def ipv6_orport_matches(self, ipv6_orport, exact=False):
-    """ Does this fallback's IPv6 orport match ipv6_orport?
-        Both ports must be present to match.
-        If exact is False, always return True. """
-    if exact:
-      return (self.has_ipv6() and ipv6_orport is not None and
-              self.ipv6orport == int(ipv6_orport))
-    else:
-      return True
-
-  def ipv6_and_orport_matches(self, ipv6_addr, ipv6_orport, exact=False):
-    """ Does this fallback's IPv6 address match ipv6_addr?
-        If exact is True, also check ipv6_orport. """
-    ipv6_match = self.ipv6_addr_matches(ipv6_addr, exact=exact)
-    if exact:
-      return ipv6_match and self.ipv6_orport_matches(ipv6_orport,
-                                                     exact=exact)
-    else:
-      return ipv6_match
-
-  def entry_matches_exact(self, entry):
-    """ Is entry an exact match for this fallback?
-        A fallback is an exact match for entry if each key in entry matches:
-          ipv4
-          dirport
-          orport
-          id
-          ipv6 address and port (if present in the fallback or the whitelist)
-        If the fallback has an ipv6 key, the whitelist line must also have
-        it, otherwise they don't match.
-
-        Logs a warning-level message if the fallback would be an exact match,
-        but one of the id, ipv4, ipv4 orport, ipv4 dirport, or ipv6 orport
-        have changed. """
-    if not self.id_matches(entry['id'], exact=True):
-      if self.ipv4_and_orport_matches(entry['ipv4'],
-                                      entry['orport'],
-                                      exact=True):
-        logging.warning('%s excluded: has OR %s:%d changed fingerprint to ' +
-                        '%s?', entry['id'], self.dirip, self.orport,
-                        self._fpr)
-      if self.ipv6_and_orport_matches(entry.get('ipv6_addr'),
-                                      entry.get('ipv6_orport'),
-                                      exact=True):
-        logging.warning('%s excluded: has OR %s changed fingerprint to ' +
-                        '%s?', entry['id'], entry['ipv6'], self._fpr)
-      return False
-    if not self.ipv4_addr_matches(entry['ipv4'], exact=True):
-      logging.warning('%s excluded: has it changed IPv4 from %s to %s?',
-                      self._fpr, entry['ipv4'], self.dirip)
-      return False
-    if not self.ipv4_dirport_matches(entry['dirport'], exact=True):
-      logging.warning('%s excluded: has it changed DirPort from %s:%d to ' +
-                      '%s:%d?', self._fpr, self.dirip, int(entry['dirport']),
-                      self.dirip, self.dirport)
-      return False
-    if not self.ipv4_orport_matches(entry['orport'], exact=True):
-      logging.warning('%s excluded: has it changed ORPort from %s:%d to ' +
-                      '%s:%d?', self._fpr, self.dirip, int(entry['orport']),
-                      self.dirip, self.orport)
-      return False
-    if entry.has_key('ipv6') and self.has_ipv6():
-      if not self.ipv6_and_orport_matches(entry['ipv6_addr'],
-                                          entry['ipv6_orport'],
-                                          exact=True):
-        logging.warning('%s excluded: has it changed IPv6 ORPort from %s ' +
-                        'to %s:%d?', self._fpr, entry['ipv6'],
-                        self.ipv6addr, self.ipv6orport)
-        return False
-    elif entry.has_key('ipv6') and not self.has_ipv6():
-      logging.warning('%s excluded: has it lost its former IPv6 address %s?',
-                      self._fpr, entry['ipv6'])
-      return False
-    elif not entry.has_key('ipv6') and self.has_ipv6():
-      logging.warning('%s excluded: has it gained an IPv6 address %s:%d?',
-                      self._fpr, self.ipv6addr, self.ipv6orport)
-      return False
-    return True
-
-  def entry_matches_fuzzy(self, entry):
-    """ Is entry a fuzzy match for this fallback?
-        A fallback is a fuzzy match for entry if at least one of these keys
-        in entry matches:
-          id
-          ipv4
-          ipv6 (if present in both the fallback and whitelist)
-        The ports and nickname are ignored. Missing or extra ipv6 addresses
-        are ignored.
-
-        Doesn't log any warning messages. """
-    if self.id_matches(entry['id'], exact=False):
-      return True
-    if self.ipv4_addr_matches(entry['ipv4'], exact=False):
-      return True
-    if entry.has_key('ipv6') and self.has_ipv6():
-      if self.ipv6_addr_matches(entry['ipv6_addr'], exact=False):
-        return True
-    return False
-
-  def is_in_whitelist(self, relaylist, exact=False):
-    """ If exact is True (existing fallback list), check if this fallback is
-        an exact match for any whitelist entry, using entry_matches_exact().
-
-        If exact is False (new fallback whitelist), check if this fallback is
-        a fuzzy match for any whitelist entry, using entry_matches_fuzzy(). """
-    for entry in relaylist:
-      if exact:
-        if self.entry_matches_exact(entry):
-          return True
-      else:
-        if self.entry_matches_fuzzy(entry):
-          return True
-    return False
-
-  def cw_to_bw_factor(self):
-    return self._data['advertised_bandwidth'] / self._data['consensus_weight']
-
-  def measured_bandwidth(self, median_cw_to_bw_factor):
-    cw_to_bw = median_cw_to_bw_factor
-    if self.is_exit():
-      cw_to_bw *= EXIT_BANDWIDTH_FRACTION
-    measured_bandwidth = self._data['consensus_weight'] * cw_to_bw
-    if self._data['advertised_bandwidth'] != 0:
-      return min(measured_bandwidth, self._data['advertised_bandwidth'])
-    else:
-      return measured_bandwidth
-
-  def set_measured_bandwidth(self, median_cw_to_bw_factor):
-    self._data['measured_bandwidth'] = self.measured_bandwidth(
-        median_cw_to_bw_factor)
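
measured_bandwidth() turns a consensus weight into a bandwidth estimate by
multiplying it with the network-median advertised-bandwidth-to-weight
ratio, then clamps the result at the relay's own advertised bandwidth so a
single relay cannot overstate itself. A worked example with made-up
numbers:

median_cw_to_bw = 800.0                    # bytes/s per unit of weight
consensus_weight = 2000
advertised_bandwidth = 1.2 * 1024 * 1024   # about 1.2 MByte/s

estimate = consensus_weight * median_cw_to_bw     # 1,600,000 bytes/s
measured = min(estimate, advertised_bandwidth)    # clamped to advertised
print(measured / (1024.0 * 1024.0))               # 1.2 (MByte/s)
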
-
-  def is_exit(self):
-    return 'Exit' in self._data['flags']
-
-  def is_guard(self):
-    return 'Guard' in self._data['flags']
-
-  def is_running(self):
-    return 'Running' in self._data['flags']
-
-  def has_ipv6(self):
-    return self.ipv6addr is not None and self.ipv6orport is not None
-
-  @staticmethod
-  def strip_ipv6_brackets(ip):
-    if ip is None:
-      return unicode('')
-    if len(ip) < 2:
-      return unicode(ip)
-    if ip[0] == '[' and ip[-1] == ']':
-      return unicode(ip[1:-1])
-    return unicode(ip)
-
-  @staticmethod
-  def netblocks_equal(ip_a, ip_b, mask_bits):
-    if ip_a is None or ip_b is None:
-      return False
-    ip_a = Candidate.strip_ipv6_brackets(ip_a)
-    ip_b = Candidate.strip_ipv6_brackets(ip_b)
-    a = ipaddress.ip_address(ip_a)
-    b = ipaddress.ip_address(ip_b)
-    if a.version != b.version:
-      raise Exception('Mismatching IP versions in %s and %s'%(ip_a, ip_b))
-    if mask_bits > a.max_prefixlen:
-      logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
-      mask_bits = a.max_prefixlen
-    if mask_bits < 0:
-      logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
-      mask_bits = 0
-    a_net = ipaddress.ip_network('%s/%d'%(ip_a, mask_bits), strict=False)
-    return b in a_net
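
netblocks_equal() asks whether two addresses fall inside the same /N by
building a non-strict network from the first address and testing the
second for membership. The same check against the ipaddress module
directly, on documentation addresses (Python 3 shown):

import ipaddress

a = ipaddress.ip_address('203.0.113.1')
net = ipaddress.ip_network('203.0.113.77/24', strict=False)  # 203.0.113.0/24
print(a in net)                                      # True: same /24
print(a in ipaddress.ip_network('198.51.100.0/24'))  # False: different /24
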
-
-  def ipv4_netblocks_equal(self, other, mask_bits):
-    return Candidate.netblocks_equal(self.dirip, other.dirip, mask_bits)
-
-  def ipv6_netblocks_equal(self, other, mask_bits):
-    if not self.has_ipv6() or not other.has_ipv6():
-      return False
-    return Candidate.netblocks_equal(self.ipv6addr, other.ipv6addr, mask_bits)
-
-  def dirport_equal(self, other):
-    return self.dirport == other.dirport
-
-  def ipv4_orport_equal(self, other):
-    return self.orport == other.orport
-
-  def ipv6_orport_equal(self, other):
-    if not self.has_ipv6() or not other.has_ipv6():
-      return False
-    return self.ipv6orport == other.ipv6orport
-
-  def port_equal(self, other):
-    return (self.dirport_equal(other) or self.ipv4_orport_equal(other)
-            or self.ipv6_orport_equal(other))
-
-  def port_list(self):
-    ports = [self.dirport, self.orport]
-    if self.has_ipv6() and not self.ipv6orport in ports:
-      ports.append(self.ipv6orport)
-    return ports
-
-  def port_shared(self, other):
-    for p in self.port_list():
-      if p in other.port_list():
-        return True
-    return False
-
-  @staticmethod
-  def fallback_consensus_download_speed(dirip, dirport, nickname, fingerprint,
-                                        max_time):
-    download_failed = False
-
-    logging.info('Initiating %sconsensus download from %s (%s:%d) %s.',
-                 'microdesc ' if DOWNLOAD_MICRODESC_CONSENSUS else '',
-                 nickname, dirip, dirport, fingerprint)
-
-    TIMEOUT_SLOP = 1.0
-    start = datetime.datetime.utcnow()
-    try:
-      consensus = get_consensus(
-                    endpoints = [(dirip, dirport)],
-                    timeout = (max_time + TIMEOUT_SLOP),
-                    validate = True,
-                    retries = 0,
-                    fall_back_to_authority = False,
-                    document_handler = DocumentHandler.BARE_DOCUMENT,
-                    microdescriptor = DOWNLOAD_MICRODESC_CONSENSUS
-                  ).run()[0]
-      end = datetime.datetime.utcnow()
-      time_since_expiry = (end - consensus.valid_until).total_seconds()
-      time_until_valid = (consensus.valid_after - end).total_seconds()
-    except Exception, stem_error:
-      end = datetime.datetime.utcnow()
-      log_excluded('Unable to retrieve a consensus from %s: %s', nickname,
-                   stem_error)
-      status = 'error: "%s"' % (stem_error)
-      level = logging.WARNING
-      download_failed = True
-    elapsed = (end - start).total_seconds()
-    if download_failed:
-      pass
-    elif elapsed > max_time:
-      status = 'too slow'
-      level = logging.WARNING
-      download_failed = True
-    elif (time_since_expiry > 0):
-      status = 'outdated consensus, expired %ds ago'%(int(time_since_expiry))
-      if time_since_expiry <= REASONABLY_LIVE_TIME:
-        status += ', tolerating up to %ds'%(REASONABLY_LIVE_TIME)
-        level = logging.INFO
-      else:
-        status += ', invalid'
-        level = logging.WARNING
-        download_failed = True
-    elif (time_until_valid > 0):
-      status = 'future consensus, valid in %ds'%(int(time_until_valid))
-      if time_until_valid <= REASONABLY_LIVE_TIME:
-        status += ', tolerating up to %ds'%(REASONABLY_LIVE_TIME)
-        level = logging.INFO
-      else:
-        status += ', invalid'
-        level = logging.WARNING
-        download_failed = True
-    else:
-      status = 'ok'
-      level = logging.DEBUG
-    logging.log(level, 'Consensus download: %0.1fs %s from %s (%s:%d) %s, ' +
-                'max download time %0.1fs.', elapsed, status, nickname,
-                dirip, dirport, fingerprint, max_time)
-    return download_failed
-
-  def check_fallback_download_consensus(self):
-    ipv4_failed = False
-    ipv6_failed = False
-    if PERFORM_IPV4_DIRPORT_CHECKS:
-      ipv4_failed = Candidate.fallback_consensus_download_speed(self.dirip,
-                                                self.dirport,
-                                                self._data['nickname'],
-                                                self._fpr,
-                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
-    if self.has_ipv6() and PERFORM_IPV6_DIRPORT_CHECKS:
-      ipv6_failed = Candidate.fallback_consensus_download_speed(self.ipv6addr,
-                                                self.dirport,
-                                                self._data['nickname'],
-                                                self._fpr,
-                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
-    return ((not ipv4_failed) and (not ipv6_failed))
-
-  def try_fallback_download_consensus(self):
-    if not self.get_fallback_download_consensus():
-      self._data['download_check'] = self.check_fallback_download_consensus()
-
-  def get_fallback_download_consensus(self):
-    if not PERFORM_IPV4_DIRPORT_CHECKS and not PERFORM_IPV6_DIRPORT_CHECKS:
-      return True
-
-    if not self._data.has_key('download_check'):
-      return False
-    return self._data['download_check']
-
-  def fallbackdir_line(self, fallbacks, prefilter_fallbacks):
-    s = ''
-    if OUTPUT_COMMENTS:
-      s += self.fallbackdir_comment(fallbacks, prefilter_fallbacks)
-
-    if self.get_fallback_download_consensus() or OUTPUT_COMMENTS:
-      s += self.fallbackdir_info(self.get_fallback_download_consensus())
-    return s
-
-  def fallbackdir_comment(self, fallbacks, prefilter_fallbacks):
-    s = '/*'
-    s += '\n'
-    s += cleanse_c_multiline_comment(self._data['nickname'])
-    s += '\n'
-    s += 'Flags: '
-    s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags'])))
-    s += '\n'
-
-    bandwidth = self._data['measured_bandwidth']
-    weight = self._data['consensus_weight']
-    s += 'Bandwidth: %.1f MByte/s, Consensus Weight: %d'%(
-        bandwidth/(1024.0*1024.0),
-        weight)
-    s += '\n'
-    if self._data['contact'] is not None:
-      s += cleanse_c_multiline_comment(self._data['contact'])
-      if CONTACT_COUNT:
-        fallback_count = len([f for f in fallbacks
-                              if f._data['contact'] == self._data['contact']])
-        if fallback_count > 1:
-          s += '\n'
-          s += '%d identical contacts listed' % (fallback_count)
-    s += '\n'
-    s += '*/'
-    s += '\n'
-    return s
-
-  def fallbackdir_info(self, dl_speed_ok):
-    c_string = dl_speed_ok
-    comment_string = not dl_speed_ok and OUTPUT_COMMENTS
-
-    if not c_string and not comment_string:
-      return ''
-    s = ''
-
-    if comment_string:
-      s += '/* Consensus download failed or was too slow:\n'
-
-    s += '"%s orport=%d id=%s"'%(
-        cleanse_c_string(self._data['dir_address']),
-        self.orport,
-        cleanse_c_string(self._fpr))
-    s += '\n'
-
-    if self.has_ipv6():
-      s += '" ipv6=%s:%d"'%(cleanse_c_string(self.ipv6addr), self.ipv6orport)
-      s += '\n'
-
-    if not comment_string:
-      s += '/* '
-    s += 'nickname=%s'%(cleanse_c_string(self._data['nickname']))
-    if not comment_string:
-      s += ' */'
-    s += '\n'
-
-    if not comment_string:
-      s += '/* '
-    s += 'extrainfo=%d'%(1 if self._extra_info_cache else 0)
-    if not comment_string:
-      s += ' */'
-    s += '\n'
-
-    if not comment_string:
-      s += '/* '
-    s += SECTION_SEPARATOR_BASE
-    if not comment_string:
-      s += ' */'
-    s += '\n'
-    s += ','
-    if comment_string:
-      s += '\n'
-      s += '*/'
-    return s
-
-class CandidateList(dict):
-  def __init__(self):
-    pass
-
-  def _add_relay(self, details):
-    if not 'dir_address' in details: return
-    c = Candidate(details)
-    self[ c.get_fingerprint() ] = c
-
-  def _add_uptime(self, uptime):
-    try:
-      fpr = uptime['fingerprint']
-    except KeyError:
-      raise Exception("Document has no fingerprint field.")
-
-    try:
-      c = self[fpr]
-    except KeyError:
-      logging.debug('Got unknown relay %s in uptime document.'%(fpr,))
-      return
-
-    c.add_uptime(uptime)
-
-  def _add_details(self):
-    logging.debug('Loading details document.')
-    d = fetch('details',
-        fields=('fingerprint,nickname,contact,last_changed_address_or_port,' +
-                'consensus_weight,advertised_bandwidth,or_addresses,' +
-                'dir_address,recommended_version,flags,effective_family,' +
-                'platform'))
-    logging.debug('Loading details document done.')
-
-    if not 'relays' in d: raise Exception("No relays found in document.")
-
-    for r in d['relays']: self._add_relay(r)
-
-  def _add_uptimes(self):
-    logging.debug('Loading uptime document.')
-    d = fetch('uptime')
-    logging.debug('Loading uptime document done.')
-
-    if not 'relays' in d: raise Exception("No relays found in document.")
-    for r in d['relays']: self._add_uptime(r)
-
-  def add_relays(self):
-    self._add_details()
-    self._add_uptimes()
-
-  def count_guards(self):
-    guard_count = 0
-    for fpr in self.keys():
-      if self[fpr].is_guard():
-        guard_count += 1
-    return guard_count
-
-  def compute_fallbacks(self):
-    self.fallbacks = map(lambda x: self[x],
-                         filter(lambda x: self[x].is_candidate(),
-                                self.keys()))
-
-  def sort_fallbacks_by_cw_to_bw_factor(self):
-    self.fallbacks.sort(key=lambda f: f.cw_to_bw_factor())
-
-  def sort_fallbacks_by_measured_bandwidth(self):
-    self.fallbacks.sort(key=lambda f: f._data['measured_bandwidth'],
-                        reverse=True)
-
-  def sort_fallbacks_by(self, data_field):
-    self.fallbacks.sort(key=lambda f: f._data[data_field])
-
-  @staticmethod
-  def load_relaylist(file_obj):
-    """ Read each line in the file, and parse it like a FallbackDir line:
-        an IPv4 address and optional port:
-          <IPv4 address>:<port>
-        which are parsed into dictionary entries:
-          ipv4=<IPv4 address>
-          dirport=<port>
-        followed by a series of key=value entries:
-          orport=<port>
-          id=<fingerprint>
-          ipv6=<IPv6 address>:<IPv6 orport>
-        each line's key/value pairs are placed in a dictionary,
-        (of string -> string key/value pairs),
-        and these dictionaries are placed in an array.
-        comments start with #, and are ignored. """
-    file_data = file_obj['data']
-    file_name = file_obj['name']
-    relaylist = []
-    if file_data is None:
-      return relaylist
-    for line in file_data.split('\n'):
-      relay_entry = {}
-      line_comment_split = line.split('#')
-      line = line_comment_split[0]
-      line = cleanse_whitespace(line)
-      line = line.strip()
-      if len(line) == 0:
-        continue
-      for item in line.split(' '):
-        item = item.strip()
-        if len(item) == 0:
-          continue
-        key_value_split = item.split('=')
-        kvl = len(key_value_split)
-        if kvl < 1 or kvl > 2:
-          print '#error Bad %s item: %s, format is key=value.'%(
-            file_name, item)
-        if kvl == 1:
-          ipv4_maybe_dirport = key_value_split[0]
-          ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':')
-          dirl = len(ipv4_maybe_dirport_split)
-          if dirl < 1 or dirl > 2:
-            print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%(
-              file_name, item)
-          if dirl >= 1:
-            relay_entry['ipv4'] = ipv4_maybe_dirport_split[0]
-          if dirl == 2:
-            relay_entry['dirport'] = ipv4_maybe_dirport_split[1]
-        elif kvl == 2:
-          relay_entry[key_value_split[0]] = key_value_split[1]
-          if key_value_split[0] == 'ipv6':
-            ipv6_orport_split = key_value_split[1].rsplit(':', 1)
-            ipv6l = len(ipv6_orport_split)
-            if ipv6l != 2:
-              print '#error Bad %s IPv6 item: %s, format is [ipv6]:orport.'%(
-                file_name, item)
-            relay_entry['ipv6_addr'] = ipv6_orport_split[0]
-            relay_entry['ipv6_orport'] = ipv6_orport_split[1]
-      relaylist.append(relay_entry)
-    return relaylist
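
For reference, a made-up fallback.whitelist line and the entry the parser
above builds from it ('#' comments are stripped, the bare ipv4:port token
becomes 'ipv4'/'dirport', and the ipv6 value is additionally split into
address and orport):

line = ('203.0.113.1:9030 orport=9001 '
        'id=0123456789ABCDEF0123456789ABCDEF01234567 '
        'ipv6=[2001:db8::1]:9001')
# parses to:
# {'ipv4': '203.0.113.1', 'dirport': '9030', 'orport': '9001',
#  'id': '0123456789ABCDEF0123456789ABCDEF01234567',
#  'ipv6': '[2001:db8::1]:9001',
#  'ipv6_addr': '[2001:db8::1]', 'ipv6_orport': '9001'}
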
-
-  def apply_filter_lists(self, whitelist_obj, exact=False):
-    """ Apply the fallback whitelist_obj to this fallback list,
-        passing exact to is_in_whitelist(). """
-    excluded_count = 0
-    list_type = 'whitelist'
-    if whitelist_obj['check_existing']:
-      list_type = 'fallback list'
-
-    logging.debug('Applying {}'.format(list_type))
-
-    whitelist = self.load_relaylist(whitelist_obj)
-    filtered_fallbacks = []
-    for f in self.fallbacks:
-      in_whitelist = f.is_in_whitelist(whitelist, exact=exact)
-      if in_whitelist:
-        filtered_fallbacks.append(f)
-      elif INCLUDE_UNLISTED_ENTRIES:
-        filtered_fallbacks.append(f)
-      else:
-        excluded_count += 1
-        log_excluded('Excluding %s: not in %s.',
-                     f._fpr, list_type)
-    self.fallbacks = filtered_fallbacks
-    return excluded_count
-
-  @staticmethod
-  def summarise_filters(initial_count, excluded_count, check_existing):
-    list_type = 'Whitelist'
-    if check_existing:
-      list_type = 'Fallback list'
-
-    return '/* %s excluded %d of %d candidates. */'%(list_type,
-                                                excluded_count, initial_count)
-
-  def calculate_measured_bandwidth(self):
-    self.sort_fallbacks_by_cw_to_bw_factor()
-    median_fallback = self.fallback_median(True)
-    if median_fallback is not None:
-      median_cw_to_bw_factor = median_fallback.cw_to_bw_factor()
-    else:
-      median_cw_to_bw_factor = None
-    for f in self.fallbacks:
-      f.set_measured_bandwidth(median_cw_to_bw_factor)
-
-  def remove_low_bandwidth_relays(self):
-    if MIN_BANDWIDTH is None:
-      return
-    above_min_bw_fallbacks = []
-    for f in self.fallbacks:
-      if f._data['measured_bandwidth'] >= MIN_BANDWIDTH:
-        above_min_bw_fallbacks.append(f)
-      else:
-        log_excluded('%s not a candidate: bandwidth %.1fMByte/s too low, ' +
-                     'must be at least %.1fMByte/s', f._fpr,
-                     f._data['measured_bandwidth']/(1024.0*1024.0),
-                     MIN_BANDWIDTH/(1024.0*1024.0))
-    self.fallbacks = above_min_bw_fallbacks
-
-  def fallback_min(self):
-    if len(self.fallbacks) > 0:
-      return self.fallbacks[-1]
-    else:
-      return None
-
-  def fallback_median(self, require_advertised_bandwidth):
-    if len(self.fallbacks) > 0:
-      median_position = (len(self.fallbacks) - 1) / 2
-      if not require_advertised_bandwidth:
-        return self.fallbacks[median_position]
-      while not self.fallbacks[median_position]._data['advertised_bandwidth']:
-        median_position += 1
-        if median_position >= len(self.fallbacks):
-          return None
-      return self.fallbacks[median_position]
-    else:
-      return None
-
-  def fallback_max(self):
-    if len(self.fallbacks) > 0:
-      return self.fallbacks[0]
-    else:
-      return None
-
-  @staticmethod
-  def attribute_new():
-    return dict()
-
-  @staticmethod
-  def attribute_count(attribute, attribute_bag):
-    if attribute is None or attribute == '':
-      return 0
-    if attribute not in attribute_bag:
-      return 0
-    return attribute_bag[attribute]
-
-  @staticmethod
-  def attribute_allow(attribute, attribute_bag, max_count=1):
-    if attribute is None or attribute == '' or max_count <= 0:
-      return True
-    elif CandidateList.attribute_count(attribute, attribute_bag) >= max_count:
-      return False
-    else:
-      return True
-
-  @staticmethod
-  def attribute_add(attribute, attribute_bag, count=1):
-    if attribute is None or attribute == '' or count <= 0:
-      return
-    attribute_bag.setdefault(attribute, 0)
-    attribute_bag[attribute] += count
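
The attribute_* helpers are a counting bag used by the limit_fallbacks_*
methods below to cap how many fallbacks share one IP address, ContactInfo,
or effective family. A simplified standalone sketch of the same idea (it
omits the max_count <= 0 escape hatch of the original):

def attribute_allow(attribute, bag, max_count=1):
  return not attribute or bag.get(attribute, 0) < max_count

def attribute_add(attribute, bag, count=1):
  if attribute:
    bag[attribute] = bag.get(attribute, 0) + count

bag = {}
attribute_add('198.51.100.7', bag)
print(attribute_allow('198.51.100.7', bag))  # False: IP already used once
print(attribute_allow('203.0.113.9', bag))   # True: first fallback there
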
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- def limit_fallbacks_same_ip(self):
|
|
|
- ip_limit_fallbacks = []
|
|
|
- ip_list = CandidateList.attribute_new()
|
|
|
- for f in self.fallbacks:
|
|
|
- if (CandidateList.attribute_allow(f.dirip, ip_list,
|
|
|
- MAX_FALLBACKS_PER_IPV4)
|
|
|
- and CandidateList.attribute_allow(f.ipv6addr, ip_list,
|
|
|
- MAX_FALLBACKS_PER_IPV6)):
|
|
|
- ip_limit_fallbacks.append(f)
|
|
|
- CandidateList.attribute_add(f.dirip, ip_list)
|
|
|
- if f.has_ipv6():
|
|
|
- CandidateList.attribute_add(f.ipv6addr, ip_list)
|
|
|
- elif not CandidateList.attribute_allow(f.dirip, ip_list,
|
|
|
- MAX_FALLBACKS_PER_IPV4):
|
|
|
- log_excluded('Eliminated %s: already have %d fallback(s) on IPv4 %s'
|
|
|
- %(f._fpr, CandidateList.attribute_count(f.dirip, ip_list),
|
|
|
- f.dirip))
|
|
|
- elif (f.has_ipv6() and
|
|
|
- not CandidateList.attribute_allow(f.ipv6addr, ip_list,
|
|
|
- MAX_FALLBACKS_PER_IPV6)):
|
|
|
- log_excluded('Eliminated %s: already have %d fallback(s) on IPv6 %s'
|
|
|
- %(f._fpr, CandidateList.attribute_count(f.ipv6addr,
|
|
|
- ip_list),
|
|
|
- f.ipv6addr))
|
|
|
- original_count = len(self.fallbacks)
|
|
|
- self.fallbacks = ip_limit_fallbacks
|
|
|
- return original_count - len(self.fallbacks)
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- def limit_fallbacks_same_contact(self):
|
|
|
- contact_limit_fallbacks = []
|
|
|
- contact_list = CandidateList.attribute_new()
|
|
|
- for f in self.fallbacks:
|
|
|
- if CandidateList.attribute_allow(f._data['contact'], contact_list,
|
|
|
- MAX_FALLBACKS_PER_CONTACT):
|
|
|
- contact_limit_fallbacks.append(f)
|
|
|
- CandidateList.attribute_add(f._data['contact'], contact_list)
|
|
|
- else:
|
|
|
- log_excluded(
|
|
|
- 'Eliminated %s: already have %d fallback(s) on ContactInfo %s'
|
|
|
- %(f._fpr, CandidateList.attribute_count(f._data['contact'],
|
|
|
- contact_list),
|
|
|
- f._data['contact']))
|
|
|
- original_count = len(self.fallbacks)
|
|
|
- self.fallbacks = contact_limit_fallbacks
|
|
|
- return original_count - len(self.fallbacks)
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- def limit_fallbacks_same_family(self):
|
|
|
- family_limit_fallbacks = []
|
|
|
- fingerprint_list = CandidateList.attribute_new()
|
|
|
- for f in self.fallbacks:
|
|
|
- if CandidateList.attribute_allow(f._fpr, fingerprint_list,
|
|
|
- MAX_FALLBACKS_PER_FAMILY):
|
|
|
- family_limit_fallbacks.append(f)
|
|
|
- CandidateList.attribute_add(f._fpr, fingerprint_list)
|
|
|
- for family_fingerprint in f._data['effective_family']:
|
|
|
- CandidateList.attribute_add(family_fingerprint, fingerprint_list)
|
|
|
- else:
|
|
|
-
|
|
|
-
|
|
|
- log_excluded(
|
|
|
- 'Eliminated %s: already have %d fallback(s) in effective family'
|
|
|
- %(f._fpr, CandidateList.attribute_count(f._fpr, fingerprint_list)))
|
|
|
- original_count = len(self.fallbacks)
|
|
|
- self.fallbacks = family_limit_fallbacks
|
|
|
- return original_count - len(self.fallbacks)
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- @staticmethod
|
|
|
- def get_fallback_descriptors_once(fingerprint_list):
|
|
|
- desc_list = get_server_descriptors(fingerprints=fingerprint_list).run(suppress=True)
|
|
|
- return desc_list
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- @staticmethod
|
|
|
- def get_fallback_descriptors(fingerprint_list, max_retries=5):
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- max_retries += (len(fingerprint_list) + MAX_FINGERPRINTS - 1) / MAX_FINGERPRINTS
|
|
|
- remaining_list = fingerprint_list
|
|
|
- desc_list = []
|
|
|
- for _ in xrange(max_retries):
|
|
|
- if len(remaining_list) == 0:
|
|
|
- break
|
|
|
- new_desc_list = CandidateList.get_fallback_descriptors_once(remaining_list[0:MAX_FINGERPRINTS])
|
|
|
- for d in new_desc_list:
|
|
|
- try:
|
|
|
- remaining_list.remove(d.fingerprint)
|
|
|
- except ValueError:
|
|
|
-
|
|
|
- logging.warning("Directory mirror returned unwanted descriptor %s, ignoring",
|
|
|
- d.fingerprint)
|
|
|
- continue
|
|
|
- desc_list.append(d)
|
|
|
- return desc_list
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- def mark_extra_info_caches(self):
|
|
|
- fingerprint_list = [ f._fpr for f in self.fallbacks ]
|
|
|
- logging.info("Downloading fallback descriptors to find extra-info caches")
|
|
|
- desc_list = CandidateList.get_fallback_descriptors(fingerprint_list)
|
|
|
- for d in desc_list:
|
|
|
- self[d.fingerprint]._extra_info_cache = d.extra_info_cache
|
|
|
- missing_descriptor_list = [ f._fpr for f in self.fallbacks
|
|
|
- if f._extra_info_cache is None ]
|
|
|
- for f in missing_descriptor_list:
|
|
|
- logging.warning("No descriptor for {}. Assuming extrainfo=0.".format(f))
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- def try_download_consensus_checks(self, max_count):
|
|
|
- dl_ok_count = 0
|
|
|
- for f in self.fallbacks:
|
|
|
- f.try_fallback_download_consensus()
|
|
|
- if f.get_fallback_download_consensus():
|
|
|
-
|
|
|
- dl_ok_count += 1
|
|
|
- if dl_ok_count >= max_count:
|
|
|
-
|
|
|
- return
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
-
|
|
|
- def perform_download_consensus_checks(self, max_count):
|
|
|
- self.sort_fallbacks_by_measured_bandwidth()
|
|
|
- self.try_download_consensus_checks(max_count)
|
|
|
- if CONSENSUS_DOWNLOAD_RETRY:
|
|
|
-
|
|
|
-
|
|
|
- self.try_download_consensus_checks(max_count)
|
|
|
-
|
|
|
-
|
|
|
- original_count = len(self.fallbacks)
|
|
|
- self.fallbacks = filter(lambda x: x.get_fallback_download_consensus(),
|
|
|
- self.fallbacks)
|
|
|
-
|
|
|
-
|
|
|
- failed_count = original_count - len(self.fallbacks)
|
|
|
- self.fallbacks = self.fallbacks[:max_count]
|
|
|
- return failed_count
|
|
|
-
|
|
|
-
|
|
|
- @staticmethod
|
|
|
- def describe_percentage(a, b):
|
|
|
- if b != 0:
|
|
|
- return '%d/%d = %.0f%%'%(a, b, (a*100.0)/b)
|
|
|
- else:
|
|
|
-
|
|
|
- return '%d/%d = %.0f%%'%(a, b, 0.0)
-
-  # Group fallbacks by IPv4 netblock, using the top mask_bits of each
-  # address. Returns a dict mapping the fingerprint of the first fallback
-  # seen in each netblock to the list of fallbacks that share it.
-  def fallbacks_by_ipv4_netblock(self, mask_bits):
-    netblocks = {}
-    for f in self.fallbacks:
-      found_netblock = False
-      for b in netblocks.keys():
-        # we found an existing netblock containing this fallback
-        if f.ipv4_netblocks_equal(self[b], mask_bits):
-          # add it to the netblock
-          netblocks[b].append(f)
-          found_netblock = True
-          break
-      # make a new netblock based on this fallback's fingerprint
-      if not found_netblock:
-        netblocks[f._fpr] = [f]
-    return netblocks
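-
-  # Illustrative use (assuming a populated CandidateList 'candidates'):
-  #   netblocks = candidates.fallbacks_by_ipv4_netblock(16)
-  #   shared = [fbs for fbs in netblocks.values() if len(fbs) > 1]
-  # Each entry in 'shared' is a group of fallbacks whose IPv4 addresses
-  # fall in the same /16.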
-
-  # As above, but group fallbacks by IPv6 netblock. Fallbacks without an
-  # IPv6 address are skipped.
-  def fallbacks_by_ipv6_netblock(self, mask_bits):
-    netblocks = {}
-    for f in self.fallbacks:
-      # skip fallbacks without an IPv6 address
-      if not f.has_ipv6():
-        continue
-      found_netblock = False
-      for b in netblocks.keys():
-        # we found an existing netblock containing this fallback
-        if f.ipv6_netblocks_equal(self[b], mask_bits):
-          # add it to the netblock
-          netblocks[b].append(f)
-          found_netblock = True
-          break
-      # make a new netblock based on this fallback's fingerprint
-      if not found_netblock:
-        netblocks[f._fpr] = [f]
-    return netblocks
-
-  # Log a message about the proportion of fallbacks that share an IPv4
-  # netblock with other fallbacks, at the granularity of mask_bits.
-  def describe_fallback_ipv4_netblock_mask(self, mask_bits):
-    fallback_count = len(self.fallbacks)
-    shared_netblock_fallback_count = 0
-    most_frequent_netblock = None
-    netblocks = self.fallbacks_by_ipv4_netblock(mask_bits)
-    for b in netblocks.keys():
-      if len(netblocks[b]) > 1:
-        # how many fallbacks are in a netblock with other fallbacks?
-        shared_netblock_fallback_count += len(netblocks[b])
-        # what's the netblock with the most fallbacks?
-        if (most_frequent_netblock is None
-            or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
-          most_frequent_netblock = b
-        logging.debug('Fallback IPv4 addresses in the same /%d:'%(mask_bits))
-        for f in netblocks[b]:
-          logging.debug('%s - %s', f.dirip, f._fpr)
-    if most_frequent_netblock is not None:
-      logging.warning('There are %s fallbacks in the IPv4 /%d containing %s'%(
-        CandidateList.describe_percentage(
-          len(netblocks[most_frequent_netblock]),
-          fallback_count),
-        mask_bits,
-        self[most_frequent_netblock].dirip))
-    if shared_netblock_fallback_count > 0:
-      logging.warning(('%s of fallbacks are in an IPv4 /%d with other ' +
-                       'fallbacks')%(CandidateList.describe_percentage(
-                                       shared_netblock_fallback_count,
-                                       fallback_count),
-                                     mask_bits))
-
-  # As above, but for IPv6 netblocks. Percentages are relative to the
-  # number of fallbacks with an IPv6 address.
-  def describe_fallback_ipv6_netblock_mask(self, mask_bits):
-    fallback_count = len(self.fallbacks_with_ipv6())
-    shared_netblock_fallback_count = 0
-    most_frequent_netblock = None
-    netblocks = self.fallbacks_by_ipv6_netblock(mask_bits)
-    for b in netblocks.keys():
-      if len(netblocks[b]) > 1:
-        # how many fallbacks are in a netblock with other fallbacks?
-        shared_netblock_fallback_count += len(netblocks[b])
-        # what's the netblock with the most fallbacks?
-        if (most_frequent_netblock is None
-            or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
-          most_frequent_netblock = b
-        logging.debug('Fallback IPv6 addresses in the same /%d:'%(mask_bits))
-        for f in netblocks[b]:
-          logging.debug('%s - %s', f.ipv6addr, f._fpr)
-    if most_frequent_netblock is not None:
-      logging.warning('There are %s fallbacks in the IPv6 /%d containing %s'%(
-        CandidateList.describe_percentage(
-          len(netblocks[most_frequent_netblock]),
-          fallback_count),
-        mask_bits,
-        self[most_frequent_netblock].ipv6addr))
-    if shared_netblock_fallback_count > 0:
-      logging.warning(('%s of fallbacks are in an IPv6 /%d with other ' +
-                       'fallbacks')%(CandidateList.describe_percentage(
-                                       shared_netblock_fallback_count,
-                                       fallback_count),
-                                     mask_bits))
-
-  # Log a message about the proportion of fallbacks that share an IPv4 /16
-  # with other fallbacks. Only the /16 granularity is reported here.
-  def describe_fallback_ipv4_netblocks(self):
-    self.describe_fallback_ipv4_netblock_mask(16)
-
-  # Log a message about the proportion of fallbacks that share an IPv6
-  # netblock with other fallbacks, at /32 (a typical provider allocation)
-  # and /64 (a typical end-site subnet).
-  def describe_fallback_ipv6_netblocks(self):
-    self.describe_fallback_ipv6_netblock_mask(32)
-    self.describe_fallback_ipv6_netblock_mask(64)
-
-  def describe_fallback_netblocks(self):
-    self.describe_fallback_ipv4_netblocks()
-    self.describe_fallback_ipv6_netblocks()
-
-  # Return a list of fallbacks which have the IPv4 ORPort port
-  def fallbacks_on_ipv4_orport(self, port):
-    return filter(lambda x: x.orport == port, self.fallbacks)
-
-  # Return a list of fallbacks which have the IPv6 ORPort port
-  def fallbacks_on_ipv6_orport(self, port):
-    return filter(lambda x: x.ipv6orport == port, self.fallbacks_with_ipv6())
-
-  # Return a list of fallbacks which have the DirPort port
-  def fallbacks_on_dirport(self, port):
-    return filter(lambda x: x.dirport == port, self.fallbacks)
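-
-  # For instance, len(self.fallbacks_on_dirport(80)) counts the fallbacks
-  # whose DirPort is 80; the describe_* methods below log these counts as
-  # percentages.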
-
-  # Log a message about the proportion of fallbacks on IPv4 ORPort port,
-  # and return the number of fallbacks on that port.
-  def describe_fallback_ipv4_orport(self, port):
-    port_count = len(self.fallbacks_on_ipv4_orport(port))
-    fallback_count = len(self.fallbacks)
-    logging.warning('%s of fallbacks are on IPv4 ORPort %d'%(
-      CandidateList.describe_percentage(port_count,
-                                        fallback_count),
-      port))
-    return port_count
-
-  # Log a message about the proportion of IPv6 fallbacks on IPv6 ORPort
-  # port, and return the number of fallbacks on that port.
-  def describe_fallback_ipv6_orport(self, port):
-    port_count = len(self.fallbacks_on_ipv6_orport(port))
-    fallback_count = len(self.fallbacks_with_ipv6())
-    logging.warning('%s of IPv6 fallbacks are on IPv6 ORPort %d'%(
-      CandidateList.describe_percentage(port_count,
-                                        fallback_count),
-      port))
-    return port_count
-
-  # Log a message about the proportion of fallbacks on DirPort port, and
-  # return the number of fallbacks on that port.
-  def describe_fallback_dirport(self, port):
-    port_count = len(self.fallbacks_on_dirport(port))
-    fallback_count = len(self.fallbacks)
-    logging.warning('%s of fallbacks are on DirPort %d'%(
-      CandidateList.describe_percentage(port_count,
-                                        fallback_count),
-      port))
-    return port_count
-
-  # Log a message about the proportion of fallbacks on the most common
-  # ORPorts (443, 9001) and DirPorts (80, 9030), and the proportion on
-  # other ports.
-  def describe_fallback_ports(self):
-    fallback_count = len(self.fallbacks)
-    ipv4_or_count = fallback_count
-    ipv4_or_count -= self.describe_fallback_ipv4_orport(443)
-    ipv4_or_count -= self.describe_fallback_ipv4_orport(9001)
-    logging.warning('%s of fallbacks are on other IPv4 ORPorts'%(
-      CandidateList.describe_percentage(ipv4_or_count,
-                                        fallback_count)))
-    ipv6_fallback_count = len(self.fallbacks_with_ipv6())
-    ipv6_or_count = ipv6_fallback_count
-    ipv6_or_count -= self.describe_fallback_ipv6_orport(443)
-    ipv6_or_count -= self.describe_fallback_ipv6_orport(9001)
-    logging.warning('%s of IPv6 fallbacks are on other IPv6 ORPorts'%(
-      CandidateList.describe_percentage(ipv6_or_count,
-                                        ipv6_fallback_count)))
-    dir_count = fallback_count
-    dir_count -= self.describe_fallback_dirport(80)
-    dir_count -= self.describe_fallback_dirport(9030)
-    logging.warning('%s of fallbacks are on other DirPorts'%(
-      CandidateList.describe_percentage(dir_count,
-                                        fallback_count)))
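-
-  # Typical log output (values illustrative):
-  #   60/150 = 40% of fallbacks are on IPv4 ORPort 443
-  #   70/150 = 47% of fallbacks are on IPv4 ORPort 9001
-  #   20/150 = 13% of fallbacks are on other IPv4 ORPorts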
-
-  # Return a list of fallbacks which cache extra-info documents
-  def fallbacks_with_extra_info_cache(self):
-    return filter(lambda x: x._extra_info_cache, self.fallbacks)
-
-  # Log a message about the proportion of fallbacks that cache extra-info
-  # documents
-  def describe_fallback_extra_info_caches(self):
-    extra_info_fallback_count = len(self.fallbacks_with_extra_info_cache())
-    fallback_count = len(self.fallbacks)
-    logging.warning('%s of fallbacks cache extra-info documents'%(
-      CandidateList.describe_percentage(extra_info_fallback_count,
-                                        fallback_count)))
-
-  # Return a list of fallbacks which have the Exit flag
-  def fallbacks_with_exit(self):
-    return filter(lambda x: x.is_exit(), self.fallbacks)
-
-  # Log a message about the proportion of fallbacks with the Exit flag
-  def describe_fallback_exit_flag(self):
-    exit_fallback_count = len(self.fallbacks_with_exit())
-    fallback_count = len(self.fallbacks)
-    logging.warning('%s of fallbacks have the Exit flag'%(
-      CandidateList.describe_percentage(exit_fallback_count,
-                                        fallback_count)))
-
-  # Return a list of fallbacks which have an IPv6 address
-  def fallbacks_with_ipv6(self):
-    return filter(lambda x: x.has_ipv6(), self.fallbacks)
-
-  # Log a message about the proportion of fallbacks on IPv6
-  def describe_fallback_ip_family(self):
-    ipv6_fallback_count = len(self.fallbacks_with_ipv6())
-    fallback_count = len(self.fallbacks)
-    logging.warning('%s of fallbacks are on IPv6'%(
-      CandidateList.describe_percentage(ipv6_fallback_count,
-                                        fallback_count)))
-
-  # Return a C comment summarising the fallback list: the checks performed,
-  # the final counts, and the measured bandwidth range. Appends a #error if
-  # the list is too small.
-  def summarise_fallbacks(self, eligible_count, operator_count, failed_count,
-                          guard_count, target_count, check_existing):
-    s = ''
-    # report the checks that were performed on the fallbacks
-    if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
-      s += '/* Checked %s%s%s DirPorts served a consensus within %.1fs. */'%(
-        'IPv4' if PERFORM_IPV4_DIRPORT_CHECKS else '',
-        ' and ' if (PERFORM_IPV4_DIRPORT_CHECKS
-                    and PERFORM_IPV6_DIRPORT_CHECKS) else '',
-        'IPv6' if PERFORM_IPV6_DIRPORT_CHECKS else '',
-        CONSENSUS_DOWNLOAD_SPEED_MAX)
-    else:
-      s += '/* Did not check IPv4 or IPv6 DirPort consensus downloads. */'
-    s += '\n'
-    # multi-line C comment with the summary counts
-    s += '/*'
-    s += '\n'
-    fallback_count = len(self.fallbacks)
-    if FALLBACK_PROPORTION_OF_GUARDS is None:
-      fallback_proportion = ''
-    else:
-      fallback_proportion = ', Target %d (%d * %.2f)'%(target_count,
-                                                       guard_count,
-                                                       FALLBACK_PROPORTION_OF_GUARDS)
-    s += 'Final Count: %d (Eligible %d%s'%(fallback_count, eligible_count,
-                                           fallback_proportion)
-    if MAX_FALLBACK_COUNT is not None:
-      s += ', Max %d'%(MAX_FALLBACK_COUNT)
-    s += ')\n'
-    if eligible_count != fallback_count:
-      removed_count = eligible_count - fallback_count
-      excess_to_target_or_max = (eligible_count - operator_count - failed_count
-                                 - fallback_count)
-      # some fallbacks 'Failed' the download check, others 'Skipped' it
-      # because enough downloads had already succeeded
-      s += ('Excluded: %d (Same Operator %d, Failed/Skipped Download %d, ' +
-            'Excess %d)')%(removed_count, operator_count, failed_count,
-                           excess_to_target_or_max)
-      s += '\n'
-    min_fb = self.fallback_min()
-    min_bw = min_fb._data['measured_bandwidth']
-    max_fb = self.fallback_max()
-    max_bw = max_fb._data['measured_bandwidth']
-    s += 'Bandwidth Range: %.1f - %.1f MByte/s'%(min_bw/(1024.0*1024.0),
-                                                 max_bw/(1024.0*1024.0))
-    s += '\n'
-    s += '*/'
-    if fallback_count < MIN_FALLBACK_COUNT:
-      list_type = 'whitelist'
-      if check_existing:
-        list_type = 'fallback list'
-      # we must have a minimum number of fallbacks so that they are always
-      # reachable, and are in diverse locations
-      s += '\n'
-      s += '#error Fallback Count %d is too low. '%(fallback_count)
-      s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT)
-      s += 'Try adding entries to %s, '%(list_type)
-      s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.'
-    return s
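-
-  # The summary block this produces looks like (values illustrative):
-  #   /* Checked IPv4 DirPorts served a consensus within 15.0s. */
-  #   /*
-  #   Final Count: 148 (Eligible 200, Target 160 (800 * 0.20), Max 200)
-  #   Excluded: 52 (Same Operator 30, Failed/Skipped Download 20, Excess 2)
-  #   Bandwidth Range: 1.3 - 87.0 MByte/s
-  #   */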
-
-# Check the relays in the existing fallback list (FALLBACK_FILE_NAME)
-def process_existing():
-  logging.basicConfig(level=logging.INFO)
-  logging.getLogger('stem').setLevel(logging.INFO)
-  whitelist = {'data': parse_fallback_file(FALLBACK_FILE_NAME),
-               'name': FALLBACK_FILE_NAME,
-               'check_existing': True}
-  list_fallbacks(whitelist, exact=True)
-
-# Generate a new fallback list from the whitelist (WHITELIST_FILE_NAME)
-def process_default():
-  logging.basicConfig(level=logging.WARNING)
-  logging.getLogger('stem').setLevel(logging.WARNING)
-  whitelist = {'data': read_from_file(WHITELIST_FILE_NAME, MAX_LIST_FILE_SIZE),
-               'name': WHITELIST_FILE_NAME,
-               'check_existing': False}
-  list_fallbacks(whitelist, exact=False)
-
-def main():
-  if get_command() == 'check_existing':
-    process_existing()
-  else:
-    process_default()
-
-def get_command():
-  if len(sys.argv) == 2:
-    return sys.argv[1]
-  else:
-    return None
-
-# Log a message about an excluded relay: a warning when re-checking an
-# existing list (where exclusions are unexpected), informational otherwise
-def log_excluded(msg, *args):
-  if get_command() == 'check_existing':
-    logging.warning(msg, *args)
-  else:
-    logging.info(msg, *args)
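-
-# Usage (the script name is assumed; run it from the tor source tree):
-#   updateFallbackDirs.py                  generate a new fallback list
-#   updateFallbackDirs.py check_existing   re-check the current fallback list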
-
-def list_fallbacks(whitelist, exact=False):
-  """ Fetches required onionoo documents and evaluates the
-      fallback directory criteria for each of the relays,
-      passing exact to apply_filter_lists(). """
-  if whitelist['check_existing']:
-    print "/* type=fallback */"
-  else:
-    print "/* type=whitelist */"
-
-  print ("/* version={} */"
-         .format(cleanse_c_multiline_comment(FALLBACK_FORMAT_VERSION)))
-  now = datetime.datetime.utcnow()
-  timestamp = now.strftime('%Y%m%d%H%M%S')
-  print ("/* timestamp={} */"
-         .format(cleanse_c_multiline_comment(timestamp)))
-  # end the header with a separator, to make it easier for parsers
-  print SECTION_SEPARATOR_COMMENT
-
-  logging.warning('Downloading and parsing Onionoo data. ' +
-                  'This may take some time.')
-  # find relays that could be fallbacks
-  candidates = CandidateList()
-  candidates.add_relays()
-
-  # work out how many fallbacks we want
-  guard_count = candidates.count_guards()
-  if FALLBACK_PROPORTION_OF_GUARDS is None:
-    target_count = guard_count
-  else:
-    target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS)
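-  # For example (values illustrative): 800 guards with
-  # FALLBACK_PROPORTION_OF_GUARDS = 0.2 give target_count = 160, which is
-  # then capped at MAX_FALLBACK_COUNT below.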
-
-  # the maximum number of fallbacks is the lesser of the target count and
-  # MAX_FALLBACK_COUNT (if set)
-  if MAX_FALLBACK_COUNT is None:
-    max_count = target_count
-  else:
-    max_count = min(target_count, MAX_FALLBACK_COUNT)
-
-  candidates.compute_fallbacks()
-  prefilter_fallbacks = copy.copy(candidates.fallbacks)
-
-  # filter using the whitelist (or, for check_existing, the current
-  # fallback list)
-  initial_count = len(candidates.fallbacks)
-  excluded_count = candidates.apply_filter_lists(whitelist, exact=exact)
-  print candidates.summarise_filters(initial_count, excluded_count,
-                                     whitelist['check_existing'])
-  eligible_count = len(candidates.fallbacks)
-
-  # calculate the measured bandwidth of each relay, then remove
-  # low-bandwidth relays
-  candidates.calculate_measured_bandwidth()
-  candidates.remove_low_bandwidth_relays()
-
-  # sort the list by bandwidth, then limit the number of fallbacks per
-  # operator: the earlier a fallback is in the list, the more likely it is
-  # to be kept
-  candidates.sort_fallbacks_by_measured_bandwidth()
-  operator_count = 0
-  # only impose the same-operator limits on the final list
-  if not OUTPUT_CANDIDATES:
-    operator_count += candidates.limit_fallbacks_same_ip()
-    operator_count += candidates.limit_fallbacks_same_contact()
-    operator_count += candidates.limit_fallbacks_same_family()
-
-  # check if each candidate can serve a consensus, and truncate the list
-  # to max_count
-  if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
-    logging.warning('Checking consensus download speeds. ' +
-                    'This may take some time.')
-  failed_count = candidates.perform_download_consensus_checks(max_count)
-
-  # work out which fallbacks cache extra-info documents
-  candidates.mark_extra_info_caches()
-
-  # describe the fallback list, so its diversity can be checked
-  candidates.describe_fallback_ip_family()
-  # if we couldn't import the ipaddress module, skip the netblock analysis
-  if HAVE_IPADDRESS:
-    candidates.describe_fallback_netblocks()
-  candidates.describe_fallback_ports()
-  candidates.describe_fallback_extra_info_caches()
-  candidates.describe_fallback_exit_flag()
-
-  # output the summary comment, or an error if no fallbacks were found
-  if len(candidates.fallbacks) > 0:
-    print candidates.summarise_fallbacks(eligible_count, operator_count,
-                                         failed_count, guard_count,
-                                         target_count,
-                                         whitelist['check_existing'])
-  else:
-    print '/* No Fallbacks met criteria */'
-
-  # output the sources of the fallback list
-  for s in fetch_source_list():
-    print describe_fetch_source(s)
-
-  # end the sources with a separator, to make it easier for parsers
-  print SECTION_SEPARATOR_COMMENT
-
-  # sort the list by OUTPUT_SORT_FIELD, so diffs between runs are stable,
-  # then output each fallback entry
-  candidates.sort_fallbacks_by(OUTPUT_SORT_FIELD)
-  for x in candidates.fallbacks:
-    print x.fallbackdir_line(candidates.fallbacks, prefilter_fallbacks)
-
-if __name__ == "__main__":
-  main()