#!/usr/bin/python

# Usage: scripts/maint/updateFallbackDirs.py > src/or/fallback_dirs.inc
#
# Then read the generated list to ensure no-one slipped anything funny into
# their name or contactinfo
#
# Script by weasel, April 2015
# Portions by gsathya & karsten, 2013
# https://trac.torproject.org/projects/tor/attachment/ticket/8374/dir_list.2.py
# Modifications by teor, 2015
import StringIO
import string
import re
import datetime
import gzip
import os.path
import json
import math
import sys
import urllib
import urllib2
import hashlib
import dateutil.parser
# bson_lazy provides bson
#from bson import json_util

import logging
logging.basicConfig(level=logging.DEBUG)
## Top-Level Configuration

# Output all candidate fallbacks, or only output selected fallbacks?
OUTPUT_CANDIDATES = False

## OnionOO Settings

ONIONOO = 'https://onionoo.torproject.org/'
#ONIONOO = 'https://onionoo.thecthulhu.com/'

# Don't bother going out to the Internet, just use the files available locally,
# even if they're very old
LOCAL_FILES_ONLY = False

## Whitelist / Blacklist Filter Settings

# The whitelist contains entries that are included if all attributes match
# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport)
# The blacklist contains (partial) entries that are excluded if any
# sufficiently specific group of attributes matches:
#   IPv4 & DirPort
#   IPv4 & ORPort
#   ID
#   IPv6 & DirPort
#   IPv6 & IPv6 ORPort
# If neither port is included in the blacklist, the entire IP address is
# blacklisted.

# What happens to entries in neither list?
# When True, they are included, when False, they are excluded
INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False

# If an entry is in both lists, what happens?
# When True, it is excluded, when False, it is included
BLACKLIST_EXCLUDES_WHITELIST_ENTRIES = True

WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
BLACKLIST_FILE_NAME = 'scripts/maint/fallback.blacklist'

# The number of bytes we'll read from a filter file before giving up
MAX_LIST_FILE_SIZE = 1024 * 1024
## Eligibility Settings

ADDRESS_AND_PORT_STABLE_DAYS = 120
# What time-weighted-fraction of these flags must FallbackDirs
# equal or exceed?
CUTOFF_RUNNING = .95
CUTOFF_V2DIR = .95
CUTOFF_GUARD = .95
# What time-weighted-fraction of these flags must FallbackDirs
# equal or fall under?
# .00 means no bad exits
PERMITTED_BADEXIT = .00

## List Length Limits

# The target for these parameters is 20% of the guards in the network
# This is around 200 as of October 2015
FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else 0.2
# Limit the number of fallbacks (eliminating lowest by weight)
MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 500
# Emit a C #error if the number of fallbacks is below
MIN_FALLBACK_COUNT = 100

## Fallback Weight Settings

# Any fallback with the Exit flag has its weight multiplied by this fraction
EXIT_WEIGHT_FRACTION = 0.2

# If True, emit a C #error if we can't satisfy various constraints
# If False, emit a C comment instead
STRICT_FALLBACK_WEIGHTS = False

# Limit the proportional weight
# If a single fallback's weight is too high, it will see too many clients
# We reweight using a lower threshold to provide some leeway for:
# * elimination of low weight relays
# * consensus weight changes
# * fallback directory losses over time
# A relay weighted at 1 in 10 fallbacks will see about 10% of clients that
# use the fallback directories. (The 9 directory authorities see a similar
# proportion of clients.)
TARGET_MAX_WEIGHT_FRACTION = 1/10.0
REWEIGHTING_FUDGE_FACTOR = 0.8
MAX_WEIGHT_FRACTION = TARGET_MAX_WEIGHT_FRACTION * REWEIGHTING_FUDGE_FACTOR
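# For example, with the values above, relays are clamped at
# 1/10.0 * 0.8 = 0.08, i.e. 8% of the total fallback weight.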
# If a single fallback's weight is too low, it's pointless adding it.
# (Final weights may be slightly higher than this, due to low weight relays
# being excluded.)
# A relay weighted at 1 in 1000 fallbacks will see about 0.1% of clients.
MIN_WEIGHT_FRACTION = 0.0 if OUTPUT_CANDIDATES else 1/1000.0

## Other Configuration Parameters

# older entries' weights are adjusted with ALPHA^(age in days)
AGE_ALPHA = 0.99

# this factor is used to scale OnionOO entries to [0,1]
ONIONOO_SCALE_ONE = 999.
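# (Onionoo reports time-weighted flag fractions as integers in [0, 999],
# so a relay that always had a flag averages 999, which scales to 1.0.)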
## Parsing Functions

def parse_ts(t):
  return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S")

def remove_bad_chars(raw_string, bad_char_list):
  # Remove each character in the bad_char_list
  escaped_string = raw_string
  for c in bad_char_list:
    escaped_string = escaped_string.replace(c, '')
  return escaped_string

def cleanse_whitespace(raw_string):
  # Replace all whitespace characters with a space
  escaped_string = raw_string
  for c in string.whitespace:
    escaped_string = escaped_string.replace(c, ' ')
  return escaped_string
def cleanse_c_multiline_comment(raw_string):
  # Prevent a malicious / unanticipated string from breaking out
  # of a C-style multiline comment
  # This removes '/*' and '*/'
  # To deal with '//', the end comment must be on its own line
  bad_char_list = '*'
  # Prevent a malicious string from using C nulls
  bad_char_list += '\0'
  # Be safer by removing bad characters entirely
  escaped_string = remove_bad_chars(raw_string, bad_char_list)
  # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
  escaped_string = cleanse_whitespace(escaped_string)
  # Some compilers may further process the content of comments
  # There isn't much we can do to cover every possible case
  # But comment-based directives are typically only advisory
  return escaped_string
def cleanse_c_string(raw_string):
  # Prevent a malicious address/fingerprint string from breaking out
  # of a C-style string
  bad_char_list = '"'
  # Prevent a malicious string from using escapes
  bad_char_list += '\\'
  # Prevent a malicious string from using C nulls
  bad_char_list += '\0'
  # Be safer by removing bad characters entirely
  escaped_string = remove_bad_chars(raw_string, bad_char_list)
  # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
  escaped_string = cleanse_whitespace(escaped_string)
  # Some compilers may further process the content of strings
  # There isn't much we can do to cover every possible case
  # But this typically only results in changes to the string data
  return escaped_string
## OnionOO Source Functions

# a dictionary of source metadata for each onionoo query we've made
fetch_source = {}

# register source metadata for 'what'
# assumes we only retrieve one document for each 'what'
def register_fetch_source(what, url, relays_published, version):
  fetch_source[what] = {}
  fetch_source[what]['url'] = url
  fetch_source[what]['relays_published'] = relays_published
  fetch_source[what]['version'] = version

# list each registered source's 'what'
def fetch_source_list():
  return sorted(fetch_source.keys())

# given 'what', provide a multiline C comment describing the source
def describe_fetch_source(what):
  desc = '/*'
  desc += '\n'
  desc += 'Onionoo Source: '
  desc += cleanse_c_multiline_comment(what)
  desc += ' Date: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published'])
  desc += ' Version: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['version'])
  desc += '\n'
  desc += 'URL: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['url'])
  desc += '\n'
  desc += '*/'
  return desc
## File Processing Functions

def write_to_file(out_str, file_name, max_len):
  try:
    with open(file_name, 'w') as f:
      f.write(out_str[0:max_len])
  except EnvironmentError, error:
    logging.debug('Writing file %s failed: %d: %s'%
                  (file_name,
                   error.errno,
                   error.strerror)
                  )

def read_from_file(file_name, max_len):
  try:
    if os.path.isfile(file_name):
      with open(file_name, 'r') as f:
        return f.read(max_len)
  except EnvironmentError, error:
    logging.debug('Loading file %s failed: %d: %s'%
                  (file_name,
                   error.errno,
                   error.strerror)
                  )
  return None
def load_possibly_compressed_response_json(response):
  if response.info().get('Content-Encoding') == 'gzip':
    buf = StringIO.StringIO(response.read())
    f = gzip.GzipFile(fileobj=buf)
    return json.load(f)
  else:
    return json.load(response)

def load_json_from_file(json_file_name):
  # An exception here may be resolved by deleting the .last_modified
  # and .json files, and re-running the script
  try:
    with open(json_file_name, 'r') as f:
      return json.load(f)
  except EnvironmentError, error:
    raise Exception('Reading not-modified json file %s failed: %d: %s'%
                    (json_file_name,
                     error.errno,
                     error.strerror)
                    )
## OnionOO Functions
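# onionoo_fetch() below caches each query on disk: the query URL is hashed
# with SHA-1 to build stable local file names, a conditional request is
# sent using If-modified-since, and a 304 Not Modified reply that is less
# than six hours old is served from the cached .json file.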
def onionoo_fetch(what, **kwargs):
  params = kwargs
  params['type'] = 'relay'
  #params['limit'] = 10
  params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS,)
  params['last_seen_days'] = '-7'
  params['flag'] = 'V2Dir'
  url = ONIONOO + what + '?' + urllib.urlencode(params)

  # Unfortunately, the URL is too long for some OS filenames,
  # but we still don't want to get files from different URLs mixed up
  base_file_name = what + '-' + hashlib.sha1(url).hexdigest()

  full_url_file_name = base_file_name + '.full_url'
  MAX_FULL_URL_LENGTH = 1024

  last_modified_file_name = base_file_name + '.last_modified'
  MAX_LAST_MODIFIED_LENGTH = 64

  json_file_name = base_file_name + '.json'

  if LOCAL_FILES_ONLY:
    # Read from the local file, don't write to anything
    response_json = load_json_from_file(json_file_name)
  else:
    # store the full URL to a file for debugging
    # no need to compare as long as you trust SHA-1
    write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH)

    request = urllib2.Request(url)
    request.add_header('Accept-encoding', 'gzip')

    # load the last modified date from the file, if it exists
    last_mod_date = read_from_file(last_modified_file_name,
                                   MAX_LAST_MODIFIED_LENGTH)
    if last_mod_date is not None:
      request.add_header('If-modified-since', last_mod_date)

    # Parse datetimes like: Fri, 02 Oct 2015 13:34:14 GMT
    if last_mod_date is not None:
      last_mod = dateutil.parser.parse(last_mod_date)
    else:
      # Never modified - use start of epoch
      last_mod = datetime.datetime.utcfromtimestamp(0)
    # strip any timezone out (in case they're supported in future)
    last_mod = last_mod.replace(tzinfo=None)

    response_code = 0
    try:
      response = urllib2.urlopen(request)
      response_code = response.getcode()
    except urllib2.HTTPError, error:
      response_code = error.code
      # strip any timezone out (to match dateutil.parser)
      six_hours_ago = datetime.datetime.utcnow()
      six_hours_ago = six_hours_ago.replace(tzinfo=None)
      six_hours_ago -= datetime.timedelta(hours=6)
      # Not Modified and still recent enough to be useful (Globe uses 6 hours)
      if response_code == 304:
        if last_mod < six_hours_ago:
          raise Exception("Outdated data from " + url + ": "
                          + str(error.code) + ": " + error.reason)
        else:
          pass
      else:
        raise Exception("Could not get " + url + ": "
                        + str(error.code) + ": " + error.reason)

    if response_code == 200: # OK
      response_json = load_possibly_compressed_response_json(response)

      with open(json_file_name, 'w') as f:
        # use the most compact json representation to save space
        json.dump(response_json, f, separators=(',',':'))

      # store the last modified date in its own file
      if response.info().get('Last-modified') is not None:
        write_to_file(response.info().get('Last-Modified'),
                      last_modified_file_name,
                      MAX_LAST_MODIFIED_LENGTH)

    elif response_code == 304: # Not Modified
      response_json = load_json_from_file(json_file_name)

    else: # Unexpected HTTP response code not covered in the HTTPError above
      raise Exception("Unexpected HTTP response code to " + url + ": "
                      + str(response_code))

  register_fetch_source(what,
                        url,
                        response_json['relays_published'],
                        response_json['version'])

  return response_json
def fetch(what, **kwargs):
  #x = onionoo_fetch(what, **kwargs)
  # don't use sort_keys, as the order of or_addresses is significant
  #print json.dumps(x, indent=4, separators=(',', ': '))
  #sys.exit(0)
  return onionoo_fetch(what, **kwargs)
## Fallback Candidate Class

class Candidate(object):
  CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.now()
                            - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))

  def __init__(self, details):
    for f in ['fingerprint', 'nickname', 'last_changed_address_or_port',
              'consensus_weight', 'or_addresses', 'dir_address']:
      if not f in details: raise Exception("Document has no %s field."%(f,))

    if not 'contact' in details:
      details['contact'] = None
    if not 'flags' in details or details['flags'] is None:
      details['flags'] = []
    details['last_changed_address_or_port'] = parse_ts(
                                      details['last_changed_address_or_port'])
    self._data = details
    self._stable_sort_or_addresses()

    self._fpr = self._data['fingerprint']
    self._running = self._guard = self._v2dir = 0.
    # default to None so is_candidate() works even if add_uptime() is
    # never called for this relay
    self._badexit = None
    self._split_dirport()
    self._compute_orport()
    if self.orport is None:
      raise Exception("Failed to get an orport for %s."%(self._fpr,))
    self._compute_ipv6addr()
    if self.ipv6addr is None:
      logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,))
    # Reduce the weight of exits to EXIT_WEIGHT_FRACTION * consensus_weight
    if self.is_exit():
      current_weight = self._data['consensus_weight']
      exit_weight = current_weight * EXIT_WEIGHT_FRACTION
      self._data['original_consensus_weight'] = current_weight
      self._data['consensus_weight'] = exit_weight
  def _stable_sort_or_addresses(self):
    # replace self._data['or_addresses'] with a stable ordering,
    # sorting the secondary addresses in string order
    # leave the received order in self._data['or_addresses_raw']
    self._data['or_addresses_raw'] = self._data['or_addresses']
    or_address_primary = self._data['or_addresses'][:1]
    # subsequent entries in the or_addresses array are in an arbitrary order
    # so we stabilise the addresses by sorting them in string order
    or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:])
    or_addresses_stable = or_address_primary + or_addresses_secondaries_stable
    self._data['or_addresses'] = or_addresses_stable

  def get_fingerprint(self):
    return self._fpr
  # is_valid_ipv[46]_address by gsathya, karsten, 2013
  @staticmethod
  def is_valid_ipv4_address(address):
    if not isinstance(address, (str, unicode)):
      return False

    # check if there are four period separated values
    if address.count(".") != 3:
      return False

    # checks that each value in the octet are decimal values between 0-255
    for entry in address.split("."):
      if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
        return False
      elif entry[0] == "0" and len(entry) > 1:
        return False # leading zeros, for instance in "1.2.3.001"

    return True
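  # Check [bracketed] IPv6 syntax: at most eight colon-separated groups,
  # at most one '::' run of collapsed zero groups, and an optional embedded
  # IPv4 address as the final group. For example, '[2001:db8::1]' is
  # accepted, while '[1:2:3:4:5:6:7:8:9]' is rejected (too many groups).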
  @staticmethod
  def is_valid_ipv6_address(address):
    if not isinstance(address, (str, unicode)):
      return False

    # remove brackets
    address = address[1:-1]

    # addresses are made up of eight colon separated groups of four hex digits
    # with leading zeros being optional
    # https://en.wikipedia.org/wiki/IPv6#Address_format
    colon_count = address.count(":")

    if colon_count > 7:
      return False # too many groups
    elif colon_count != 7 and not "::" in address:
      return False # not enough groups and none are collapsed
    elif address.count("::") > 1 or ":::" in address:
      return False # multiple groupings of zeros can't be collapsed

    found_ipv4_on_previous_entry = False
    for entry in address.split(":"):
      # If an IPv6 address has an embedded IPv4 address,
      # it must be the last entry
      if found_ipv4_on_previous_entry:
        return False
      # case-insensitive hex digits: 0-9, a-f, A-F
      if not re.match("^[0-9a-fA-F]{0,4}$", entry):
        if not Candidate.is_valid_ipv4_address(entry):
          return False
        else:
          found_ipv4_on_previous_entry = True

    return True
  def _split_dirport(self):
    # Split the dir_address into dirip and dirport
    (self.dirip, _dirport) = self._data['dir_address'].split(':', 2)
    self.dirport = int(_dirport)

  def _compute_orport(self):
    # Choose the first ORPort that's on the same IPv4 address as the DirPort.
    # In rare circumstances, this might not be the primary ORPort address.
    # However, _stable_sort_or_addresses() ensures we choose the same one
    # every time, even if onionoo changes the order of the secondaries.
    self._split_dirport()
    self.orport = None
    for i in self._data['or_addresses']:
      if i != self._data['or_addresses'][0]:
        logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i))
      (ipaddr, port) = i.rsplit(':', 1)
      if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr):
        self.orport = int(port)
        return
  def _compute_ipv6addr(self):
    # Choose the first IPv6 address that uses the same port as the ORPort
    # Or, choose the first IPv6 address in the list
    # _stable_sort_or_addresses() ensures we choose the same IPv6 address
    # every time, even if onionoo changes the order of the secondaries.
    self.ipv6addr = None
    self.ipv6orport = None
    # Choose the first IPv6 address that uses the same port as the ORPort
    for i in self._data['or_addresses']:
      (ipaddr, port) = i.rsplit(':', 1)
      # port is a string here, so cast before comparing with the ORPort
      if (int(port) == self.orport) and Candidate.is_valid_ipv6_address(ipaddr):
        self.ipv6addr = ipaddr
        self.ipv6orport = port
        return
    # Choose the first IPv6 address in the list
    for i in self._data['or_addresses']:
      (ipaddr, port) = i.rsplit(':', 1)
      if Candidate.is_valid_ipv6_address(ipaddr):
        self.ipv6addr = ipaddr
        self.ipv6orport = port
        return
  @staticmethod
  def _extract_generic_history(history, which='unknown'):
    # given a tree like this:
    #   {
    #     "1_month": {
    #       "count": 187,
    #       "factor": 0.001001001001001001,
    #       "first": "2015-02-27 06:00:00",
    #       "interval": 14400,
    #       "last": "2015-03-30 06:00:00",
    #       "values": [
    #         999,
    #         999
    #       ]
    #     },
    #     "1_week": {
    #       "count": 169,
    #       "factor": 0.001001001001001001,
    #       "first": "2015-03-23 07:30:00",
    #       "interval": 3600,
    #       "last": "2015-03-30 07:30:00",
    #       "values": [ ...]
    #     },
    #     "1_year": {
    #       "count": 177,
    #       "factor": 0.001001001001001001,
    #       "first": "2014-04-11 00:00:00",
    #       "interval": 172800,
    #       "last": "2015-03-29 00:00:00",
    #       "values": [ ...]
    #     },
    #     "3_months": {
    #       "count": 185,
    #       "factor": 0.001001001001001001,
    #       "first": "2014-12-28 06:00:00",
    #       "interval": 43200,
    #       "last": "2015-03-30 06:00:00",
    #       "values": [ ...]
    #     }
    #   },
    # extract exactly one piece of data per time interval,
    # using smaller intervals where available.
    #
    # returns list of (age, length, value) dictionaries.

    generic_history = []

    periods = history.keys()
    periods.sort(key = lambda x: history[x]['interval'])
    now = datetime.datetime.now()
    newest = now
    for p in periods:
      h = history[p]
      interval = datetime.timedelta(seconds = h['interval'])
      this_ts = parse_ts(h['last'])

      if (len(h['values']) != h['count']):
        logging.warn('Inconsistent value count in %s document for %s'
                     %(p, which))
      for v in reversed(h['values']):
        if (this_ts <= newest):
          agt1 = now - this_ts
          agt2 = interval
          agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600)
                     * 10**6) / 10**6
          agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600)
                     * 10**6) / 10**6
          generic_history.append(
            { 'age': agetmp1,
              'length': agetmp2,
              'value': v
            })
          newest = this_ts
        this_ts -= interval

      if (this_ts + interval != parse_ts(h['first'])):
        logging.warn('Inconsistent time information in %s document for %s'
                     %(p, which))

    #print json.dumps(generic_history, sort_keys=True,
    #                  indent=4, separators=(',', ': '))
    return generic_history
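  # Average the (age, length, value) entries produced above: each entry is
  # weighted by its interval length, discounted by AGE_ALPHA per day of age,
  # and entries older than ADDRESS_AND_PORT_STABLE_DAYS are skipped.
  # Returns 0.0 when there is no usable data; otherwise the result stays on
  # Onionoo's 0-999 scale, and callers divide by ONIONOO_SCALE_ONE.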
  @staticmethod
  def _avg_generic_history(generic_history):
    a = []
    for i in generic_history:
      if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600):
        continue
      if (i['length'] is not None
          and i['age'] is not None
          and i['value'] is not None):
        w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24))
        a.append( (i['value'] * w, w) )

    sv = math.fsum(map(lambda x: x[0], a))
    sw = math.fsum(map(lambda x: x[1], a))

    if sw == 0.0:
      svw = 0.0
    else:
      svw = sv/sw
    return svw
  def _add_generic_history(self, history):
    # unused debugging helper: print the history periods sorted by
    # interval length
    periods = history.keys()
    periods.sort(key = lambda x: history[x]['interval'])
    print periods

  def add_running_history(self, history):
    pass
  def add_uptime(self, uptime):
    logging.debug('Adding uptime %s.'%(self._fpr,))

    # flags we care about: Running, V2Dir, Guard
    if not 'flags' in uptime:
      logging.debug('No flags in document for %s.'%(self._fpr,))
      return

    for f in ['Running', 'Guard', 'V2Dir']:
      if not f in uptime['flags']:
        logging.debug('No %s in flags for %s.'%(f, self._fpr,))
        return

    running = self._extract_generic_history(uptime['flags']['Running'],
                                            '%s-Running'%(self._fpr))
    guard = self._extract_generic_history(uptime['flags']['Guard'],
                                          '%s-Guard'%(self._fpr))
    v2dir = self._extract_generic_history(uptime['flags']['V2Dir'],
                                          '%s-V2Dir'%(self._fpr))
    if 'BadExit' in uptime['flags']:
      badexit = self._extract_generic_history(uptime['flags']['BadExit'],
                                              '%s-BadExit'%(self._fpr))

    self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE
    self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE
    self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE
    self._badexit = None
    if 'BadExit' in uptime['flags']:
      self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE
  def is_candidate(self):
    if (self._data['last_changed_address_or_port'] >
        self.CUTOFF_ADDRESS_AND_PORT_STABLE):
      logging.debug('%s not a candidate: changed address/port recently (%s)',
                    self._fpr, self._data['last_changed_address_or_port'])
      return False
    if self._running < CUTOFF_RUNNING:
      logging.debug('%s not a candidate: running avg too low (%lf)',
                    self._fpr, self._running)
      return False
    if self._v2dir < CUTOFF_V2DIR:
      logging.debug('%s not a candidate: v2dir avg too low (%lf)',
                    self._fpr, self._v2dir)
      return False
    if self._badexit is not None and self._badexit > PERMITTED_BADEXIT:
      logging.debug('%s not a candidate: badexit avg too high (%lf)',
                    self._fpr, self._badexit)
      return False
    # if the relay doesn't report a version, also exclude the relay
    if (not self._data.has_key('recommended_version')
        or not self._data['recommended_version']):
      return False
    if self._guard < CUTOFF_GUARD:
      logging.debug('%s not a candidate: guard avg too low (%lf)',
                    self._fpr, self._guard)
      return False
    return True
  def is_in_whitelist(self, relaylist):
    """ A fallback matches if each key in the whitelist line matches:
          ipv4
          dirport
          orport
          id
          ipv6 address and port (if present)
        If the fallback has an ipv6 key, the whitelist line must also have
        it, and vice versa, otherwise they don't match. """
    for entry in relaylist:
      if entry['ipv4'] != self.dirip:
        continue
      if int(entry['dirport']) != self.dirport:
        continue
      if int(entry['orport']) != self.orport:
        continue
      if entry['id'] != self._fpr:
        continue
      if (entry.has_key('ipv6')
          and self.ipv6addr is not None and self.ipv6orport is not None):
        # if both entry and fallback have an ipv6 address, compare them
        if entry['ipv6'] != self.ipv6addr + ':' + self.ipv6orport:
          continue
      # if the fallback has an IPv6 address but the whitelist entry
      # doesn't, or vice versa, the whitelist entry doesn't match
      elif entry.has_key('ipv6') and self.ipv6addr is None:
        continue
      elif not entry.has_key('ipv6') and self.ipv6addr is not None:
        continue
      return True
    return False
  def is_in_blacklist(self, relaylist):
    """ A fallback matches a blacklist line if a sufficiently specific group
        of attributes matches:
          ipv4 & dirport
          ipv4 & orport
          id
          ipv6 & dirport
          ipv6 & ipv6 orport
        If the fallback and the blacklist line both have an ipv6 key,
        their values will be compared, otherwise, they will be ignored.
        If there is no dirport and no orport, the entry matches all relays on
        that ip. """
    for entry in relaylist:
      for key in entry:
        value = entry[key]
        if key == 'ipv4' and value == self.dirip:
          # if the dirport is present, check it too
          if entry.has_key('dirport'):
            if int(entry['dirport']) == self.dirport:
              return True
          # if the orport is present, check it too
          elif entry.has_key('orport'):
            if int(entry['orport']) == self.orport:
              return True
          else:
            return True
        if key == 'id' and value == self._fpr:
          return True
        if (key == 'ipv6'
            and self.ipv6addr is not None and self.ipv6orport is not None):
          # if both entry and fallback have an ipv6 address, compare them,
          # otherwise, disregard ipv6 addresses
          if value == self.ipv6addr + ':' + self.ipv6orport:
            # if the dirport is present, check it too
            if entry.has_key('dirport'):
              if int(entry['dirport']) == self.dirport:
                return True
            # if the orport is present, check it too
            elif entry.has_key('orport'):
              if int(entry['orport']) == self.orport:
                return True
            else:
              return True
    return False
  def is_exit(self):
    return 'Exit' in self._data['flags']

  def is_guard(self):
    return 'Guard' in self._data['flags']

  def fallback_weight_fraction(self, total_weight):
    return float(self._data['consensus_weight']) / total_weight

  # return the original consensus weight, if it exists,
  # or, if not, return the consensus weight
  def original_consensus_weight(self):
    if self._data.has_key('original_consensus_weight'):
      return self._data['original_consensus_weight']
    else:
      return self._data['consensus_weight']

  def original_fallback_weight_fraction(self, total_weight):
    return float(self.original_consensus_weight()) / total_weight
  def fallbackdir_line(self, total_weight, original_total_weight):
    # /*
    # nickname
    # flags
    # weight / total (percentage)
    # [original weight / original total (original percentage)]
    # [contact]
    # */
    # "address:dirport orport=port id=fingerprint"
    # "[ipv6=addr:orport]"
    # "weight=num",
    #
    # Multiline C comment
    s = '/*'
    s += '\n'
    s += cleanse_c_multiline_comment(self._data['nickname'])
    s += '\n'
    s += 'Flags: '
    s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags'])))
    s += '\n'
    weight = self._data['consensus_weight']
    percent_weight = self.fallback_weight_fraction(total_weight)*100
    s += 'Fallback Weight: %d / %d (%.3f%%)'%(weight, total_weight,
                                              percent_weight)
    s += '\n'
    o_weight = self.original_consensus_weight()
    if o_weight != weight:
      o_percent_weight = self.original_fallback_weight_fraction(
                                                 original_total_weight)*100
      s += 'Consensus Weight: %d / %d (%.3f%%)'%(o_weight,
                                                 original_total_weight,
                                                 o_percent_weight)
      s += '\n'
    if self._data['contact'] is not None:
      s += cleanse_c_multiline_comment(self._data['contact'])
      s += '\n'
    s += '*/'
    s += '\n'
    # Multi-Line C string with trailing comma (part of a string list)
    # This makes it easier to diff the file, and remove IPv6 lines using grep
    # Integers don't need escaping
    s += '"%s orport=%d id=%s"'%(
            cleanse_c_string(self._data['dir_address']),
            self.orport,
            cleanse_c_string(self._fpr))
    s += '\n'
    if self.ipv6addr is not None:
      s += '" ipv6=%s:%s"'%(
              cleanse_c_string(self.ipv6addr),
              cleanse_c_string(self.ipv6orport))
      s += '\n'
    s += '" weight=%d",'%(weight)
    return s
## Fallback Candidate List Class

class CandidateList(dict):
  def __init__(self):
    pass

  def _add_relay(self, details):
    if not 'dir_address' in details: return
    c = Candidate(details)
    self[ c.get_fingerprint() ] = c

  def _add_uptime(self, uptime):
    try:
      fpr = uptime['fingerprint']
    except KeyError:
      raise Exception("Document has no fingerprint field.")

    try:
      c = self[fpr]
    except KeyError:
      logging.debug('Got unknown relay %s in uptime document.'%(fpr,))
      return

    c.add_uptime(uptime)

  def _add_details(self):
    logging.debug('Loading details document.')
    d = fetch('details',
        fields=('fingerprint,nickname,contact,last_changed_address_or_port,' +
                'consensus_weight,or_addresses,dir_address,' +
                'recommended_version,flags'))
    logging.debug('Loading details document done.')

    if not 'relays' in d: raise Exception("No relays found in document.")

    for r in d['relays']: self._add_relay(r)

  def _add_uptimes(self):
    logging.debug('Loading uptime document.')
    d = fetch('uptime')
    logging.debug('Loading uptime document done.')

    if not 'relays' in d: raise Exception("No relays found in document.")

    for r in d['relays']: self._add_uptime(r)

  def add_relays(self):
    self._add_details()
    self._add_uptimes()

  def count_guards(self):
    guard_count = 0
    for fpr in self.keys():
      if self[fpr].is_guard():
        guard_count += 1
    return guard_count

  # Find fallbacks that fit the uptime, stability, and flags criteria
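  # The resulting list is sorted by consensus weight, descending; later
  # truncation and clamping steps rely on this order.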
  def compute_fallbacks(self):
    self.fallbacks = map(lambda x: self[x],
                         sorted(
                           filter(lambda x: self[x].is_candidate(),
                                  self.keys()),
                           key=lambda x: self[x]._data['consensus_weight'],
                           reverse=True)
                         )
  @staticmethod
  def load_relaylist(file_name):
    """ Read each line in the file, and parse it like a FallbackDir line:
        an IPv4 address and optional port:
          <IPv4 address>:<port>
        which are parsed into dictionary entries:
          ipv4=<IPv4 address>
          dirport=<port>
        followed by a series of key=value entries:
          orport=<port>
          id=<fingerprint>
          ipv6=<IPv6 address>:<IPv6 orport>
        each line's key/value pairs are placed in a dictionary,
        (of string -> string key/value pairs),
        and these dictionaries are placed in an array.
        comments start with # and are ignored """
    relaylist = []
    file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE)
    if file_data is None:
      return relaylist

    for line in file_data.split('\n'):
      relay_entry = {}
      # ignore comments
      line_comment_split = line.split('#')
      line = line_comment_split[0]
      # cleanup whitespace
      line = cleanse_whitespace(line)
      line = line.strip()
      if len(line) == 0:
        continue
      for item in line.split(' '):
        item = item.strip()
        if len(item) == 0:
          continue
        key_value_split = item.split('=')
        kvl = len(key_value_split)
        if kvl < 1 or kvl > 2:
          print '#error Bad %s item: %s, format is key=value.'%(
                                                         file_name, item)
        if kvl == 1:
          # assume that entries without a key are the ipv4 address,
          # perhaps with a dirport
          ipv4_maybe_dirport = key_value_split[0]
          ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':')
          dirl = len(ipv4_maybe_dirport_split)
          if dirl < 1 or dirl > 2:
            print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%(
                                                         file_name, item)
          if dirl >= 1:
            relay_entry['ipv4'] = ipv4_maybe_dirport_split[0]
          if dirl == 2:
            relay_entry['dirport'] = ipv4_maybe_dirport_split[1]
        elif kvl == 2:
          relay_entry[key_value_split[0]] = key_value_split[1]
      relaylist.append(relay_entry)
    return relaylist
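  # For example, the list line '1.2.3.4:80 orport=443 id=ABCD' parses to
  # {'ipv4': '1.2.3.4', 'dirport': '80', 'orport': '443', 'id': 'ABCD'}.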
  # apply the fallback whitelist and blacklist
  def apply_filter_lists(self):
    excluded_count = 0
    logging.debug('Applying whitelist and blacklist.')
    # parse the whitelist and blacklist
    whitelist = self.load_relaylist(WHITELIST_FILE_NAME)
    blacklist = self.load_relaylist(BLACKLIST_FILE_NAME)
    filtered_fallbacks = []
    for f in self.fallbacks:
      in_whitelist = f.is_in_whitelist(whitelist)
      in_blacklist = f.is_in_blacklist(blacklist)
      if in_whitelist and in_blacklist:
        if BLACKLIST_EXCLUDES_WHITELIST_ENTRIES:
          # exclude
          excluded_count += 1
          logging.debug('Excluding %s: in both blacklist and whitelist.' %
                        f._fpr)
        else:
          # include
          filtered_fallbacks.append(f)
      elif in_whitelist:
        # include
        filtered_fallbacks.append(f)
      elif in_blacklist:
        # exclude
        excluded_count += 1
        logging.debug('Excluding %s: in blacklist.' %
                      f._fpr)
      else:
        if INCLUDE_UNLISTED_ENTRIES:
          # include
          filtered_fallbacks.append(f)
        else:
          # exclude
          excluded_count += 1
          logging.debug('Excluding %s: in neither blacklist nor whitelist.' %
                        f._fpr)
    self.fallbacks = filtered_fallbacks
    return excluded_count
  @staticmethod
  def summarise_filters(initial_count, excluded_count):
    return '/* Whitelist & blacklist excluded %d of %d candidates. */'%(
            excluded_count, initial_count)
  # Remove any fallbacks in excess of MAX_FALLBACK_COUNT,
  # starting with the lowest-weighted fallbacks
  # total_weight should be recalculated after calling this
  def exclude_excess_fallbacks(self):
    if MAX_FALLBACK_COUNT is not None:
      self.fallbacks = self.fallbacks[:MAX_FALLBACK_COUNT]

  # Clamp the weight of all fallbacks to MAX_WEIGHT_FRACTION * total_weight
  # fallbacks are kept sorted, but since excessive weights are reduced to
  # the maximum acceptable weight, these relays end up with equal weights
  def clamp_high_weight_fallbacks(self, total_weight):
    if MAX_WEIGHT_FRACTION * len(self.fallbacks) < 1.0:
      error_str  = 'Max Fallback Weight %.3f%% is unachievable'%(
                                                    MAX_WEIGHT_FRACTION*100)
      error_str += ' with Current Fallback Count %d.'%(len(self.fallbacks))
      if STRICT_FALLBACK_WEIGHTS:
        print '#error ' + error_str
      else:
        print '/* ' + error_str + ' */'
    relays_clamped = 0
    max_acceptable_weight = total_weight * MAX_WEIGHT_FRACTION
    for f in self.fallbacks:
      frac_weight = f.fallback_weight_fraction(total_weight)
      if frac_weight > MAX_WEIGHT_FRACTION:
        relays_clamped += 1
        current_weight = f._data['consensus_weight']
        # if we already have an original weight, keep it
        if (not f._data.has_key('original_consensus_weight')
            or f._data['original_consensus_weight'] == current_weight):
          f._data['original_consensus_weight'] = current_weight
        f._data['consensus_weight'] = max_acceptable_weight
    return relays_clamped
  # Remove any fallbacks with weights lower than MIN_WEIGHT_FRACTION
  # total_weight should be recalculated after calling this
  def exclude_low_weight_fallbacks(self, total_weight):
    self.fallbacks = filter(
            lambda x:
            x.fallback_weight_fraction(total_weight) >= MIN_WEIGHT_FRACTION,
            self.fallbacks)

  def fallback_weight_total(self):
    return sum(f._data['consensus_weight'] for f in self.fallbacks)
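  # The fallback list is sorted by weight, descending, so the last entry has
  # the minimum weight and the first has the maximum. Both helpers return a
  # Candidate object (or None if the list is empty), not a raw weight.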
  def fallback_min_weight(self):
    if len(self.fallbacks) > 0:
      return self.fallbacks[-1]
    else:
      return None

  def fallback_max_weight(self):
    if len(self.fallbacks) > 0:
      return self.fallbacks[0]
    else:
      return None
  def summarise_fallbacks(self, eligible_count, eligible_weight,
                          relays_clamped, clamped_weight,
                          guard_count, target_count, max_count):
    # Report:
    #  the number of fallback directories (with min & max limits);
    #    #error if below minimum count
    #  the total weight, min & max fallback proportions
    #    #error if outside max weight proportion
    #
    # Multiline C comment with #error if things go bad
    s = '/*'
    s += '\n'
    s += 'Fallback Directory Summary'
    s += '\n'
    # Integers don't need escaping in C comments
    fallback_count = len(self.fallbacks)
    if FALLBACK_PROPORTION_OF_GUARDS is None:
      fallback_proportion = ''
    else:
      fallback_proportion = ' (%d * %f)'%(guard_count,
                                          FALLBACK_PROPORTION_OF_GUARDS)
    s += 'Final Count: %d (Eligible %d, Usable %d, Target %d%s'%(
            min(max_count, fallback_count),
            eligible_count,
            fallback_count,
            target_count,
            fallback_proportion)
    if MAX_FALLBACK_COUNT is not None:
      s += ', Clamped to %d'%(MAX_FALLBACK_COUNT)
    s += ')\n'
    if fallback_count < MIN_FALLBACK_COUNT:
      s += '*/'
      s += '\n'
      # We must have a minimum number of fallbacks so they are always
      # reachable, and are in diverse locations
      s += '#error Fallback Count %d is too low. '%(fallback_count)
      s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT)
      s += 'Try adding entries to the whitelist, '
      s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.'
      s += '\n'
      s += '/*'
      s += '\n'
    total_weight = self.fallback_weight_total()
    min_fb = self.fallback_min_weight()
    min_weight = min_fb._data['consensus_weight']
    min_percent = min_fb.fallback_weight_fraction(total_weight)*100.0
    max_fb = self.fallback_max_weight()
    max_weight = max_fb._data['consensus_weight']
    max_frac = max_fb.fallback_weight_fraction(total_weight)
    max_percent = max_frac*100.0
    s += 'Final Weight: %d (Eligible %d)'%(total_weight, eligible_weight)
    s += '\n'
    s += 'Max Weight: %d (%.3f%%) (Clamped to %.3f%%)'%(
            max_weight,
            max_percent,
            TARGET_MAX_WEIGHT_FRACTION*100)
    s += '\n'
    s += 'Min Weight: %d (%.3f%%) (Clamped to %.3f%%)'%(
            min_weight,
            min_percent,
            MIN_WEIGHT_FRACTION*100)
    s += '\n'
    if eligible_count != fallback_count:
      s += 'Excluded: %d (Clamped, Below Target, or Low Weight)'%(
              eligible_count - fallback_count)
      s += '\n'
    if relays_clamped > 0:
      s += 'Clamped: %d (%.3f%%) Excess Weight, '%(
              clamped_weight,
              (100.0 * clamped_weight) / total_weight)
      s += '%d High Weight Fallbacks (%.1f%%)'%(
              relays_clamped,
              (100.0 * relays_clamped) / fallback_count)
      s += '\n'
    s += '*/'
    if max_frac > TARGET_MAX_WEIGHT_FRACTION:
      s += '\n'
      # We must restrict the maximum fallback weight, so an adversary
      # at or near the fallback doesn't see too many clients
      error_str  = 'Max Fallback Weight %.3f%% is too high. '%(max_frac*100)
      error_str += 'Must be at most %.3f%% for client anonymity.'%(
                                              TARGET_MAX_WEIGHT_FRACTION*100)
      if STRICT_FALLBACK_WEIGHTS:
        s += '#error ' + error_str
      else:
        s += '/* ' + error_str + ' */'
    return s
## Main Function
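# Overall flow: fetch the Onionoo details and uptime documents, filter
# candidates by stability and flag criteria, apply the whitelist and
# blacklist, truncate to the target count, clamp and prune by weight,
# then emit the C fallback list (with a summary) on stdout.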
def list_fallbacks():
  """ Fetches required onionoo documents and evaluates the
      fallback directory criteria for each of the relays """
  candidates = CandidateList()
  candidates.add_relays()

  guard_count = candidates.count_guards()
  if FALLBACK_PROPORTION_OF_GUARDS is None:
    target_count = guard_count
  else:
    target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS)
  # the maximum number of fallbacks is the least of:
  # - the target fallback count (FALLBACK_PROPORTION_OF_GUARDS * guard count)
  # - the maximum fallback count (MAX_FALLBACK_COUNT)
  if MAX_FALLBACK_COUNT is None:
    max_count = guard_count
  else:
    max_count = min(target_count, MAX_FALLBACK_COUNT)

  candidates.compute_fallbacks()

  initial_count = len(candidates.fallbacks)
  excluded_count = candidates.apply_filter_lists()
  print candidates.summarise_filters(initial_count, excluded_count)

  eligible_count = len(candidates.fallbacks)
  eligible_weight = candidates.fallback_weight_total()

  # print the raw fallback list
  #total_weight = candidates.fallback_weight_total()
  #for x in candidates.fallbacks:
  #  print x.fallbackdir_line(total_weight, total_weight)

  # When candidates are excluded, total_weight decreases, and
  # the proportional weight of other candidates increases.
  candidates.exclude_excess_fallbacks()
  total_weight = candidates.fallback_weight_total()

  # When candidates are reweighted, total_weight decreases, and
  # the proportional weight of other candidates increases.
  # Previously low-weight candidates might obtain sufficient proportional
  # weights to be included.
  # Save the weight at which we reweighted fallbacks for the summary.
  pre_clamp_total_weight = total_weight
  relays_clamped = candidates.clamp_high_weight_fallbacks(total_weight)

  # When candidates are excluded, total_weight decreases, and
  # the proportional weight of other candidates increases.
  # No new low weight candidates will be created during exclusions.
  # However, high weight candidates may increase over the maximum proportion.
  # This should not be an issue, except in pathological cases.
  candidates.exclude_low_weight_fallbacks(total_weight)
  total_weight = candidates.fallback_weight_total()

  # check we haven't exceeded TARGET_MAX_WEIGHT_FRACTION
  # since reweighting preserves the original sort order,
  # the maximum weights will be at the head of the list
  if len(candidates.fallbacks) > 0:
    max_weight_fb = candidates.fallback_max_weight()
    max_weight = max_weight_fb.fallback_weight_fraction(total_weight)
    if max_weight > TARGET_MAX_WEIGHT_FRACTION:
      error_str = 'Maximum fallback weight: %.3f%% exceeds target %.3f%%. '%(
                                          max_weight*100,
                                          TARGET_MAX_WEIGHT_FRACTION*100)
      error_str += 'Try decreasing REWEIGHTING_FUDGE_FACTOR.'
      if STRICT_FALLBACK_WEIGHTS:
        print '#error ' + error_str
      else:
        print '/* ' + error_str + ' */'
    print candidates.summarise_fallbacks(eligible_count, eligible_weight,
                                         relays_clamped,
                                         pre_clamp_total_weight - total_weight,
                                         guard_count, target_count, max_count)
  else:
    print '/* No Fallbacks met criteria */'

  for s in fetch_source_list():
    print describe_fetch_source(s)

  for x in candidates.fallbacks[:max_count]:
    print x.fallbackdir_line(total_weight, pre_clamp_total_weight)
    #print json.dumps(candidates[x]._data, sort_keys=True, indent=4,
    #                  separators=(',', ': '), default=json_util.default)

if __name__ == "__main__":
  list_fallbacks()