#!/usr/bin/python
# Usage: scripts/maint/updateFallbackDirs.py > src/or/fallback_dirs.inc
# Needs stem available in your PYTHONPATH, or just ln -s ../stem/stem .
#
# Then read the generated list to ensure no-one slipped anything funny into
# their name or contactinfo

# Script by weasel, April 2015
# Portions by gsathya & karsten, 2013
# https://trac.torproject.org/projects/tor/attachment/ticket/8374/dir_list.2.py
# Modifications by teor, 2015

import StringIO
import string
import re
import datetime
import gzip
import os.path
import json
import math
import sys
import urllib
import urllib2
import hashlib
import dateutil.parser
# bson_lazy provides bson
#from bson import json_util
import copy

from stem.descriptor.remote import DescriptorDownloader

import logging
# INFO tells you why each relay was included or excluded
# WARN tells you about potential misconfigurations
logging.basicConfig(level=logging.WARNING)

## Top-Level Configuration

# Output all candidate fallbacks, or only output selected fallbacks?
OUTPUT_CANDIDATES = False

# Perform DirPort checks over IPv4?
# Change this to False if IPv4 doesn't work for you, or if you don't want to
# download a consensus for each fallback
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True

# Perform DirPort checks over IPv6?
# If you know IPv6 works for you, set this to True
# This will exclude IPv6 relays without an IPv6 DirPort configured
# So it's best left at False until #18394 is implemented
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV6_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else False

# Output fallback name, flags, and ContactInfo in a C comment?
OUTPUT_COMMENTS = True if OUTPUT_CANDIDATES else False

# Output the number of matching ContactInfo entries in the fallback list
# or the blacklist? Useful if you're trying to contact operators
CONTACT_COUNT = True if OUTPUT_CANDIDATES else False
CONTACT_BLACKLIST_COUNT = True if OUTPUT_CANDIDATES else False

## OnionOO Settings

ONIONOO = 'https://onionoo.torproject.org/'
#ONIONOO = 'https://onionoo.thecthulhu.com/'

# Don't bother going out to the Internet, just use the files available locally,
# even if they're very old
LOCAL_FILES_ONLY = False

## Whitelist / Blacklist Filter Settings

# The whitelist contains entries that are included if all attributes match
# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport)
# The blacklist contains (partial) entries that are excluded if any
# sufficiently specific group of attributes matches:
#   IPv4 & DirPort
#   IPv4 & ORPort
#   ID
#   IPv6 & DirPort
#   IPv6 & IPv6 ORPort
# If neither port is included in the blacklist, the entire IP address is
# blacklisted.

# What happens to entries in neither list?
# When True, they are included, when False, they are excluded
INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False

# If an entry is in both lists, what happens?
# When True, it is excluded, when False, it is included
BLACKLIST_EXCLUDES_WHITELIST_ENTRIES = True

WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
BLACKLIST_FILE_NAME = 'scripts/maint/fallback.blacklist'

# The number of bytes we'll read from a filter file before giving up
MAX_LIST_FILE_SIZE = 1024 * 1024

## Eligibility Settings

# Reduced due to a bug in tor where a relay submits a 0 DirPort when restarted
# This causes OnionOO to (correctly) reset its stability timer
# This issue will be fixed in 0.2.7.7 and 0.2.8.2
# Until then, the CUTOFFs below ensure a decent level of stability.
ADDRESS_AND_PORT_STABLE_DAYS = 7

# What time-weighted-fraction of these flags must FallbackDirs
# equal or exceed?
CUTOFF_RUNNING = .95
CUTOFF_V2DIR = .95
CUTOFF_GUARD = .95

# What time-weighted-fraction of these flags must FallbackDirs
# equal or fall under?
# .00 means no bad exits
PERMITTED_BADEXIT = .00

# older entries' weights are adjusted with ALPHA^(age in days)
AGE_ALPHA = 0.99

# this factor is used to scale OnionOO entries to [0,1]
ONIONOO_SCALE_ONE = 999.
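# e.g. a raw uptime history value of 999 scales to
# 999 / ONIONOO_SCALE_ONE = 1.0 before being compared to the CUTOFFs above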

## Fallback Count Limits

# The target for these parameters is 20% of the guards in the network
# This is around 200 as of October 2015
_FB_POG = 0.2
FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG

# We want exactly 100 fallbacks for the initial release
# This gives us scope to add extra fallbacks to the list as needed
# Limit the number of fallbacks (eliminating lowest by advertised bandwidth)
MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 100
# Emit a C #error if the number of fallbacks is below this minimum
MIN_FALLBACK_COUNT = 100

## Fallback Bandwidth Requirements

# Any fallback with the Exit flag has its bandwidth multiplied by this fraction
# to make sure we aren't further overloading exits
# (Set to 1.0, because we asked that only lightly loaded exits opt-in,
# and the extra load really isn't that much for large relays.)
EXIT_BANDWIDTH_FRACTION = 1.0

# If a single fallback's bandwidth is too low, it's pointless adding it
# We expect fallbacks to handle an extra 30 kilobytes per second of traffic
# Make sure they can support a hundred times the expected extra load
# (Use 102.4 to make it come out nicely in MB/s)
# We convert this to a consensus weight before applying the filter,
# because all the bandwidth amounts are specified by the relay
MIN_BANDWIDTH = 102.4 * 30.0 * 1024.0
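# i.e. 102.4 * 30.0 * 1024.0 = 3,145,728 bytes/s, which is exactly 3.0 MB/s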

# Clients will time out after 30 seconds trying to download a consensus
# So allow fallback directories half that to deliver a consensus
# The exact download times might change based on the network connection
# running this script, but only by a few seconds
# There is also about a second of python overhead
CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0
# If the relay fails a consensus check, retry the download
# This avoids delisting a relay due to transient network conditions
CONSENSUS_DOWNLOAD_RETRY = True

## Fallback Weights for Client Selection

# All fallback weights are equal, and set to the value below
# Authorities are weighted 1.0 by default
# Clients use these weights to select fallbacks and authorities at random
# If there are 100 fallbacks and 9 authorities:
#  - each fallback is chosen with probability 10.0/(10.0*100 + 1.0*9) ~= 0.99%
#  - each authority is chosen with probability 1.0/(10.0*100 + 1.0*9) ~= 0.09%
# A client choosing a bootstrap directory server will choose a fallback for
# 10.0/(10.0*100 + 1.0*9) * 100 = 99.1% of attempts, and an authority for
# 1.0/(10.0*100 + 1.0*9) * 9 = 0.9% of attempts.
# (This disregards the bootstrap schedules, where clients start by choosing
# from fallbacks & authorities, then later choose from only authorities.)
FALLBACK_OUTPUT_WEIGHT = 10.0

## Parsing Functions

def parse_ts(t):
  return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S")

def remove_bad_chars(raw_string, bad_char_list):
  # Remove each character in the bad_char_list
  cleansed_string = raw_string
  for c in bad_char_list:
    cleansed_string = cleansed_string.replace(c, '')
  return cleansed_string

def cleanse_unprintable(raw_string):
  # Remove all unprintable characters
  cleansed_string = ''
  for c in raw_string:
    if (c in string.ascii_letters or c in string.digits
        or c in string.punctuation or c in string.whitespace):
      cleansed_string += c
  return cleansed_string
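  # e.g. cleanse_unprintable('abc\x01\x02def') returns 'abcdef',
  # since control characters are in none of the printable sets above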

def cleanse_whitespace(raw_string):
  # Replace all whitespace characters with a space
  cleansed_string = raw_string
  for c in string.whitespace:
    cleansed_string = cleansed_string.replace(c, ' ')
  return cleansed_string

def cleanse_c_multiline_comment(raw_string):
  cleansed_string = raw_string
  # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
  cleansed_string = cleanse_whitespace(cleansed_string)
  # ContactInfo and Version can be arbitrary binary data
  cleansed_string = cleanse_unprintable(cleansed_string)
  # Prevent a malicious / unanticipated string from breaking out
  # of a C-style multiline comment
  # This removes '/*' and '*/' and '//'
  bad_char_list = '*/'
  # Prevent a malicious string from using C nulls
  bad_char_list += '\0'
  # Be safer by removing bad characters entirely
  cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
  # Some compilers may further process the content of comments
  # There isn't much we can do to cover every possible case
  # But comment-based directives are typically only advisory
  return cleansed_string

def cleanse_c_string(raw_string):
  cleansed_string = raw_string
  # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
  cleansed_string = cleanse_whitespace(cleansed_string)
  # ContactInfo and Version can be arbitrary binary data
  cleansed_string = cleanse_unprintable(cleansed_string)
  # Prevent a malicious address/fingerprint string from breaking out
  # of a C-style string
  bad_char_list = '"'
  # Prevent a malicious string from using escapes
  bad_char_list += '\\'
  # Prevent a malicious string from using C nulls
  bad_char_list += '\0'
  # Be safer by removing bad characters entirely
  cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
  # Some compilers may further process the content of strings
  # There isn't much we can do to cover every possible case
  # But this typically only results in changes to the string data
  return cleansed_string
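  # e.g. cleanse_c_multiline_comment('attack */ comment') returns
  # 'attack  comment', and cleanse_c_string('say "hi\\"') returns 'say hi',
  # so cleansed output can't terminate the enclosing comment or string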

## OnionOO Source Functions

# a dictionary of source metadata for each onionoo query we've made
fetch_source = {}

# register source metadata for 'what'
# assumes we only retrieve one document for each 'what'
def register_fetch_source(what, url, relays_published, version):
  fetch_source[what] = {}
  fetch_source[what]['url'] = url
  fetch_source[what]['relays_published'] = relays_published
  fetch_source[what]['version'] = version

# list each registered source's 'what'
def fetch_source_list():
  return sorted(fetch_source.keys())

# given 'what', provide a multiline C comment describing the source
def describe_fetch_source(what):
  desc = '/*'
  desc += '\n'
  desc += 'Onionoo Source: '
  desc += cleanse_c_multiline_comment(what)
  desc += ' Date: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published'])
  desc += ' Version: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['version'])
  desc += '\n'
  desc += 'URL: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['url'])
  desc += '\n'
  desc += '*/'
  return desc
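  # the generated comment looks like (with illustrative values):
  # /*
  # Onionoo Source: details Date: 2015-10-02 13:34:14 Version: 2.0
  # URL: https://onionoo.torproject.org/details?type=relay
  # */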

## File Processing Functions

def write_to_file(str, file_name, max_len):
  try:
    with open(file_name, 'w') as f:
      f.write(str[0:max_len])
  except EnvironmentError, error:
    logging.warning('Writing file %s failed: %d: %s'%
                    (file_name,
                     error.errno,
                     error.strerror)
                    )

def read_from_file(file_name, max_len):
  try:
    if os.path.isfile(file_name):
      with open(file_name, 'r') as f:
        return f.read(max_len)
  except EnvironmentError, error:
    logging.info('Loading file %s failed: %d: %s'%
                 (file_name,
                  error.errno,
                  error.strerror)
                 )
  return None

def load_possibly_compressed_response_json(response):
  if response.info().get('Content-Encoding') == 'gzip':
    buf = StringIO.StringIO(response.read())
    f = gzip.GzipFile(fileobj=buf)
    return json.load(f)
  else:
    return json.load(response)

def load_json_from_file(json_file_name):
  # An exception here may be resolved by deleting the .last_modified
  # and .json files, and re-running the script
  try:
    with open(json_file_name, 'r') as f:
      return json.load(f)
  except EnvironmentError, error:
    raise Exception('Reading not-modified json file %s failed: %d: %s'%
                    (json_file_name,
                     error.errno,
                     error.strerror)
                    )

## OnionOO Functions

def datestr_to_datetime(datestr):
  # Parse datetimes like: Fri, 02 Oct 2015 13:34:14 GMT
  if datestr is not None:
    dt = dateutil.parser.parse(datestr)
  else:
    # Never modified - use start of epoch
    dt = datetime.datetime.utcfromtimestamp(0)
  # strip any timezone out (in case they're supported in future)
  dt = dt.replace(tzinfo=None)
  return dt

def onionoo_fetch(what, **kwargs):
  params = kwargs
  params['type'] = 'relay'
  #params['limit'] = 10
  params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS,)
  params['last_seen_days'] = '-7'
  params['flag'] = 'V2Dir'
  url = ONIONOO + what + '?' + urllib.urlencode(params)

  # Unfortunately, the URL is too long for some OS filenames,
  # but we still don't want to get files from different URLs mixed up
  base_file_name = what + '-' + hashlib.sha1(url).hexdigest()

  full_url_file_name = base_file_name + '.full_url'
  MAX_FULL_URL_LENGTH = 1024

  last_modified_file_name = base_file_name + '.last_modified'
  MAX_LAST_MODIFIED_LENGTH = 64

  json_file_name = base_file_name + '.json'

  if LOCAL_FILES_ONLY:
    # Read from the local file, don't write to anything
    response_json = load_json_from_file(json_file_name)
  else:
    # store the full URL to a file for debugging
    # no need to compare as long as you trust SHA-1
    write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH)

    request = urllib2.Request(url)
    request.add_header('Accept-encoding', 'gzip')

    # load the last modified date from the file, if it exists
    last_mod_date = read_from_file(last_modified_file_name,
                                   MAX_LAST_MODIFIED_LENGTH)
    if last_mod_date is not None:
      request.add_header('If-modified-since', last_mod_date)

    # Parse the last modified date
    last_mod = datestr_to_datetime(last_mod_date)

    # Not Modified and still recent enough to be useful
    # Onionoo / Globe used to use 6 hours, but we can afford a day
    required_freshness = datetime.datetime.utcnow()
    # strip any timezone out (to match dateutil.parser)
    required_freshness = required_freshness.replace(tzinfo=None)
    required_freshness -= datetime.timedelta(hours=24)

    # Make the OnionOO request
    response_code = 0
    try:
      response = urllib2.urlopen(request)
      response_code = response.getcode()
    except urllib2.HTTPError, error:
      response_code = error.code
      if response_code == 304: # not modified
        pass
      else:
        raise Exception("Could not get " + url + ": "
                        + str(error.code) + ": " + error.reason)

    if response_code == 200: # OK
      last_mod = datestr_to_datetime(response.info().get('Last-Modified'))

    # Check for freshness
    if last_mod < required_freshness:
      if last_mod_date is not None:
        # This check sometimes fails transiently, retry the script if it does
        date_message = "Outdated data: last updated " + last_mod_date
      else:
        date_message = "No data: never downloaded "
      raise Exception(date_message + " from " + url)

    # Process the data
    if response_code == 200: # OK
      response_json = load_possibly_compressed_response_json(response)
      with open(json_file_name, 'w') as f:
        # use the most compact json representation to save space
        json.dump(response_json, f, separators=(',',':'))
      # store the last modified date in its own file
      if response.info().get('Last-modified') is not None:
        write_to_file(response.info().get('Last-Modified'),
                      last_modified_file_name,
                      MAX_LAST_MODIFIED_LENGTH)
    elif response_code == 304: # Not Modified
      response_json = load_json_from_file(json_file_name)
    else: # Unexpected HTTP response code not covered in the HTTPError above
      raise Exception("Unexpected HTTP response code to " + url + ": "
                      + str(response_code))

  register_fetch_source(what,
                        url,
                        response_json['relays_published'],
                        response_json['version'])
  return response_json
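
# a generated URL looks something like (query parameter order may vary):
# https://onionoo.torproject.org/details?type=relay&first_seen_days=7-&last_seen_days=-7&flag=V2Dir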

def fetch(what, **kwargs):
  #x = onionoo_fetch(what, **kwargs)
  # don't use sort_keys, as the order of or_addresses is significant
  #print json.dumps(x, indent=4, separators=(',', ': '))
  #sys.exit(0)
  return onionoo_fetch(what, **kwargs)

## Fallback Candidate Class

class Candidate(object):
  CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow()
                          - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))

  def __init__(self, details):
    for f in ['fingerprint', 'nickname', 'last_changed_address_or_port',
              'consensus_weight', 'or_addresses', 'dir_address']:
      if not f in details: raise Exception("Document has no %s field."%(f,))
    if not 'contact' in details:
      details['contact'] = None
    if not 'flags' in details or details['flags'] is None:
      details['flags'] = []
    if (not 'advertised_bandwidth' in details
        or details['advertised_bandwidth'] is None):
      # relays without advertised bandwidth have it calculated from their
      # consensus weight
      details['advertised_bandwidth'] = 0
    details['last_changed_address_or_port'] = parse_ts(
                                     details['last_changed_address_or_port'])
    self._data = details
    self._stable_sort_or_addresses()
    self._fpr = self._data['fingerprint']
    self._running = self._guard = self._v2dir = 0.
    self._split_dirport()
    self._compute_orport()
    if self.orport is None:
      raise Exception("Failed to get an orport for %s."%(self._fpr,))
    self._compute_ipv6addr()
    if self.ipv6addr is None:
      logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,))

  def _stable_sort_or_addresses(self):
    # replace self._data['or_addresses'] with a stable ordering,
    # sorting the secondary addresses in string order
    # leave the received order in self._data['or_addresses_raw']
    self._data['or_addresses_raw'] = self._data['or_addresses']
    or_address_primary = self._data['or_addresses'][:1]
    # subsequent entries in the or_addresses array are in an arbitrary order
    # so we stabilise the addresses by sorting them in string order
    or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:])
    or_addresses_stable = or_address_primary + or_addresses_secondaries_stable
    self._data['or_addresses'] = or_addresses_stable

  def get_fingerprint(self):
    return self._fpr

  # is_valid_ipv[46]_address by gsathya, karsten, 2013
  @staticmethod
  def is_valid_ipv4_address(address):
    if not isinstance(address, (str, unicode)):
      return False
    # check if there are four period separated values
    if address.count(".") != 3:
      return False
    # check that each octet is a decimal value between 0 and 255
    for entry in address.split("."):
      if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
        return False
      elif entry[0] == "0" and len(entry) > 1:
        return False # leading zeros, for instance in "1.2.3.001"
    return True
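    # e.g. is_valid_ipv4_address('192.0.2.1') is True, but
    # is_valid_ipv4_address('1.2.3') is False (too few octets), and
    # is_valid_ipv4_address('1.2.3.001') is False (leading zeros)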

  @staticmethod
  def is_valid_ipv6_address(address):
    if not isinstance(address, (str, unicode)):
      return False
    # remove brackets
    address = address[1:-1]
    # addresses are made up of eight colon separated groups of four hex digits
    # with leading zeros being optional
    # https://en.wikipedia.org/wiki/IPv6#Address_format
    colon_count = address.count(":")
    if colon_count > 7:
      return False # too many groups
    elif colon_count != 7 and not "::" in address:
      return False # not enough groups and none are collapsed
    elif address.count("::") > 1 or ":::" in address:
      return False # multiple groupings of zeros can't be collapsed
    found_ipv4_on_previous_entry = False
    for entry in address.split(":"):
      # If an IPv6 address has an embedded IPv4 address,
      # it must be the last entry
      if found_ipv4_on_previous_entry:
        return False
      if not re.match("^[0-9a-fA-F]{0,4}$", entry):
        if not Candidate.is_valid_ipv4_address(entry):
          return False
        else:
          found_ipv4_on_previous_entry = True
    return True
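    # addresses are expected to be bracketed, as in onionoo's or_addresses:
    # e.g. is_valid_ipv6_address('[2001:db8::1]') is True, and
    # is_valid_ipv6_address('[2001:db8:::1]') is False (':::' is invalid)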

  def _split_dirport(self):
    # Split the dir_address into dirip and dirport
    (self.dirip, _dirport) = self._data['dir_address'].split(':', 1)
    self.dirport = int(_dirport)

  def _compute_orport(self):
    # Choose the first ORPort that's on the same IPv4 address as the DirPort.
    # In rare circumstances, this might not be the primary ORPort address.
    # However, _stable_sort_or_addresses() ensures we choose the same one
    # every time, even if onionoo changes the order of the secondaries.
    self._split_dirport()
    self.orport = None
    for i in self._data['or_addresses']:
      if i != self._data['or_addresses'][0]:
        logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i))
      (ipaddr, port) = i.rsplit(':', 1)
      if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr):
        self.orport = int(port)
        return

  def _compute_ipv6addr(self):
    # Choose the first IPv6 address that uses the same port as the ORPort
    # Or, choose the first IPv6 address in the list
    # _stable_sort_or_addresses() ensures we choose the same IPv6 address
    # every time, even if onionoo changes the order of the secondaries.
    self.ipv6addr = None
    self.ipv6orport = None
    # Choose the first IPv6 address that uses the same port as the ORPort
    for i in self._data['or_addresses']:
      (ipaddr, port) = i.rsplit(':', 1)
      if (int(port) == self.orport) and Candidate.is_valid_ipv6_address(ipaddr):
        self.ipv6addr = ipaddr
        self.ipv6orport = port
        return
    # Choose the first IPv6 address in the list
    for i in self._data['or_addresses']:
      (ipaddr, port) = i.rsplit(':', 1)
      if Candidate.is_valid_ipv6_address(ipaddr):
        self.ipv6addr = ipaddr
        self.ipv6orport = port
        return

  @staticmethod
  def _extract_generic_history(history, which='unknown'):
    # given a tree like this:
    #   {
    #     "1_month": {
    #       "count": 187,
    #       "factor": 0.001001001001001001,
    #       "first": "2015-02-27 06:00:00",
    #       "interval": 14400,
    #       "last": "2015-03-30 06:00:00",
    #       "values": [
    #         999,
    #         999
    #       ]
    #     },
    #     "1_week": {
    #       "count": 169,
    #       "factor": 0.001001001001001001,
    #       "first": "2015-03-23 07:30:00",
    #       "interval": 3600,
    #       "last": "2015-03-30 07:30:00",
    #       "values": [ ...]
    #     },
    #     "1_year": {
    #       "count": 177,
    #       "factor": 0.001001001001001001,
    #       "first": "2014-04-11 00:00:00",
    #       "interval": 172800,
    #       "last": "2015-03-29 00:00:00",
    #       "values": [ ...]
    #     },
    #     "3_months": {
    #       "count": 185,
    #       "factor": 0.001001001001001001,
    #       "first": "2014-12-28 06:00:00",
    #       "interval": 43200,
    #       "last": "2015-03-30 06:00:00",
    #       "values": [ ...]
    #     }
    #   },
    # extract exactly one piece of data per time interval,
    # using smaller intervals where available.
    #
    # returns list of (age, length, value) dictionaries.
    generic_history = []
    periods = history.keys()
    periods.sort(key = lambda x: history[x]['interval'])
    now = datetime.datetime.utcnow()
    newest = now
    for p in periods:
      h = history[p]
      interval = datetime.timedelta(seconds = h['interval'])
      this_ts = parse_ts(h['last'])
      if (len(h['values']) != h['count']):
        logging.warn('Inconsistent value count in %s document for %s'
                     %(p, which))
      for v in reversed(h['values']):
        if (this_ts <= newest):
          agt1 = now - this_ts
          agt2 = interval
          agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600)
                     * 10**6) / 10**6
          agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600)
                     * 10**6) / 10**6
          generic_history.append(
            { 'age': agetmp1,
              'length': agetmp2,
              'value': v
            })
          newest = this_ts
        this_ts -= interval
      if (this_ts + interval != parse_ts(h['first'])):
        logging.warn('Inconsistent time information in %s document for %s'
                     %(p, which))
    #print json.dumps(generic_history, sort_keys=True,
    #                 indent=4, separators=(',', ': '))
    return generic_history

  @staticmethod
  def _avg_generic_history(generic_history):
    a = []
    for i in generic_history:
      if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600):
        continue
      if (i['length'] is not None
          and i['age'] is not None
          and i['value'] is not None):
        w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24))
        a.append( (i['value'] * w, w) )
    sv = math.fsum(map(lambda x: x[0], a))
    sw = math.fsum(map(lambda x: x[1], a))
    if sw == 0.0:
      svw = 0.0
    else:
      svw = sv/sw
    return svw
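    # i.e. the average is sum(value_i * w_i) / sum(w_i), where each weight
    # w_i = length_i * AGE_ALPHA**(age_i in days), so longer intervals count
    # for more, and older intervals are exponentially discounted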

  def _add_generic_history(self, history):
    periods = history.keys()
    periods.sort(key = lambda x: history[x]['interval'])
    print periods

  def add_running_history(self, history):
    pass

  def add_uptime(self, uptime):
    logging.debug('Adding uptime %s.'%(self._fpr,))
    # flags we care about: Running, V2Dir, Guard
    if not 'flags' in uptime:
      logging.debug('No flags in document for %s.'%(self._fpr,))
      return
    for f in ['Running', 'Guard', 'V2Dir']:
      if not f in uptime['flags']:
        logging.debug('No %s in flags for %s.'%(f, self._fpr,))
        return
    running = self._extract_generic_history(uptime['flags']['Running'],
                                            '%s-Running'%(self._fpr))
    guard = self._extract_generic_history(uptime['flags']['Guard'],
                                          '%s-Guard'%(self._fpr))
    v2dir = self._extract_generic_history(uptime['flags']['V2Dir'],
                                          '%s-V2Dir'%(self._fpr))
    if 'BadExit' in uptime['flags']:
      badexit = self._extract_generic_history(uptime['flags']['BadExit'],
                                              '%s-BadExit'%(self._fpr))
    self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE
    self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE
    self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE
    self._badexit = None
    if 'BadExit' in uptime['flags']:
      self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE

  def is_candidate(self):
    must_be_running_now = (PERFORM_IPV4_DIRPORT_CHECKS
                           or PERFORM_IPV6_DIRPORT_CHECKS)
    if (must_be_running_now and not self.is_running()):
      logging.info('%s not a candidate: not running now, unable to check ' +
                   'DirPort consensus download', self._fpr)
      return False
    if (self._data['last_changed_address_or_port'] >
        self.CUTOFF_ADDRESS_AND_PORT_STABLE):
      logging.info('%s not a candidate: changed address/port recently (%s)',
                   self._fpr, self._data['last_changed_address_or_port'])
      return False
    if self._running < CUTOFF_RUNNING:
      logging.info('%s not a candidate: running avg too low (%lf)',
                   self._fpr, self._running)
      return False
    if self._v2dir < CUTOFF_V2DIR:
      logging.info('%s not a candidate: v2dir avg too low (%lf)',
                   self._fpr, self._v2dir)
      return False
    if self._badexit is not None and self._badexit > PERMITTED_BADEXIT:
      logging.info('%s not a candidate: badexit avg too high (%lf)',
                   self._fpr, self._badexit)
      return False
    # if the relay doesn't report a version, also exclude the relay
    if (not self._data.has_key('recommended_version')
        or not self._data['recommended_version']):
      logging.info('%s not a candidate: version not recommended', self._fpr)
      return False
    if self._guard < CUTOFF_GUARD:
      logging.info('%s not a candidate: guard avg too low (%lf)',
                   self._fpr, self._guard)
      return False
    if (not self._data.has_key('consensus_weight')
        or self._data['consensus_weight'] < 1):
      logging.info('%s not a candidate: consensus weight invalid', self._fpr)
      return False
    return True

  def is_in_whitelist(self, relaylist):
    """ A fallback matches if each key in the whitelist line matches:
          ipv4
          dirport
          orport
          id
          ipv6 address and port (if present)
        If the fallback has an ipv6 key, the whitelist line must also have
        it, and vice versa, otherwise they don't match. """
    for entry in relaylist:
      if entry['id'] != self._fpr:
        # can't log here, every relay's fingerprint is compared to the entry
        continue
      if entry['ipv4'] != self.dirip:
        logging.info('%s is not in the whitelist: fingerprint matches, but ' +
                     'IPv4 (%s) does not match entry IPv4 (%s)',
                     self._fpr, self.dirip, entry['ipv4'])
        continue
      if int(entry['dirport']) != self.dirport:
        logging.info('%s is not in the whitelist: fingerprint matches, but ' +
                     'DirPort (%d) does not match entry DirPort (%d)',
                     self._fpr, self.dirport, int(entry['dirport']))
        continue
      if int(entry['orport']) != self.orport:
        logging.info('%s is not in the whitelist: fingerprint matches, but ' +
                     'ORPort (%d) does not match entry ORPort (%d)',
                     self._fpr, self.orport, int(entry['orport']))
        continue
      has_ipv6 = self.ipv6addr is not None and self.ipv6orport is not None
      ipv6 = (self.ipv6addr + ':' + self.ipv6orport) if has_ipv6 else None
      if (entry.has_key('ipv6') and has_ipv6):
        # if both entry and fallback have an ipv6 address, compare them
        if entry['ipv6'] != ipv6:
          logging.info('%s is not in the whitelist: fingerprint matches, ' +
                       'but IPv6 (%s) does not match entry IPv6 (%s)',
                       self._fpr, ipv6, entry['ipv6'])
          continue
      # if the fallback has an IPv6 address but the whitelist entry
      # doesn't, or vice versa, the whitelist entry doesn't match
      elif entry.has_key('ipv6') and not has_ipv6:
        logging.info('%s is not in the whitelist: fingerprint matches, but ' +
                     'it has no IPv6, and entry has IPv6 (%s)', self._fpr,
                     entry['ipv6'])
        logging.warning('%s excluded: has it lost its former IPv6 address %s?',
                        self._fpr, entry['ipv6'])
        continue
      elif not entry.has_key('ipv6') and has_ipv6:
        logging.info('%s is not in the whitelist: fingerprint matches, but ' +
                     'it has IPv6 (%s), and entry has no IPv6', self._fpr,
                     ipv6)
        logging.warning('%s excluded: has it gained an IPv6 address %s?',
                        self._fpr, ipv6)
        continue
      return True
    return False

  def is_in_blacklist(self, relaylist):
    """ A fallback matches a blacklist line if a sufficiently specific group
        of attributes matches:
          ipv4 & dirport
          ipv4 & orport
          id
          ipv6 & dirport
          ipv6 & ipv6 orport
        If the fallback and the blacklist line both have an ipv6 key,
        their values will be compared, otherwise, they will be ignored.
        If there is no dirport and no orport, the entry matches all relays on
        that ip. """
    for entry in relaylist:
      for key in entry:
        value = entry[key]
        if key == 'id' and value == self._fpr:
          logging.info('%s is in the blacklist: fingerprint matches',
                       self._fpr)
          return True
        if key == 'ipv4' and value == self.dirip:
          # if the dirport is present, check it too
          if entry.has_key('dirport'):
            if int(entry['dirport']) == self.dirport:
              logging.info('%s is in the blacklist: IPv4 (%s) and ' +
                           'DirPort (%d) match', self._fpr, self.dirip,
                           self.dirport)
              return True
          # if the orport is present, check it too
          elif entry.has_key('orport'):
            if int(entry['orport']) == self.orport:
              logging.info('%s is in the blacklist: IPv4 (%s) and ' +
                           'ORPort (%d) match', self._fpr, self.dirip,
                           self.orport)
              return True
          else:
            logging.info('%s is in the blacklist: IPv4 (%s) matches, and ' +
                         'entry has no DirPort or ORPort', self._fpr,
                         self.dirip)
            return True
        has_ipv6 = self.ipv6addr is not None and self.ipv6orport is not None
        ipv6 = (self.ipv6addr + ':' + self.ipv6orport) if has_ipv6 else None
        if (key == 'ipv6' and has_ipv6):
          # if both entry and fallback have an ipv6 address, compare them,
          # otherwise, disregard ipv6 addresses
          if value == ipv6:
            # if the dirport is present, check it too
            if entry.has_key('dirport'):
              if int(entry['dirport']) == self.dirport:
                logging.info('%s is in the blacklist: IPv6 (%s) and ' +
                             'DirPort (%d) match', self._fpr, ipv6,
                             self.dirport)
                return True
            # we've already checked the ORPort, it's part of entry['ipv6']
            else:
              logging.info('%s is in the blacklist: IPv6 (%s) matches, and ' +
                           'entry has no DirPort', self._fpr, ipv6)
              return True
        elif (key == 'ipv6' or has_ipv6):
          # only log if the fingerprint matches but the IPv6 doesn't
          if entry.has_key('id') and entry['id'] == self._fpr:
            logging.info('%s skipping IPv6 blacklist comparison: relay ' +
                         'has%s IPv6%s, but entry has%s IPv6%s', self._fpr,
                         '' if has_ipv6 else ' no',
                         (' (' + ipv6 + ')') if has_ipv6 else '',
                         '' if key == 'ipv6' else ' no',
                         (' (' + value + ')') if key == 'ipv6' else '')
            logging.warning('Has %s %s IPv6 address %s?', self._fpr,
                            'gained an' if has_ipv6 else 'lost its former',
                            ipv6 if has_ipv6 else value)
    return False

  def cw_to_bw_factor(self):
    # any relays with a missing or zero consensus weight are not candidates
    # any relays with a missing advertised bandwidth have it set to zero
    return self._data['advertised_bandwidth'] / self._data['consensus_weight']

  # since advertised_bandwidth is reported by the relay, it can be gamed
  # to avoid this, use the median consensus weight to bandwidth factor to
  # estimate this relay's measured bandwidth, and make that the upper limit
  def measured_bandwidth(self, median_cw_to_bw_factor):
    cw_to_bw = median_cw_to_bw_factor
    # Reduce exit bandwidth to make sure we're not overloading them
    if self.is_exit():
      cw_to_bw *= EXIT_BANDWIDTH_FRACTION
    measured_bandwidth = self._data['consensus_weight'] * cw_to_bw
    if self._data['advertised_bandwidth'] != 0:
      # limit advertised bandwidth (if available) to measured bandwidth
      return min(measured_bandwidth, self._data['advertised_bandwidth'])
    else:
      return measured_bandwidth
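    # e.g. with a (hypothetical) median factor of 500 bytes/s per weight unit,
    # a non-exit with consensus_weight 10000 and advertised_bandwidth 4000000
    # gets measured_bandwidth min(10000 * 500, 4000000) = 4000000 bytes/s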

  def set_measured_bandwidth(self, median_cw_to_bw_factor):
    self._data['measured_bandwidth'] = self.measured_bandwidth(
                                                      median_cw_to_bw_factor)

  def is_exit(self):
    return 'Exit' in self._data['flags']

  def is_guard(self):
    return 'Guard' in self._data['flags']

  def is_running(self):
    return 'Running' in self._data['flags']

  # report how long it takes to download a consensus from dirip:dirport
  @staticmethod
  def fallback_consensus_download_speed(dirip, dirport, nickname, max_time):
    download_failed = False
    downloader = DescriptorDownloader()
    start = datetime.datetime.utcnow()
    # some directory mirrors respond to requests in ways that hang python
    # sockets, which is why we log this line here
    logging.info('Initiating consensus download from %s (%s:%d).', nickname,
                 dirip, dirport)
    # there appears to be about 1 second of overhead when comparing stem's
    # internal trace time and the elapsed time calculated here
    TIMEOUT_SLOP = 1.0
    try:
      downloader.get_consensus(endpoints = [(dirip, dirport)],
                               timeout = (max_time + TIMEOUT_SLOP),
                               validate = True,
                               retries = 0,
                               fall_back_to_authority = False).run()
    except Exception, stem_error:
      logging.debug('Unable to retrieve a consensus from %s: %s', nickname,
                    stem_error)
      status = 'error: "%s"' % (stem_error)
      level = logging.WARNING
      download_failed = True
    elapsed = (datetime.datetime.utcnow() - start).total_seconds()
    if elapsed > max_time:
      status = 'too slow'
      level = logging.WARNING
      download_failed = True
    else:
      status = 'ok'
      level = logging.DEBUG
    logging.log(level, 'Consensus download: %0.1fs %s from %s (%s:%d), ' +
                'max download time %0.1fs.', elapsed, status, nickname,
                dirip, dirport, max_time)
    return download_failed

  # does this fallback download the consensus fast enough?
  def check_fallback_download_consensus(self):
    # include the relay if we're not doing a check, or we can't check (IPv6)
    ipv4_failed = False
    ipv6_failed = False
    if PERFORM_IPV4_DIRPORT_CHECKS:
      ipv4_failed = Candidate.fallback_consensus_download_speed(self.dirip,
                                                self.dirport,
                                                self._data['nickname'],
                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
    if self.ipv6addr is not None and PERFORM_IPV6_DIRPORT_CHECKS:
      # Clients assume the IPv6 DirPort is the same as the IPv4 DirPort
      ipv6_failed = Candidate.fallback_consensus_download_speed(self.ipv6addr,
                                                self.dirport,
                                                self._data['nickname'],
                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
    return ((not ipv4_failed) and (not ipv6_failed))

  # if this fallback has not passed a download check, try it again,
  # and record the result, available in get_fallback_download_consensus
  def try_fallback_download_consensus(self):
    if not self.get_fallback_download_consensus():
      self._data['download_check'] = self.check_fallback_download_consensus()

  # did this fallback pass the download check?
  def get_fallback_download_consensus(self):
    # if we're not performing checks, return True
    if not PERFORM_IPV4_DIRPORT_CHECKS and not PERFORM_IPV6_DIRPORT_CHECKS:
      return True
    # if we are performing checks, but haven't done one, return False
    if not self._data.has_key('download_check'):
      return False
    return self._data['download_check']

  # output an optional header comment and info for this fallback
  # try_fallback_download_consensus before calling this
  def fallbackdir_line(self, fallbacks, prefilter_fallbacks):
    s = ''
    if OUTPUT_COMMENTS:
      s += self.fallbackdir_comment(fallbacks, prefilter_fallbacks)
    # if the download speed is ok, output a C string
    # if it's not, but we OUTPUT_COMMENTS, output a commented-out C string
    if self.get_fallback_download_consensus() or OUTPUT_COMMENTS:
      s += self.fallbackdir_info(self.get_fallback_download_consensus())
    return s

  # output a header comment for this fallback
  def fallbackdir_comment(self, fallbacks, prefilter_fallbacks):
    # /*
    # nickname
    # flags
    # [contact]
    # [identical contact counts]
    # */
    # Multiline C comment
    s = '/*'
    s += '\n'
    s += cleanse_c_multiline_comment(self._data['nickname'])
    s += '\n'
    s += 'Flags: '
    s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags'])))
    s += '\n'
    if self._data['contact'] is not None:
      s += cleanse_c_multiline_comment(self._data['contact'])
      if CONTACT_COUNT or CONTACT_BLACKLIST_COUNT:
        fallback_count = len([f for f in fallbacks
                              if f._data['contact'] == self._data['contact']])
        if fallback_count > 1:
          s += '\n'
          s += '%d identical contacts listed' % (fallback_count)
        if CONTACT_BLACKLIST_COUNT:
          prefilter_count = len([f for f in prefilter_fallbacks
                               if f._data['contact'] == self._data['contact']])
          filter_count = prefilter_count - fallback_count
          if filter_count > 0:
            if fallback_count > 1:
              s += ' '
            else:
              s += '\n'
            s += '%d blacklisted' % (filter_count)
      s += '\n'
    s += '*/'
    s += '\n'
    return s

  # output the fallback info C string for this fallback
  # this is the text that would go after FallbackDir in a torrc
  # if this relay failed the download test and we OUTPUT_COMMENTS,
  # comment-out the returned string
  def fallbackdir_info(self, dl_speed_ok):
    # "address:dirport orport=port id=fingerprint"
    # "[ipv6=addr:orport]"
    # "weight=FALLBACK_OUTPUT_WEIGHT",
    #
    # Do we want a C string, or a commented-out string?
    c_string = dl_speed_ok
    comment_string = not dl_speed_ok and OUTPUT_COMMENTS
    # If we don't want either kind of string, bail
    if not c_string and not comment_string:
      return ''
    s = ''
    # Comment out the fallback directory entry if it's too slow
    # See the debug output for which address and port is failing
    if comment_string:
      s += '/* Consensus download failed or was too slow:\n'
    # Multi-Line C string with trailing comma (part of a string list)
    # This makes it easier to diff the file, and remove IPv6 lines using grep
    # Integers don't need escaping
    s += '"%s orport=%d id=%s"'%(
            cleanse_c_string(self._data['dir_address']),
            self.orport,
            cleanse_c_string(self._fpr))
    s += '\n'
    if self.ipv6addr is not None:
      s += '" ipv6=%s:%s"'%(
              cleanse_c_string(self.ipv6addr),
              cleanse_c_string(self.ipv6orport))
      s += '\n'
    s += '" weight=%d",'%(FALLBACK_OUTPUT_WEIGHT)
    if comment_string:
      s += '\n'
      s += '*/'
    return s
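    # the generated entry looks like (with hypothetical values):
    # "192.0.2.1:9030 orport=9001 id=0123456789ABCDEF0123456789ABCDEF01234567"
    # " ipv6=[2001:db8::1]:9001"
    # " weight=10",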

## Fallback Candidate List Class

class CandidateList(dict):
  def __init__(self):
    pass

  def _add_relay(self, details):
    if not 'dir_address' in details: return
    c = Candidate(details)
    self[ c.get_fingerprint() ] = c

  def _add_uptime(self, uptime):
    try:
      fpr = uptime['fingerprint']
    except KeyError:
      raise Exception("Document has no fingerprint field.")
    try:
      c = self[fpr]
    except KeyError:
      logging.debug('Got unknown relay %s in uptime document.'%(fpr,))
      return
    c.add_uptime(uptime)

  def _add_details(self):
    logging.debug('Loading details document.')
    d = fetch('details',
        fields=('fingerprint,nickname,contact,last_changed_address_or_port,' +
                'consensus_weight,advertised_bandwidth,or_addresses,' +
                'dir_address,recommended_version,flags'))
    logging.debug('Loading details document done.')
    if not 'relays' in d: raise Exception("No relays found in document.")
    for r in d['relays']: self._add_relay(r)

  def _add_uptimes(self):
    logging.debug('Loading uptime document.')
    d = fetch('uptime')
    logging.debug('Loading uptime document done.')
    if not 'relays' in d: raise Exception("No relays found in document.")
    for r in d['relays']: self._add_uptime(r)

  def add_relays(self):
    self._add_details()
    self._add_uptimes()

  def count_guards(self):
    guard_count = 0
    for fpr in self.keys():
      if self[fpr].is_guard():
        guard_count += 1
    return guard_count

  # Find fallbacks that fit the uptime, stability, and flags criteria,
  # and make an array of them in self.fallbacks
  def compute_fallbacks(self):
    self.fallbacks = map(lambda x: self[x],
                         filter(lambda x: self[x].is_candidate(),
                                self.keys()))

  # sort fallbacks by their consensus weight to advertised bandwidth factor,
  # lowest to highest
  # used to find the median cw_to_bw_factor()
  def sort_fallbacks_by_cw_to_bw_factor(self):
    self.fallbacks.sort(key=lambda f: f.cw_to_bw_factor())

  # sort fallbacks by their measured bandwidth, highest to lowest
  # calculate_measured_bandwidth before calling this
  # this is useful for reviewing candidates in priority order
  def sort_fallbacks_by_measured_bandwidth(self):
    self.fallbacks.sort(key=lambda f: f._data['measured_bandwidth'],
                        reverse=True)

  # sort fallbacks by their fingerprint, lowest to highest
  # this is useful for stable diffs of fallback lists
  def sort_fallbacks_by_fingerprint(self):
    self.fallbacks.sort(key=lambda f: f._fpr)

  @staticmethod
  def load_relaylist(file_name):
    """ Read each line in the file, and parse it like a FallbackDir line:
        an IPv4 address and optional port:
          <IPv4 address>:<port>
        which are parsed into dictionary entries:
          ipv4=<IPv4 address>
          dirport=<port>
        followed by a series of key=value entries:
          orport=<port>
          id=<fingerprint>
          ipv6=<IPv6 address>:<IPv6 orport>
        each line's key/value pairs are placed in a dictionary
        (of string -> string key/value pairs),
        and these dictionaries are placed in an array.
        comments start with # and are ignored """
    relaylist = []
    file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE)
    if file_data is None:
      return relaylist
    for line in file_data.split('\n'):
      relay_entry = {}
      # ignore comments
      line_comment_split = line.split('#')
      line = line_comment_split[0]
      # cleanup whitespace
      line = cleanse_whitespace(line)
      line = line.strip()
      if len(line) == 0:
        continue
      for item in line.split(' '):
        item = item.strip()
        if len(item) == 0:
          continue
        key_value_split = item.split('=')
        kvl = len(key_value_split)
        if kvl < 1 or kvl > 2:
          print '#error Bad %s item: %s, format is key=value.'%(
                                                             file_name, item)
        if kvl == 1:
          # assume that entries without a key are the ipv4 address,
          # perhaps with a dirport
          ipv4_maybe_dirport = key_value_split[0]
          ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':')
          dirl = len(ipv4_maybe_dirport_split)
          if dirl < 1 or dirl > 2:
            print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%(
                                                             file_name, item)
          if dirl >= 1:
            relay_entry['ipv4'] = ipv4_maybe_dirport_split[0]
          if dirl == 2:
            relay_entry['dirport'] = ipv4_maybe_dirport_split[1]
        elif kvl == 2:
          relay_entry[key_value_split[0]] = key_value_split[1]
      relaylist.append(relay_entry)
    return relaylist
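    # e.g. a whitelist line like (hypothetical values):
    #   192.0.2.1:9030 orport=9001 id=0123456789ABCDEF0123456789ABCDEF01234567
    # parses to:
    #   {'ipv4': '192.0.2.1', 'dirport': '9030', 'orport': '9001',
    #    'id': '0123456789ABCDEF0123456789ABCDEF01234567'}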

  # apply the fallback whitelist and blacklist
  def apply_filter_lists(self):
    excluded_count = 0
    logging.debug('Applying whitelist and blacklist.')
    # parse the whitelist and blacklist
    whitelist = self.load_relaylist(WHITELIST_FILE_NAME)
    blacklist = self.load_relaylist(BLACKLIST_FILE_NAME)
    filtered_fallbacks = []
    for f in self.fallbacks:
      in_whitelist = f.is_in_whitelist(whitelist)
      in_blacklist = f.is_in_blacklist(blacklist)
      if in_whitelist and in_blacklist:
        if BLACKLIST_EXCLUDES_WHITELIST_ENTRIES:
          # exclude
          excluded_count += 1
          logging.warning('Excluding %s: in both blacklist and whitelist.',
                          f._fpr)
        else:
          # include
          filtered_fallbacks.append(f)
      elif in_whitelist:
        # include
        filtered_fallbacks.append(f)
      elif in_blacklist:
        # exclude
        excluded_count += 1
        logging.debug('Excluding %s: in blacklist.', f._fpr)
      else:
        if INCLUDE_UNLISTED_ENTRIES:
          # include
          filtered_fallbacks.append(f)
        else:
          # exclude
          excluded_count += 1
          logging.info('Excluding %s: in neither blacklist nor whitelist.',
                       f._fpr)
    self.fallbacks = filtered_fallbacks
    return excluded_count

  @staticmethod
  def summarise_filters(initial_count, excluded_count):
    return '/* Whitelist & blacklist excluded %d of %d candidates. */'%(
            excluded_count, initial_count)

  # calculate each fallback's measured bandwidth based on the median
  # consensus weight to advertised bandwidth ratio
  def calculate_measured_bandwidth(self):
    self.sort_fallbacks_by_cw_to_bw_factor()
    median_fallback = self.fallback_median(True)
    median_cw_to_bw_factor = median_fallback.cw_to_bw_factor()
    for f in self.fallbacks:
      f.set_measured_bandwidth(median_cw_to_bw_factor)

  # remove relays with low measured bandwidth from the fallback list
  # calculate_measured_bandwidth for each relay before calling this
  def remove_low_bandwidth_relays(self):
    if MIN_BANDWIDTH is None:
      return
    above_min_bw_fallbacks = []
    for f in self.fallbacks:
      if f._data['measured_bandwidth'] >= MIN_BANDWIDTH:
        above_min_bw_fallbacks.append(f)
      else:
        # the bandwidth we log here is limited by the relay's consensus weight
        # as well as its advertised bandwidth. See set_measured_bandwidth
        # for details
        logging.info('%s not a candidate: bandwidth %.1fMB/s too low, must ' +
                     'be at least %.1fMB/s', f._fpr,
                     f._data['measured_bandwidth']/(1024.0*1024.0),
                     MIN_BANDWIDTH/(1024.0*1024.0))
    self.fallbacks = above_min_bw_fallbacks
  1156. # the minimum fallback in the list
  1157. # call one of the sort_fallbacks_* functions before calling this
  1158. def fallback_min(self):
  1159. if len(self.fallbacks) > 0:
  1160. return self.fallbacks[-1]
  1161. else:
  1162. return None

  # the median fallback in the list
  # call one of the sort_fallbacks_* functions before calling this
  def fallback_median(self, require_advertised_bandwidth):
    # use the low-median when there is an even number of fallbacks,
    # for consistency with the bandwidth authorities
    if len(self.fallbacks) > 0:
      median_position = (len(self.fallbacks) - 1) // 2
      if not require_advertised_bandwidth:
        return self.fallbacks[median_position]
      # if we need advertised_bandwidth but this relay doesn't have it,
      # move to a fallback with greater consensus weight until we find one
      while not self.fallbacks[median_position]._data['advertised_bandwidth']:
        median_position += 1
        if median_position >= len(self.fallbacks):
          return None
      return self.fallbacks[median_position]
    else:
      return None
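
  # Worked example: with 4 fallbacks, median_position is (4 - 1) // 2 == 1,
  # so the entry at index 1 is used rather than interpolating between the
  # two middle entries.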

  # the maximum fallback in the list
  # call one of the sort_fallbacks_* functions before calling this
  def fallback_max(self):
    if len(self.fallbacks) > 0:
      return self.fallbacks[0]
    else:
      return None

  # try a download check on each fallback candidate in order
  # stop after max_count successful downloads
  # but don't remove any candidates from the array
  def try_download_consensus_checks(self, max_count):
    dl_ok_count = 0
    for f in self.fallbacks:
      f.try_fallback_download_consensus()
      if f.get_fallback_download_consensus():
        # this fallback downloaded a consensus ok
        dl_ok_count += 1
        if dl_ok_count >= max_count:
          # we have enough fallbacks
          return

  # put max_count successful candidates in the fallbacks array:
  # - perform download checks on each fallback candidate
  # - retry failed candidates if CONSENSUS_DOWNLOAD_RETRY is set
  # - eliminate failed candidates
  # - if there are more than max_count candidates, eliminate lowest bandwidth
  # - if there are fewer than max_count candidates, leave only successful
  def perform_download_consensus_checks(self, max_count):
    self.sort_fallbacks_by_measured_bandwidth()
    self.try_download_consensus_checks(max_count)
    if CONSENSUS_DOWNLOAD_RETRY:
      # try unsuccessful candidates again
      # we could end up with more than max_count successful candidates here
      self.try_download_consensus_checks(max_count)
    # now we have at least max_count successful candidates,
    # or we've tried them all
    self.fallbacks = filter(lambda x: x.get_fallback_download_consensus(),
                            self.fallbacks)
    self.fallbacks = self.fallbacks[:max_count]
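
  # The list is still sorted by measured bandwidth when it is truncated, so
  # if the retry pass produced more than max_count successes, the
  # [:max_count] slice keeps the highest-bandwidth successful candidates.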

  def summarise_fallbacks(self, eligible_count, guard_count, target_count,
                          max_count):
    # Report:
    #  whether we checked consensus download times
    #  the number of fallback directories (and limits/exclusions, if relevant)
    #  min & max fallback bandwidths
    #  #error if below minimum count
    if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
      s = '/* Checked %s%s%s DirPorts served a consensus within %.1fs. */'%(
          'IPv4' if PERFORM_IPV4_DIRPORT_CHECKS else '',
          ' and ' if (PERFORM_IPV4_DIRPORT_CHECKS
                      and PERFORM_IPV6_DIRPORT_CHECKS) else '',
          'IPv6' if PERFORM_IPV6_DIRPORT_CHECKS else '',
          CONSENSUS_DOWNLOAD_SPEED_MAX)
    else:
      s = '/* Did not check IPv4 or IPv6 DirPort consensus downloads. */'
    s += '\n'
    # Multiline C comment with #error if things go bad
    s += '/*'
    s += '\n'
    # Integers don't need escaping in C comments
    fallback_count = len(self.fallbacks)
    if FALLBACK_PROPORTION_OF_GUARDS is None:
      fallback_proportion = ''
    else:
      fallback_proportion = ', Target %d (%d * %f)'%(target_count, guard_count,
                                               FALLBACK_PROPORTION_OF_GUARDS)
    s += 'Final Count: %d (Eligible %d%s'%(fallback_count,
                                           eligible_count,
                                           fallback_proportion)
    if MAX_FALLBACK_COUNT is not None:
      s += ', Clamped to %d'%(MAX_FALLBACK_COUNT)
    s += ')\n'
    if eligible_count != fallback_count:
      s += 'Excluded: %d (Eligible Count Exceeded Target Count)'%(
           eligible_count - fallback_count)
      s += '\n'
    min_fb = self.fallback_min()
    min_bw = min_fb._data['measured_bandwidth']
    max_fb = self.fallback_max()
    max_bw = max_fb._data['measured_bandwidth']
    s += 'Bandwidth Range: %.1f - %.1f MB/s'%(min_bw/(1024.0*1024.0),
                                              max_bw/(1024.0*1024.0))
    s += '\n'
    s += '*/'
    if fallback_count < MIN_FALLBACK_COUNT:
      # We must have a minimum number of fallbacks so they are always
      # reachable, and are in diverse locations
      s += '\n'
      s += '#error Fallback Count %d is too low. '%(fallback_count)
      s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT)
      s += 'Try adding entries to the whitelist, '
      s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.'
    return s
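
  # Illustrative output (all values are made up):
  #   /* Checked IPv4 and IPv6 DirPorts served a consensus within 15.0s. */
  #   /*
  #   Final Count: 100 (Eligible 120, Target 400 (2000 * 0.200000), Clamped to 100)
  #   Excluded: 20 (Eligible Count Exceeded Target Count)
  #   Bandwidth Range: 1.3 - 57.2 MB/s
  #   */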

## Main Function

def list_fallbacks():
  """ Fetches required onionoo documents and evaluates the
      fallback directory criteria for each of the relays """

  # find relays that could be fallbacks
  candidates = CandidateList()
  candidates.add_relays()

  # work out how many fallbacks we want
  guard_count = candidates.count_guards()
  if FALLBACK_PROPORTION_OF_GUARDS is None:
    target_count = guard_count
  else:
    target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS)
  # the maximum number of fallbacks is the least of:
  # - the target fallback count (FALLBACK_PROPORTION_OF_GUARDS * guard count)
  # - the maximum fallback count (MAX_FALLBACK_COUNT)
  if MAX_FALLBACK_COUNT is None:
    max_count = target_count
  else:
    max_count = min(target_count, MAX_FALLBACK_COUNT)
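
  # For example (illustrative numbers only): with guard_count == 2000 and
  # FALLBACK_PROPORTION_OF_GUARDS == 0.2, target_count is int(2000 * 0.2)
  # == 400; if MAX_FALLBACK_COUNT == 100, max_count is min(400, 100) == 100.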

  candidates.compute_fallbacks()
  prefilter_fallbacks = copy.copy(candidates.fallbacks)

  # filter with the whitelist and blacklist
  initial_count = len(candidates.fallbacks)
  excluded_count = candidates.apply_filter_lists()
  print candidates.summarise_filters(initial_count, excluded_count)
  eligible_count = len(candidates.fallbacks)

  # calculate the measured bandwidth of each relay,
  # then remove low-bandwidth relays
  candidates.calculate_measured_bandwidth()
  candidates.remove_low_bandwidth_relays()

  # print the raw fallback list
  #for x in candidates.fallbacks:
  #  print x.fallbackdir_line(True)
  #  print json.dumps(candidates[x]._data, sort_keys=True, indent=4,
  #                   separators=(',', ': '), default=json_util.default)

  if len(candidates.fallbacks) > 0:
    print candidates.summarise_fallbacks(eligible_count, guard_count,
                                         target_count, max_count)
  else:
    print '/* No Fallbacks met criteria */'

  for s in fetch_source_list():
    print describe_fetch_source(s)

  # check if each candidate can serve a consensus
  candidates.perform_download_consensus_checks(max_count)

  # if we're outputting the final fallback list, sort by fingerprint
  # this makes diffs much more stable
  # otherwise, leave sorted by bandwidth, which allows operators to be
  # contacted in priority order
  if not OUTPUT_CANDIDATES:
    candidates.sort_fallbacks_by_fingerprint()

  for x in candidates.fallbacks:
    print x.fallbackdir_line(candidates.fallbacks, prefilter_fallbacks)

if __name__ == "__main__":
  list_fallbacks()
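
# Typical usage is to redirect the output, which is C code, into an include
# file for the tor build (a sketch, assuming the fallback_dirs.inc name used
# by tor's source tree):
#   python updateFallbackDirs.py > fallback_dirs.inc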