#!/usr/bin/python

# Usage:
#
# Regenerate the list:
# scripts/maint/updateFallbackDirs.py > src/or/fallback_dirs.inc 2> fallback_dirs.log
#
# Check the existing list:
# scripts/maint/updateFallbackDirs.py check_existing > fallback_dirs.inc.ok 2> fallback_dirs.log
# mv fallback_dirs.inc.ok src/or/fallback_dirs.inc
#
# This script should be run from a stable, reliable network connection,
# with no other network activity (and not over tor).
# If this is not possible, please disable:
# PERFORM_IPV4_DIRPORT_CHECKS and PERFORM_IPV6_DIRPORT_CHECKS
#
# Needs dateutil (and potentially other python packages)
# Needs stem available in your PYTHONPATH, or just ln -s ../stem/stem .
# Optionally uses ipaddress (python 3 builtin) or py2-ipaddress (package)
# for netblock analysis, in PYTHONPATH, or just
# ln -s ../py2-ipaddress-3.4.1/ipaddress.py .
#
# Then read the logs to make sure the fallbacks aren't dominated by a single
# netblock or port

# Script by weasel, April 2015
# Portions by gsathya & karsten, 2013
# https://trac.torproject.org/projects/tor/attachment/ticket/8374/dir_list.2.py
# Modifications by teor, 2015

import StringIO
import string
import re
import datetime
import gzip
import os.path
import json
import math
import sys
import urllib
import urllib2
import hashlib
import dateutil.parser
# bson_lazy provides bson
#from bson import json_util
import copy

from stem.descriptor import DocumentHandler
from stem.descriptor.remote import get_consensus, get_server_descriptors, MAX_FINGERPRINTS

import logging
logging.root.name = ''

HAVE_IPADDRESS = False
try:
  # python 3 builtin, or install package py2-ipaddress
  # there are several ipaddress implementations for python 2
  # with slightly different semantics with str typed text
  # fortunately, all our IP addresses are in unicode
  import ipaddress
  HAVE_IPADDRESS = True
except ImportError:
  # if this happens, we avoid doing netblock analysis
  logging.warning('Unable to import ipaddress, please install py2-ipaddress.' +
                  ' A fallback list will be created, but optional netblock' +
                  ' analysis will not be performed.')

## Top-Level Configuration

# We use semantic versioning: https://semver.org
# In particular:
# * major changes include removing a mandatory field, or anything else that
#   would break an appropriately tolerant parser,
# * minor changes include adding a field,
# * patch changes include changing header comments or other unstructured
#   content
FALLBACK_FORMAT_VERSION = '2.0.0'

# Output all candidate fallbacks, or only output selected fallbacks?
OUTPUT_CANDIDATES = False

# Perform DirPort checks over IPv4?
# Change this to False if IPv4 doesn't work for you, or if you don't want to
# download a consensus for each fallback
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV4_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else True

# Perform DirPort checks over IPv6?
# If you know IPv6 works for you, set this to True
# This will exclude IPv6 relays without an IPv6 DirPort configured
# So it's best left at False until #18394 is implemented
# Don't check ~1000 candidates when OUTPUT_CANDIDATES is True
PERFORM_IPV6_DIRPORT_CHECKS = False if OUTPUT_CANDIDATES else False

# Must relays be running now?
MUST_BE_RUNNING_NOW = (PERFORM_IPV4_DIRPORT_CHECKS
                       or PERFORM_IPV6_DIRPORT_CHECKS)

# Clients have been using microdesc consensuses by default for a while now
DOWNLOAD_MICRODESC_CONSENSUS = True

# If a relay delivers an expired consensus, if it expired less than this many
# seconds ago, we still allow the relay. This should never be less than -90,
# as all directory mirrors should have downloaded a consensus 90 minutes
# before it expires. It should never be more than 24 hours, because clients
# reject consensuses that are older than REASONABLY_LIVE_TIME.
# For the consensus expiry check to be accurate, the machine running this
# script needs an accurate clock.
#
# Relays on 0.3.0 and later return a 404 when they are about to serve an
# expired consensus. This makes them fail the download check.
# We use a tolerance of 0, so that 0.2.x series relays also fail the download
# check if they serve an expired consensus.
CONSENSUS_EXPIRY_TOLERANCE = 0

# Output fallback name, flags, bandwidth, and ContactInfo in a C comment?
OUTPUT_COMMENTS = True if OUTPUT_CANDIDATES else False

# Output matching ContactInfo in fallbacks list or the blacklist?
# Useful if you're trying to contact operators
CONTACT_COUNT = True if OUTPUT_CANDIDATES else False
CONTACT_BLACKLIST_COUNT = True if OUTPUT_CANDIDATES else False

# How the list should be sorted:
# fingerprint: is useful for stable diffs of fallback lists
# measured_bandwidth: is useful when pruning the list based on bandwidth
# contact: is useful for contacting operators once the list has been pruned
OUTPUT_SORT_FIELD = 'contact' if OUTPUT_CANDIDATES else 'fingerprint'

## OnionOO Settings

ONIONOO = 'https://onionoo.torproject.org/'
#ONIONOO = 'https://onionoo.thecthulhu.com/'

# Don't bother going out to the Internet, just use the files available locally,
# even if they're very old
LOCAL_FILES_ONLY = False

## Whitelist / Blacklist Filter Settings

# The whitelist contains entries that are included if all attributes match
# (IPv4, dirport, orport, id, and optionally IPv6 and IPv6 orport)
# The blacklist contains (partial) entries that are excluded if any
# sufficiently specific group of attributes matches:
#   IPv4 & DirPort
#   IPv4 & ORPort
#   ID
#   IPv6 & DirPort
#   IPv6 & IPv6 ORPort
# If neither port is included in the blacklist, the entire IP address is
# blacklisted.
# (An example entry format is sketched at the end of this settings section.)

# What happens to entries in neither list?
# When True, they are included, when False, they are excluded
INCLUDE_UNLISTED_ENTRIES = True if OUTPUT_CANDIDATES else False

# If an entry is in both lists, what happens?
# When True, it is excluded, when False, it is included
BLACKLIST_EXCLUDES_WHITELIST_ENTRIES = True

WHITELIST_FILE_NAME = 'scripts/maint/fallback.whitelist'
BLACKLIST_FILE_NAME = 'scripts/maint/fallback.blacklist'
FALLBACK_FILE_NAME = 'src/or/fallback_dirs.inc'

# The number of bytes we'll read from a filter file before giving up
MAX_LIST_FILE_SIZE = 1024 * 1024

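# An illustrative whitelist entry, with a hypothetical fingerprint and
# documentation addresses (the keys actually compared are listed in
# is_in_whitelist() and is_in_blacklist() below):
#   192.0.2.1:80 orport=443 id=0123456789ABCDEF0123456789ABCDEF01234567 ipv6=[2001:db8::1]:443
# Blacklist entries may be partial, for example just an IPv4 address, or an
# IPv4 address and a DirPort.
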
## Eligibility Settings

# Require fallbacks to have the same address and port for a set amount of time
# We used to have this at 1 week, but that caused many fallback failures, which
# meant that we had to rebuild the list more often.
#
# There was a bug in Tor 0.2.8.1-alpha and earlier where a relay temporarily
# submits a 0 DirPort when restarted.
# This causes OnionOO to (correctly) reset its stability timer.
# Affected relays should upgrade to Tor 0.2.8.7 or later, which has a fix
# for this issue.
ADDRESS_AND_PORT_STABLE_DAYS = 30

# We ignore relays that have been down for more than this period
MAX_DOWNTIME_DAYS = 0 if MUST_BE_RUNNING_NOW else 7

# What time-weighted fraction of these flags must FallbackDirs
# equal or exceed?
CUTOFF_RUNNING = .90
CUTOFF_V2DIR = .90
# Tolerate lower guard flag averages, as guard flags are removed for some time
# after a relay restarts
CUTOFF_GUARD = .80

# What time-weighted fraction of these flags must FallbackDirs
# equal or fall under?
# .00 means no bad exits
PERMITTED_BADEXIT = .00

# older entries' weights are adjusted with ALPHA^(age in days)
AGE_ALPHA = 0.99

# this factor is used to scale OnionOO entries to [0,1]
ONIONOO_SCALE_ONE = 999.

## Fallback Count Limits

# The target for these parameters is 20% of the guards in the network
# This is around 200 as of October 2015
_FB_POG = 0.2
FALLBACK_PROPORTION_OF_GUARDS = None if OUTPUT_CANDIDATES else _FB_POG

# Limit the number of fallbacks (eliminating lowest by advertised bandwidth)
MAX_FALLBACK_COUNT = None if OUTPUT_CANDIDATES else 200
# Emit a C #error if the number of fallbacks is less than expected
MIN_FALLBACK_COUNT = 0 if OUTPUT_CANDIDATES else MAX_FALLBACK_COUNT*0.5

# The maximum number of fallbacks on the same address, contact, or family
# With 200 fallbacks, this means each operator can see 1% of client bootstraps
# (The directory authorities used to see ~12% of client bootstraps each.)
MAX_FALLBACKS_PER_IP = 1
MAX_FALLBACKS_PER_IPV4 = MAX_FALLBACKS_PER_IP
MAX_FALLBACKS_PER_IPV6 = MAX_FALLBACKS_PER_IP
MAX_FALLBACKS_PER_CONTACT = 3
MAX_FALLBACKS_PER_FAMILY = 3

## Fallback Bandwidth Requirements

# Any fallback with the Exit flag has its bandwidth multiplied by this fraction
# to make sure we aren't further overloading exits
# (Set to 1.0, because we asked that only lightly loaded exits opt-in,
# and the extra load really isn't that much for large relays.)
EXIT_BANDWIDTH_FRACTION = 1.0

# If a single fallback's bandwidth is too low, it's pointless adding it
# We expect fallbacks to handle an extra 10 kilobytes per second of traffic
# Make sure they can support a hundred times the expected extra load
# (Use 102.4 to make it come out nicely in MByte/s)
# We convert this to a consensus weight before applying the filter,
# because all the bandwidth amounts are specified by the relay
MIN_BANDWIDTH = 102.4 * 10.0 * 1024.0
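# As a worked example: 102.4 * 10.0 * 1024.0 = 1048576 bytes per second,
# i.e. exactly 1 MByte/s, one hundred times the ~10 kilobytes per second of
# extra load expected per fallback.
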
# Clients will time out after 30 seconds trying to download a consensus
# So allow fallback directories half that to deliver a consensus
# The exact download times might change based on the network connection
# running this script, but only by a few seconds
# There is also about a second of python overhead
CONSENSUS_DOWNLOAD_SPEED_MAX = 15.0
# If the relay fails a consensus check, retry the download
# This avoids delisting a relay due to transient network conditions
CONSENSUS_DOWNLOAD_RETRY = True

## Parsing Functions

def parse_ts(t):
  return datetime.datetime.strptime(t, "%Y-%m-%d %H:%M:%S")

def remove_bad_chars(raw_string, bad_char_list):
  # Remove each character in the bad_char_list
  cleansed_string = raw_string
  for c in bad_char_list:
    cleansed_string = cleansed_string.replace(c, '')
  return cleansed_string

def cleanse_unprintable(raw_string):
  # Remove all unprintable characters
  cleansed_string = ''
  for c in raw_string:
    if c in string.printable:
      cleansed_string += c
  return cleansed_string

def cleanse_whitespace(raw_string):
  # Replace all whitespace characters with a space
  cleansed_string = raw_string
  for c in string.whitespace:
    cleansed_string = cleansed_string.replace(c, ' ')
  return cleansed_string

def cleanse_c_multiline_comment(raw_string):
  cleansed_string = raw_string
  # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
  cleansed_string = cleanse_whitespace(cleansed_string)
  # ContactInfo and Version can be arbitrary binary data
  cleansed_string = cleanse_unprintable(cleansed_string)
  # Prevent a malicious / unanticipated string from breaking out
  # of a C-style multiline comment
  # This removes '/*' and '*/' and '//'
  bad_char_list = '*/'
  # Prevent a malicious string from using C nulls
  bad_char_list += '\0'
  # Be safer by removing bad characters entirely
  cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
  # Some compilers may further process the content of comments
  # There isn't much we can do to cover every possible case
  # But comment-based directives are typically only advisory
  return cleansed_string

def cleanse_c_string(raw_string):
  cleansed_string = raw_string
  # Embedded newlines should be removed by tor/onionoo, but let's be paranoid
  cleansed_string = cleanse_whitespace(cleansed_string)
  # ContactInfo and Version can be arbitrary binary data
  cleansed_string = cleanse_unprintable(cleansed_string)
  # Prevent a malicious address/fingerprint string from breaking out
  # of a C-style string
  bad_char_list = '"'
  # Prevent a malicious string from using escapes
  bad_char_list += '\\'
  # Prevent a malicious string from using C nulls
  bad_char_list += '\0'
  # Be safer by removing bad characters entirely
  cleansed_string = remove_bad_chars(cleansed_string, bad_char_list)
  # Some compilers may further process the content of strings
  # There isn't much we can do to cover every possible case
  # But this typically only results in changes to the string data
  return cleansed_string
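
# A minimal illustrative example of the cleansing above (made-up input):
#   cleanse_c_string('Contact "me" \\ now') -> 'Contact me  now'
# the double quote, backslash, and any C nulls are removed outright, so the
# value cannot terminate or escape the generated C string literal.
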
## OnionOO Source Functions

# a dictionary of source metadata for each onionoo query we've made
fetch_source = {}

# register source metadata for 'what'
# assumes we only retrieve one document for each 'what'
def register_fetch_source(what, url, relays_published, version):
  fetch_source[what] = {}
  fetch_source[what]['url'] = url
  fetch_source[what]['relays_published'] = relays_published
  fetch_source[what]['version'] = version

# list each registered source's 'what'
def fetch_source_list():
  return sorted(fetch_source.keys())

# given 'what', provide a multiline C comment describing the source
def describe_fetch_source(what):
  desc = '/*'
  desc += '\n'
  desc += 'Onionoo Source: '
  desc += cleanse_c_multiline_comment(what)
  desc += ' Date: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['relays_published'])
  desc += ' Version: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['version'])
  desc += '\n'
  desc += 'URL: '
  desc += cleanse_c_multiline_comment(fetch_source[what]['url'])
  desc += '\n'
  desc += '*/'
  return desc
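
# For example, once register_fetch_source() has run for a 'details' query,
# describe_fetch_source('details') produces a comment of this shape
# (hypothetical date, version, and truncated URL):
#   /*
#   Onionoo Source: details Date: 2015-10-02 13:34:14 Version: 3.1
#   URL: https://onionoo.torproject.org/details?type=relay...
#   */
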
## File Processing Functions

def write_to_file(str, file_name, max_len):
  try:
    with open(file_name, 'w') as f:
      f.write(str[0:max_len])
  except EnvironmentError, error:
    logging.error('Writing file %s failed: %d: %s'%
                  (file_name,
                   error.errno,
                   error.strerror)
                  )

def read_from_file(file_name, max_len):
  try:
    if os.path.isfile(file_name):
      with open(file_name, 'r') as f:
        return f.read(max_len)
  except EnvironmentError, error:
    logging.info('Loading file %s failed: %d: %s'%
                 (file_name,
                  error.errno,
                  error.strerror)
                 )
  return None

def parse_fallback_file(file_name):
  file_data = read_from_file(file_name, MAX_LIST_FILE_SIZE)
  file_data = cleanse_unprintable(file_data)
  file_data = remove_bad_chars(file_data, '\n"\0')
  file_data = re.sub('/\*.*?\*/', '', file_data)
  file_data = file_data.replace(',', '\n')
  file_data = file_data.replace(' weight=10', '')
  return file_data
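
# A minimal sketch of what parse_fallback_file() does to an existing
# fallback_dirs.inc entry (hypothetical address and fingerprint):
#   "192.0.2.1:80 orport=443 id=0123456789ABCDEF0123456789ABCDEF01234567"
#   " weight=10",
#   /* nickname=ExampleRelay */
# becomes a single whitespace-separated line per fallback:
#   192.0.2.1:80 orport=443 id=0123456789ABCDEF0123456789ABCDEF01234567
# which can then be compared against candidates in check_existing mode.
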
def load_possibly_compressed_response_json(response):
  if response.info().get('Content-Encoding') == 'gzip':
    buf = StringIO.StringIO( response.read() )
    f = gzip.GzipFile(fileobj=buf)
    return json.load(f)
  else:
    return json.load(response)

def load_json_from_file(json_file_name):
  # An exception here may be resolved by deleting the .last_modified
  # and .json files, and re-running the script
  try:
    with open(json_file_name, 'r') as f:
      return json.load(f)
  except EnvironmentError, error:
    raise Exception('Reading not-modified json file %s failed: %d: %s'%
                    (json_file_name,
                     error.errno,
                     error.strerror)
                    )

## OnionOO Functions

def datestr_to_datetime(datestr):
  # Parse datetimes like: Fri, 02 Oct 2015 13:34:14 GMT
  if datestr is not None:
    dt = dateutil.parser.parse(datestr)
  else:
    # Never modified - use start of epoch
    dt = datetime.datetime.utcfromtimestamp(0)
  # strip any timezone out (in case they're supported in future)
  dt = dt.replace(tzinfo=None)
  return dt

def onionoo_fetch(what, **kwargs):
  params = kwargs
  params['type'] = 'relay'
  #params['limit'] = 10
  params['first_seen_days'] = '%d-'%(ADDRESS_AND_PORT_STABLE_DAYS)
  params['last_seen_days'] = '-%d'%(MAX_DOWNTIME_DAYS)
  params['flag'] = 'V2Dir'
  url = ONIONOO + what + '?' + urllib.urlencode(params)

  # Unfortunately, the URL is too long for some OS filenames,
  # but we still don't want to get files from different URLs mixed up
  base_file_name = what + '-' + hashlib.sha1(url).hexdigest()

  full_url_file_name = base_file_name + '.full_url'
  MAX_FULL_URL_LENGTH = 1024

  last_modified_file_name = base_file_name + '.last_modified'
  MAX_LAST_MODIFIED_LENGTH = 64

  json_file_name = base_file_name + '.json'

  if LOCAL_FILES_ONLY:
    # Read from the local file, don't write to anything
    response_json = load_json_from_file(json_file_name)
  else:
    # store the full URL to a file for debugging
    # no need to compare as long as you trust SHA-1
    write_to_file(url, full_url_file_name, MAX_FULL_URL_LENGTH)

    request = urllib2.Request(url)
    request.add_header('Accept-encoding', 'gzip')

    # load the last modified date from the file, if it exists
    last_mod_date = read_from_file(last_modified_file_name,
                                   MAX_LAST_MODIFIED_LENGTH)
    if last_mod_date is not None:
      request.add_header('If-modified-since', last_mod_date)

    # Parse last modified date
    last_mod = datestr_to_datetime(last_mod_date)

    # Not Modified and still recent enough to be useful
    # Onionoo / Globe used to use 6 hours, but we can afford a day
    required_freshness = datetime.datetime.utcnow()
    # strip any timezone out (to match dateutil.parser)
    required_freshness = required_freshness.replace(tzinfo=None)
    required_freshness -= datetime.timedelta(hours=24)

    # Make the OnionOO request
    response_code = 0
    try:
      response = urllib2.urlopen(request)
      response_code = response.getcode()
    except urllib2.HTTPError, error:
      response_code = error.code
      if response_code == 304: # not modified
        pass
      else:
        raise Exception("Could not get " + url + ": "
                        + str(error.code) + ": " + error.reason)

    if response_code == 200: # OK
      last_mod = datestr_to_datetime(response.info().get('Last-Modified'))

    # Check for freshness
    if last_mod < required_freshness:
      if last_mod_date is not None:
        # This check sometimes fails transiently, retry the script if it does
        date_message = "Outdated data: last updated " + last_mod_date
      else:
        date_message = "No data: never downloaded "
      raise Exception(date_message + " from " + url)

    # Process the data
    if response_code == 200: # OK
      response_json = load_possibly_compressed_response_json(response)

      with open(json_file_name, 'w') as f:
        # use the most compact json representation to save space
        json.dump(response_json, f, separators=(',',':'))

      # store the last modified date in its own file
      if response.info().get('Last-modified') is not None:
        write_to_file(response.info().get('Last-Modified'),
                      last_modified_file_name,
                      MAX_LAST_MODIFIED_LENGTH)

    elif response_code == 304: # Not Modified
      response_json = load_json_from_file(json_file_name)

    else: # Unexpected HTTP response code not covered in the HTTPError above
      raise Exception("Unexpected HTTP response code to " + url + ": "
                      + str(response_code))

  register_fetch_source(what,
                        url,
                        response_json['relays_published'],
                        response_json['version'])

  return response_json
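
# A usage note on the caching above: each query 'what' (for example 'details'
# or 'uptime') leaves three files in the current working directory, named
# <what>-<sha1 of the URL>.full_url, .last_modified and .json. Deleting the
# .last_modified and .json files forces a fresh download on the next run.
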
def fetch(what, **kwargs):
  #x = onionoo_fetch(what, **kwargs)
  # don't use sort_keys, as the order of or_addresses is significant
  #print json.dumps(x, indent=4, separators=(',', ': '))
  #sys.exit(0)
  return onionoo_fetch(what, **kwargs)

## Fallback Candidate Class

class Candidate(object):
  CUTOFF_ADDRESS_AND_PORT_STABLE = (datetime.datetime.utcnow()
                            - datetime.timedelta(ADDRESS_AND_PORT_STABLE_DAYS))

  def __init__(self, details):
    for f in ['fingerprint', 'nickname', 'last_changed_address_or_port',
              'consensus_weight', 'or_addresses', 'dir_address']:
      if not f in details: raise Exception("Document has no %s field."%(f,))

    if not 'contact' in details:
      details['contact'] = None
    if not 'flags' in details or details['flags'] is None:
      details['flags'] = []
    if (not 'advertised_bandwidth' in details
        or details['advertised_bandwidth'] is None):
      # relays without advertised bandwidth have it calculated from their
      # consensus weight
      details['advertised_bandwidth'] = 0
    if (not 'effective_family' in details
        or details['effective_family'] is None):
      details['effective_family'] = []
    if not 'platform' in details:
      details['platform'] = None
    details['last_changed_address_or_port'] = parse_ts(
                                      details['last_changed_address_or_port'])
    self._data = details
    self._stable_sort_or_addresses()

    self._fpr = self._data['fingerprint']
    self._running = self._guard = self._v2dir = 0.
    self._split_dirport()
    self._compute_orport()
    if self.orport is None:
      raise Exception("Failed to get an orport for %s."%(self._fpr,))
    self._compute_ipv6addr()
    if not self.has_ipv6():
      logging.debug("Failed to get an ipv6 address for %s."%(self._fpr,))
    self._compute_version()
    self._extra_info_cache = None

  def _stable_sort_or_addresses(self):
    # replace self._data['or_addresses'] with a stable ordering,
    # sorting the secondary addresses in string order
    # leave the received order in self._data['or_addresses_raw']
    self._data['or_addresses_raw'] = self._data['or_addresses']
    or_address_primary = self._data['or_addresses'][:1]
    # subsequent entries in the or_addresses array are in an arbitrary order
    # so we stabilise the addresses by sorting them in string order
    or_addresses_secondaries_stable = sorted(self._data['or_addresses'][1:])
    or_addresses_stable = or_address_primary + or_addresses_secondaries_stable
    self._data['or_addresses'] = or_addresses_stable

  def get_fingerprint(self):
    return self._fpr

  # is_valid_ipv[46]_address by gsathya, karsten, 2013
  @staticmethod
  def is_valid_ipv4_address(address):
    if not isinstance(address, (str, unicode)):
      return False

    # check if there are four period separated values
    if address.count(".") != 3:
      return False

    # check that each octet is a decimal value between 0-255
    for entry in address.split("."):
      if not entry.isdigit() or int(entry) < 0 or int(entry) > 255:
        return False
      elif entry[0] == "0" and len(entry) > 1:
        return False # leading zeros, for instance in "1.2.3.001"

    return True

  @staticmethod
  def is_valid_ipv6_address(address):
    if not isinstance(address, (str, unicode)):
      return False

    # remove brackets
    address = address[1:-1]

    # addresses are made up of eight colon separated groups of four hex digits
    # with leading zeros being optional
    # https://en.wikipedia.org/wiki/IPv6#Address_format
    colon_count = address.count(":")

    if colon_count > 7:
      return False # too many groups
    elif colon_count != 7 and not "::" in address:
      return False # not enough groups and none are collapsed
    elif address.count("::") > 1 or ":::" in address:
      return False # multiple groupings of zeros can't be collapsed

    found_ipv4_on_previous_entry = False
    for entry in address.split(":"):
      # If an IPv6 address has an embedded IPv4 address,
      # it must be the last entry
      if found_ipv4_on_previous_entry:
        return False
      if not re.match("^[0-9a-fA-F]{0,4}$", entry):
        if not Candidate.is_valid_ipv4_address(entry):
          return False
        else:
          found_ipv4_on_previous_entry = True

    return True
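
  # Illustrative checks for the two validators above (values made up here):
  #   is_valid_ipv4_address('203.0.113.1')    -> True
  #   is_valid_ipv4_address('203.0.113.001')  -> False  (leading zeros)
  #   is_valid_ipv4_address('203.0.113')      -> False  (only three octets)
  #   is_valid_ipv6_address('[2001:db8::1]')  -> True   (brackets expected)
  #   is_valid_ipv6_address('[::ffff:203.0.113.1]') -> True  (embedded IPv4)
  #   is_valid_ipv6_address('[1:2:3:4:5:6:7:8:9]')  -> False (too many groups)
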
  def _split_dirport(self):
    # Split the dir_address into dirip and dirport
    (self.dirip, _dirport) = self._data['dir_address'].split(':', 2)
    self.dirport = int(_dirport)

  def _compute_orport(self):
    # Choose the first ORPort that's on the same IPv4 address as the DirPort.
    # In rare circumstances, this might not be the primary ORPort address.
    # However, _stable_sort_or_addresses() ensures we choose the same one
    # every time, even if onionoo changes the order of the secondaries.
    self._split_dirport()

    self.orport = None
    for i in self._data['or_addresses']:
      if i != self._data['or_addresses'][0]:
        logging.debug('Secondary IPv4 Address Used for %s: %s'%(self._fpr, i))
      (ipaddr, port) = i.rsplit(':', 1)
      if (ipaddr == self.dirip) and Candidate.is_valid_ipv4_address(ipaddr):
        self.orport = int(port)
        return

  def _compute_ipv6addr(self):
    # Choose the first IPv6 address that uses the same port as the ORPort
    # Or, choose the first IPv6 address in the list
    # _stable_sort_or_addresses() ensures we choose the same IPv6 address
    # every time, even if onionoo changes the order of the secondaries.
    self.ipv6addr = None
    self.ipv6orport = None
    # Choose the first IPv6 address that uses the same port as the ORPort
    for i in self._data['or_addresses']:
      (ipaddr, port) = i.rsplit(':', 1)
      if (int(port) == self.orport) and Candidate.is_valid_ipv6_address(ipaddr):
        self.ipv6addr = ipaddr
        self.ipv6orport = int(port)
        return
    # Choose the first IPv6 address in the list
    for i in self._data['or_addresses']:
      (ipaddr, port) = i.rsplit(':', 1)
      if Candidate.is_valid_ipv6_address(ipaddr):
        self.ipv6addr = ipaddr
        self.ipv6orport = int(port)
        return

  def _compute_version(self):
    # parse the version out of the platform string
    # The platform looks like: "Tor 0.2.7.6 on Linux"
    self._data['version'] = None
    if self._data['platform'] is None:
      return
    # be tolerant of weird whitespacing, use a whitespace split
    tokens = self._data['platform'].split()
    for token in tokens:
      vnums = token.split('.')
      # if it's at least a.b.c.d, with potentially an -alpha-dev, -alpha, -rc
      if (len(vnums) >= 4 and vnums[0].isdigit() and vnums[1].isdigit() and
          vnums[2].isdigit()):
        self._data['version'] = token
        return

  # From #20509
  # bug #20499 affects versions from 0.2.9.1-alpha-dev to 0.2.9.4-alpha-dev
  # and version 0.3.0.0-alpha-dev
  # Exhaustive lists are hard to get wrong
  STALE_CONSENSUS_VERSIONS = ['0.2.9.1-alpha-dev',
                              '0.2.9.2-alpha',
                              '0.2.9.2-alpha-dev',
                              '0.2.9.3-alpha',
                              '0.2.9.3-alpha-dev',
                              '0.2.9.4-alpha',
                              '0.2.9.4-alpha-dev',
                              '0.3.0.0-alpha-dev'
                              ]

  def is_valid_version(self):
    # call _compute_version before calling this
    # is the version of the relay a version we want as a fallback?
    # checks both recommended versions and bug #20499 / #20509
    #
    # if the relay doesn't have a recommended version field, exclude the relay
    if not self._data.has_key('recommended_version'):
      log_excluded('%s not a candidate: no recommended_version field',
                   self._fpr)
      return False
    if not self._data['recommended_version']:
      log_excluded('%s not a candidate: version not recommended', self._fpr)
      return False
    # if the relay doesn't have version field, exclude the relay
    if not self._data.has_key('version'):
      log_excluded('%s not a candidate: no version field', self._fpr)
      return False
    if self._data['version'] in Candidate.STALE_CONSENSUS_VERSIONS:
      logging.warning('%s not a candidate: version delivers stale consensuses',
                      self._fpr)
      return False
    return True

  @staticmethod
  def _extract_generic_history(history, which='unknown'):
    # given a tree like this:
    #   {
    #     "1_month": {
    #       "count": 187,
    #       "factor": 0.001001001001001001,
    #       "first": "2015-02-27 06:00:00",
    #       "interval": 14400,
    #       "last": "2015-03-30 06:00:00",
    #       "values": [
    #         999,
    #         999
    #       ]
    #     },
    #     "1_week": {
    #       "count": 169,
    #       "factor": 0.001001001001001001,
    #       "first": "2015-03-23 07:30:00",
    #       "interval": 3600,
    #       "last": "2015-03-30 07:30:00",
    #       "values": [ ...]
    #     },
    #     "1_year": {
    #       "count": 177,
    #       "factor": 0.001001001001001001,
    #       "first": "2014-04-11 00:00:00",
    #       "interval": 172800,
    #       "last": "2015-03-29 00:00:00",
    #       "values": [ ...]
    #     },
    #     "3_months": {
    #       "count": 185,
    #       "factor": 0.001001001001001001,
    #       "first": "2014-12-28 06:00:00",
    #       "interval": 43200,
    #       "last": "2015-03-30 06:00:00",
    #       "values": [ ...]
    #     }
    #   },
    # extract exactly one piece of data per time interval,
    # using smaller intervals where available.
    #
    # returns list of (age, length, value) dictionaries.

    generic_history = []

    periods = history.keys()
    periods.sort(key = lambda x: history[x]['interval'])
    now = datetime.datetime.utcnow()
    newest = now
    for p in periods:
      h = history[p]
      interval = datetime.timedelta(seconds = h['interval'])
      this_ts = parse_ts(h['last'])

      if (len(h['values']) != h['count']):
        logging.warning('Inconsistent value count in %s document for %s'
                        %(p, which))
      for v in reversed(h['values']):
        if (this_ts <= newest):
          agt1 = now - this_ts
          agt2 = interval
          agetmp1 = (agt1.microseconds + (agt1.seconds + agt1.days * 24 * 3600)
                     * 10**6) / 10**6
          agetmp2 = (agt2.microseconds + (agt2.seconds + agt2.days * 24 * 3600)
                     * 10**6) / 10**6
          generic_history.append(
            { 'age': agetmp1,
              'length': agetmp2,
              'value': v
            })
          newest = this_ts
        this_ts -= interval

      if (this_ts + interval != parse_ts(h['first'])):
        logging.warning('Inconsistent time information in %s document for %s'
                        %(p, which))

    #print json.dumps(generic_history, sort_keys=True,
    #                 indent=4, separators=(',', ': '))
    return generic_history

  @staticmethod
  def _avg_generic_history(generic_history):
    a = []
    for i in generic_history:
      if i['age'] > (ADDRESS_AND_PORT_STABLE_DAYS * 24 * 3600):
        continue
      if (i['length'] is not None
          and i['age'] is not None
          and i['value'] is not None):
        w = i['length'] * math.pow(AGE_ALPHA, i['age']/(3600*24))
        a.append( (i['value'] * w, w) )

    sv = math.fsum(map(lambda x: x[0], a))
    sw = math.fsum(map(lambda x: x[1], a))

    if sw == 0.0:
      svw = 0.0
    else:
      svw = sv/sw
    return svw
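
  # A worked example of the weighting above (illustrative numbers only):
  # with AGE_ALPHA = 0.99, an entry of value 999 that is 0 days old and one
  # of value 0 that is 30 days old, each covering 3600 seconds, have weights
  #   w_new = 3600 * 0.99**0  = 3600
  #   w_old = 3600 * 0.99**30 ~= 2663
  # so the weighted average is (999*3600 + 0*2663) / (3600 + 2663) ~= 574,
  # which add_uptime() then divides by ONIONOO_SCALE_ONE (999) to get ~0.57.
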
  def _add_generic_history(self, history):
    periods = history.keys()
    periods.sort(key = lambda x: history[x]['interval'] )

    print periods

  def add_running_history(self, history):
    pass

  def add_uptime(self, uptime):
    logging.debug('Adding uptime %s.'%(self._fpr,))

    # flags we care about: Running, V2Dir, Guard
    if not 'flags' in uptime:
      logging.debug('No flags in document for %s.'%(self._fpr,))
      return

    for f in ['Running', 'Guard', 'V2Dir']:
      if not f in uptime['flags']:
        logging.debug('No %s in flags for %s.'%(f, self._fpr,))
        return

    running = self._extract_generic_history(uptime['flags']['Running'],
                                             '%s-Running'%(self._fpr))
    guard = self._extract_generic_history(uptime['flags']['Guard'],
                                          '%s-Guard'%(self._fpr))
    v2dir = self._extract_generic_history(uptime['flags']['V2Dir'],
                                          '%s-V2Dir'%(self._fpr))
    if 'BadExit' in uptime['flags']:
      badexit = self._extract_generic_history(uptime['flags']['BadExit'],
                                              '%s-BadExit'%(self._fpr))

    self._running = self._avg_generic_history(running) / ONIONOO_SCALE_ONE
    self._guard = self._avg_generic_history(guard) / ONIONOO_SCALE_ONE
    self._v2dir = self._avg_generic_history(v2dir) / ONIONOO_SCALE_ONE
    self._badexit = None
    if 'BadExit' in uptime['flags']:
      self._badexit = self._avg_generic_history(badexit) / ONIONOO_SCALE_ONE

  def is_candidate(self):
    try:
      if (MUST_BE_RUNNING_NOW and not self.is_running()):
        log_excluded('%s not a candidate: not running now, unable to check ' +
                     'DirPort consensus download', self._fpr)
        return False
      if (self._data['last_changed_address_or_port'] >
          self.CUTOFF_ADDRESS_AND_PORT_STABLE):
        log_excluded('%s not a candidate: changed address/port recently (%s)',
                     self._fpr, self._data['last_changed_address_or_port'])
        return False
      if self._running < CUTOFF_RUNNING:
        log_excluded('%s not a candidate: running avg too low (%lf)',
                     self._fpr, self._running)
        return False
      if self._v2dir < CUTOFF_V2DIR:
        log_excluded('%s not a candidate: v2dir avg too low (%lf)',
                     self._fpr, self._v2dir)
        return False
      if self._badexit is not None and self._badexit > PERMITTED_BADEXIT:
        log_excluded('%s not a candidate: badexit avg too high (%lf)',
                     self._fpr, self._badexit)
        return False
      # this function logs a message depending on which check fails
      if not self.is_valid_version():
        return False
      if self._guard < CUTOFF_GUARD:
        log_excluded('%s not a candidate: guard avg too low (%lf)',
                     self._fpr, self._guard)
        return False
      if (not self._data.has_key('consensus_weight')
          or self._data['consensus_weight'] < 1):
        log_excluded('%s not a candidate: consensus weight invalid', self._fpr)
        return False
    except BaseException as e:
      logging.warning("Exception %s when checking if fallback is a candidate",
                      str(e))
      return False
    return True

  def is_in_whitelist(self, relaylist):
    """ A fallback matches if each key in the whitelist line matches:
          ipv4
          dirport
          orport
          id
          ipv6 address and port (if present)
        If the fallback has an ipv6 key, the whitelist line must also have
        it, and vice versa, otherwise they don't match. """
    ipv6 = None
    if self.has_ipv6():
      ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport)
    for entry in relaylist:
      if entry['id'] != self._fpr:
        # can't log here unless we match an IP and port, because every relay's
        # fingerprint is compared to every entry's fingerprint
        if entry['ipv4'] == self.dirip and int(entry['orport']) == self.orport:
          logging.warning('%s excluded: has OR %s:%d changed fingerprint to ' +
                          '%s?', entry['id'], self.dirip, self.orport,
                          self._fpr)
        if self.has_ipv6() and entry.has_key('ipv6') and entry['ipv6'] == ipv6:
          logging.warning('%s excluded: has OR %s changed fingerprint to ' +
                          '%s?', entry['id'], ipv6, self._fpr)
        continue
      if entry['ipv4'] != self.dirip:
        logging.warning('%s excluded: has it changed IPv4 from %s to %s?',
                        self._fpr, entry['ipv4'], self.dirip)
        continue
      if int(entry['dirport']) != self.dirport:
        logging.warning('%s excluded: has it changed DirPort from %s:%d to ' +
                        '%s:%d?', self._fpr, self.dirip, int(entry['dirport']),
                        self.dirip, self.dirport)
        continue
      if int(entry['orport']) != self.orport:
        logging.warning('%s excluded: has it changed ORPort from %s:%d to ' +
                        '%s:%d?', self._fpr, self.dirip, int(entry['orport']),
                        self.dirip, self.orport)
        continue
      if entry.has_key('ipv6') and self.has_ipv6():
        # if both entry and fallback have an ipv6 address, compare them
        if entry['ipv6'] != ipv6:
          logging.warning('%s excluded: has it changed IPv6 ORPort from %s ' +
                          'to %s?', self._fpr, entry['ipv6'], ipv6)
          continue
      # if the fallback has an IPv6 address but the whitelist entry
      # doesn't, or vice versa, the whitelist entry doesn't match
      elif entry.has_key('ipv6') and not self.has_ipv6():
        logging.warning('%s excluded: has it lost its former IPv6 address %s?',
                        self._fpr, entry['ipv6'])
        continue
      elif not entry.has_key('ipv6') and self.has_ipv6():
        logging.warning('%s excluded: has it gained an IPv6 address %s?',
                        self._fpr, ipv6)
        continue
      return True
    return False

  def is_in_blacklist(self, relaylist):
    """ A fallback matches a blacklist line if a sufficiently specific group
        of attributes matches:
          ipv4 & dirport
          ipv4 & orport
          id
          ipv6 & dirport
          ipv6 & ipv6 orport
        If the fallback and the blacklist line both have an ipv6 key,
        their values will be compared, otherwise, they will be ignored.
        If there is no dirport and no orport, the entry matches all relays on
        that ip. """
    for entry in relaylist:
      for key in entry:
        value = entry[key]
        if key == 'id' and value == self._fpr:
          log_excluded('%s is in the blacklist: fingerprint matches',
                       self._fpr)
          return True
        if key == 'ipv4' and value == self.dirip:
          # if the dirport is present, check it too
          if entry.has_key('dirport'):
            if int(entry['dirport']) == self.dirport:
              log_excluded('%s is in the blacklist: IPv4 (%s) and ' +
                           'DirPort (%d) match', self._fpr, self.dirip,
                           self.dirport)
              return True
          # if the orport is present, check it too
          elif entry.has_key('orport'):
            if int(entry['orport']) == self.orport:
              log_excluded('%s is in the blacklist: IPv4 (%s) and ' +
                           'ORPort (%d) match', self._fpr, self.dirip,
                           self.orport)
              return True
          else:
            log_excluded('%s is in the blacklist: IPv4 (%s) matches, and ' +
                         'entry has no DirPort or ORPort', self._fpr,
                         self.dirip)
            return True
        ipv6 = None
        if self.has_ipv6():
          ipv6 = '%s:%d'%(self.ipv6addr, self.ipv6orport)
        if (key == 'ipv6' and self.has_ipv6()):
          # if both entry and fallback have an ipv6 address, compare them,
          # otherwise, disregard ipv6 addresses
          if value == ipv6:
            # if the dirport is present, check it too
            if entry.has_key('dirport'):
              if int(entry['dirport']) == self.dirport:
                log_excluded('%s is in the blacklist: IPv6 (%s) and ' +
                             'DirPort (%d) match', self._fpr, ipv6,
                             self.dirport)
                return True
            # we've already checked the ORPort, it's part of entry['ipv6']
            else:
              log_excluded('%s is in the blacklist: IPv6 (%s) matches, and ' +
                           'entry has no DirPort', self._fpr, ipv6)
              return True
        elif (key == 'ipv6' or self.has_ipv6()):
          # only log if the fingerprint matches but the IPv6 doesn't
          if entry.has_key('id') and entry['id'] == self._fpr:
            log_excluded('%s skipping IPv6 blacklist comparison: relay ' +
                         'has%s IPv6%s, but entry has%s IPv6%s', self._fpr,
                         '' if self.has_ipv6() else ' no',
                         (' (' + ipv6 + ')') if self.has_ipv6() else '',
                         '' if key == 'ipv6' else ' no',
                         (' (' + value + ')') if key == 'ipv6' else '')
            logging.warning('Has %s %s IPv6 address %s?', self._fpr,
                            'gained an' if self.has_ipv6() else 'lost its former',
                            ipv6 if self.has_ipv6() else value)
    return False

  def cw_to_bw_factor(self):
    # any relays with a missing or zero consensus weight are not candidates
    # any relays with a missing advertised bandwidth have it set to zero
    return self._data['advertised_bandwidth'] / self._data['consensus_weight']

  # since advertised_bandwidth is reported by the relay, it can be gamed
  # to avoid this, use the median consensus weight to bandwidth factor to
  # estimate this relay's measured bandwidth, and make that the upper limit
  def measured_bandwidth(self, median_cw_to_bw_factor):
    cw_to_bw = median_cw_to_bw_factor
    # Reduce exit bandwidth to make sure we're not overloading them
    if self.is_exit():
      cw_to_bw *= EXIT_BANDWIDTH_FRACTION
    measured_bandwidth = self._data['consensus_weight'] * cw_to_bw
    if self._data['advertised_bandwidth'] != 0:
      # limit advertised bandwidth (if available) to measured bandwidth
      return min(measured_bandwidth, self._data['advertised_bandwidth'])
    else:
      return measured_bandwidth

  def set_measured_bandwidth(self, median_cw_to_bw_factor):
    self._data['measured_bandwidth'] = self.measured_bandwidth(
                                                      median_cw_to_bw_factor)
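
  # A worked example of the estimate above (illustrative numbers only):
  # if the network's median cw_to_bw_factor() is 500 bytes/s per consensus
  # weight unit, a non-exit relay with consensus_weight 4000 and
  # advertised_bandwidth 1500000 gets
  #   min(4000 * 500, 1500000) = 1500000 bytes/s
  # so the self-reported figure is only used when it is below the estimate
  # derived from the (measured) consensus weight.
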
  def is_exit(self):
    return 'Exit' in self._data['flags']

  def is_guard(self):
    return 'Guard' in self._data['flags']

  def is_running(self):
    return 'Running' in self._data['flags']

  # does this fallback have an IPv6 address and orport?
  def has_ipv6(self):
    return self.ipv6addr is not None and self.ipv6orport is not None

  # strip leading and trailing brackets from an IPv6 address
  # safe to use on non-bracketed IPv6 and on IPv4 addresses
  # also convert to unicode, and make None appear as ''
  @staticmethod
  def strip_ipv6_brackets(ip):
    if ip is None:
      return unicode('')
    if len(ip) < 2:
      return unicode(ip)
    if ip[0] == '[' and ip[-1] == ']':
      return unicode(ip[1:-1])
    return unicode(ip)

  # are ip_a and ip_b in the same netblock?
  # mask_bits is the size of the netblock
  # takes both IPv4 and IPv6 addresses
  # the versions of ip_a and ip_b must be the same
  # the mask must be valid for the IP version
  @staticmethod
  def netblocks_equal(ip_a, ip_b, mask_bits):
    if ip_a is None or ip_b is None:
      return False
    ip_a = Candidate.strip_ipv6_brackets(ip_a)
    ip_b = Candidate.strip_ipv6_brackets(ip_b)

    a = ipaddress.ip_address(ip_a)
    b = ipaddress.ip_address(ip_b)
    if a.version != b.version:
      raise Exception('Mismatching IP versions in %s and %s'%(ip_a, ip_b))
    if mask_bits > a.max_prefixlen:
      logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
      mask_bits = a.max_prefixlen
    if mask_bits < 0:
      logging.error('Bad IP mask %d for %s and %s'%(mask_bits, ip_a, ip_b))
      mask_bits = 0
    a_net = ipaddress.ip_network('%s/%d'%(ip_a, mask_bits), strict=False)
    return b in a_net
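
  # Illustrative examples for the netblock comparison above (made-up
  # addresses): with mask_bits=24, '198.51.100.1' and '198.51.100.200' are
  # both in 198.51.100.0/24, so netblocks_equal() returns True, while
  # '198.51.100.1' and '198.51.101.1' are not. Bracketed IPv6 addresses such
  # as '[2001:db8::1]' are unbracketed by strip_ipv6_brackets() first.
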
  # is this fallback's IPv4 address (dirip) in the same netblock as other's
  # IPv4 address?
  # mask_bits is the size of the netblock
  def ipv4_netblocks_equal(self, other, mask_bits):
    return Candidate.netblocks_equal(self.dirip, other.dirip, mask_bits)

  # is this fallback's IPv6 address (ipv6addr) in the same netblock as
  # other's IPv6 address?
  # Returns False if either fallback has no IPv6 address
  # mask_bits is the size of the netblock
  def ipv6_netblocks_equal(self, other, mask_bits):
    if not self.has_ipv6() or not other.has_ipv6():
      return False
    return Candidate.netblocks_equal(self.ipv6addr, other.ipv6addr, mask_bits)

  # is this fallback's IPv4 DirPort the same as other's IPv4 DirPort?
  def dirport_equal(self, other):
    return self.dirport == other.dirport

  # is this fallback's IPv4 ORPort the same as other's IPv4 ORPort?
  def ipv4_orport_equal(self, other):
    return self.orport == other.orport

  # is this fallback's IPv6 ORPort the same as other's IPv6 ORPort?
  # Returns False if either fallback has no IPv6 address
  def ipv6_orport_equal(self, other):
    if not self.has_ipv6() or not other.has_ipv6():
      return False
    return self.ipv6orport == other.ipv6orport

  # does this fallback have the same DirPort, IPv4 ORPort, or
  # IPv6 ORPort as other?
  # Ignores IPv6 ORPort if either fallback has no IPv6 address
  def port_equal(self, other):
    return (self.dirport_equal(other) or self.ipv4_orport_equal(other)
            or self.ipv6_orport_equal(other))

  # return a list containing IPv4 ORPort, DirPort, and IPv6 ORPort (if present)
  def port_list(self):
    ports = [self.dirport, self.orport]
    if self.has_ipv6() and not self.ipv6orport in ports:
      ports.append(self.ipv6orport)
    return ports

  # does this fallback share a port with other, regardless of whether the
  # port types match?
  # For example, if self's IPv4 ORPort is 80 and other's DirPort is 80,
  # return True
  def port_shared(self, other):
    for p in self.port_list():
      if p in other.port_list():
        return True
    return False

  # log how long it takes to download a consensus from dirip:dirport
  # returns True if the download failed, False if it succeeded within max_time
  @staticmethod
  def fallback_consensus_download_speed(dirip, dirport, nickname, fingerprint,
                                        max_time):
    download_failed = False
    # some directory mirrors respond to requests in ways that hang python
    # sockets, which is why we log this line here
    logging.info('Initiating %sconsensus download from %s (%s:%d) %s.',
                 'microdesc ' if DOWNLOAD_MICRODESC_CONSENSUS else '',
                 nickname, dirip, dirport, fingerprint)
    # there appears to be about 1 second of overhead when comparing stem's
    # internal trace time and the elapsed time calculated here
    TIMEOUT_SLOP = 1.0
    start = datetime.datetime.utcnow()
    try:
      consensus = get_consensus(
                    endpoints = [(dirip, dirport)],
                    timeout = (max_time + TIMEOUT_SLOP),
                    validate = True,
                    retries = 0,
                    fall_back_to_authority = False,
                    document_handler = DocumentHandler.BARE_DOCUMENT,
                    microdescriptor = DOWNLOAD_MICRODESC_CONSENSUS
                  ).run()[0]
      end = datetime.datetime.utcnow()
      time_since_expiry = (end - consensus.valid_until).total_seconds()
    except Exception, stem_error:
      end = datetime.datetime.utcnow()
      log_excluded('Unable to retrieve a consensus from %s: %s', nickname,
                   stem_error)
      status = 'error: "%s"' % (stem_error)
      level = logging.WARNING
      download_failed = True
    elapsed = (end - start).total_seconds()
    if download_failed:
      # keep the error failure status, and avoid using the variables
      pass
    elif elapsed > max_time:
      status = 'too slow'
      level = logging.WARNING
      download_failed = True
    elif (time_since_expiry > 0):
      status = 'outdated consensus, expired %ds ago'%(int(time_since_expiry))
      if time_since_expiry <= CONSENSUS_EXPIRY_TOLERANCE:
        status += ', tolerating up to %ds'%(CONSENSUS_EXPIRY_TOLERANCE)
        level = logging.INFO
      else:
        status += ', invalid'
        level = logging.WARNING
        download_failed = True
    else:
      status = 'ok'
      level = logging.DEBUG
    logging.log(level, 'Consensus download: %0.1fs %s from %s (%s:%d) %s, ' +
                'max download time %0.1fs.', elapsed, status, nickname,
                dirip, dirport, fingerprint, max_time)
    return download_failed
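  # Note: True means the download failed: an exception, a download slower
  # than max_time, or a consensus that expired more than
  # CONSENSUS_EXPIRY_TOLERANCE seconds ago. A consensus that expired within
  # the tolerance is logged but still counts as a success.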
  # does this fallback download the consensus fast enough?
  def check_fallback_download_consensus(self):
    # include the relay if we're not doing a check, or we can't check (IPv6)
    ipv4_failed = False
    ipv6_failed = False
    if PERFORM_IPV4_DIRPORT_CHECKS:
      ipv4_failed = Candidate.fallback_consensus_download_speed(self.dirip,
                                                self.dirport,
                                                self._data['nickname'],
                                                self._fpr,
                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
    if self.has_ipv6() and PERFORM_IPV6_DIRPORT_CHECKS:
      # Clients assume the IPv6 DirPort is the same as the IPv4 DirPort
      ipv6_failed = Candidate.fallback_consensus_download_speed(self.ipv6addr,
                                                self.dirport,
                                                self._data['nickname'],
                                                self._fpr,
                                                CONSENSUS_DOWNLOAD_SPEED_MAX)
    return ((not ipv4_failed) and (not ipv6_failed))
  # if this fallback has not passed a download check, try it again,
  # and record the result, available in get_fallback_download_consensus
  def try_fallback_download_consensus(self):
    if not self.get_fallback_download_consensus():
      self._data['download_check'] = self.check_fallback_download_consensus()
  # did this fallback pass the download check?
  def get_fallback_download_consensus(self):
    # if we're not performing checks, return True
    if not PERFORM_IPV4_DIRPORT_CHECKS and not PERFORM_IPV6_DIRPORT_CHECKS:
      return True
    # if we are performing checks, but haven't done one, return False
    if not self._data.has_key('download_check'):
      return False
    return self._data['download_check']
  # output an optional header comment and info for this fallback
  # try_fallback_download_consensus before calling this
  def fallbackdir_line(self, fallbacks, prefilter_fallbacks):
    s = ''
    if OUTPUT_COMMENTS:
      s += self.fallbackdir_comment(fallbacks, prefilter_fallbacks)
    # if the download speed is ok, output a C string
    # if it's not, but we OUTPUT_COMMENTS, output a commented-out C string
    if self.get_fallback_download_consensus() or OUTPUT_COMMENTS:
      s += self.fallbackdir_info(self.get_fallback_download_consensus())
    return s
  # output a header comment for this fallback
  def fallbackdir_comment(self, fallbacks, prefilter_fallbacks):
    # /*
    # nickname
    # flags
    # adjusted bandwidth, consensus weight
    # [contact]
    # [identical contact counts]
    # */
    # Multiline C comment
    s = '/*'
    s += '\n'
    s += cleanse_c_multiline_comment(self._data['nickname'])
    s += '\n'
    s += 'Flags: '
    s += cleanse_c_multiline_comment(' '.join(sorted(self._data['flags'])))
    s += '\n'
    # this is an adjusted bandwidth, see calculate_measured_bandwidth()
    bandwidth = self._data['measured_bandwidth']
    weight = self._data['consensus_weight']
    s += 'Bandwidth: %.1f MByte/s, Consensus Weight: %d'%(
        bandwidth/(1024.0*1024.0),
        weight)
    s += '\n'
    if self._data['contact'] is not None:
      s += cleanse_c_multiline_comment(self._data['contact'])
      if CONTACT_COUNT or CONTACT_BLACKLIST_COUNT:
        fallback_count = len([f for f in fallbacks
                              if f._data['contact'] == self._data['contact']])
        if fallback_count > 1:
          s += '\n'
          s += '%d identical contacts listed' % (fallback_count)
        if CONTACT_BLACKLIST_COUNT:
          prefilter_count = len([f for f in prefilter_fallbacks
                                 if f._data['contact'] == self._data['contact']])
          filter_count = prefilter_count - fallback_count
          if filter_count > 0:
            if fallback_count > 1:
              s += ' '
            else:
              s += '\n'
            s += '%d blacklisted' % (filter_count)
    s += '\n'
    s += '*/'
    s += '\n'
    return s
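  # Illustrative header output (hypothetical relay details):
  # /*
  # ExampleRelay
  # Flags: Fast Guard Running Stable V2Dir Valid
  # Bandwidth: 21.5 MByte/s, Consensus Weight: 58000
  # operator@example.com
  # */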
  # output the fallback info C string for this fallback
  # this is the text that would go after FallbackDir in a torrc
  # if this relay failed the download test and we OUTPUT_COMMENTS,
  # comment-out the returned string
  def fallbackdir_info(self, dl_speed_ok):
    # "address:dirport orport=port id=fingerprint"
    # "[ipv6=addr:orport]"
    # /* nickname=name */
    # /* extrainfo={0,1} */
    # ,
    #
    # Do we want a C string, or a commented-out string?
    c_string = dl_speed_ok
    comment_string = not dl_speed_ok and OUTPUT_COMMENTS
    # If we don't want either kind of string, bail
    if not c_string and not comment_string:
      return ''
    s = ''
    # Comment out the fallback directory entry if it's too slow
    # See the debug output for which address and port is failing
    if comment_string:
      s += '/* Consensus download failed or was too slow:\n'
    # Multi-Line C string with trailing comma (part of a string list)
    # This makes it easier to diff the file, and remove IPv6 lines using grep
    # Integers don't need escaping
    s += '"%s orport=%d id=%s"'%(
            cleanse_c_string(self._data['dir_address']),
            self.orport,
            cleanse_c_string(self._fpr))
    s += '\n'
    if self.has_ipv6():
      s += '" ipv6=%s:%d"'%(cleanse_c_string(self.ipv6addr), self.ipv6orport)
      s += '\n'
    if not comment_string:
      s += '/* '
    s += 'nickname=%s'%(cleanse_c_string(self._data['nickname']))
    if not comment_string:
      s += ' */'
    s += '\n'
    # if we know that the fallback is an extrainfo cache, flag it
    # and if we don't know, assume it is not
    if not comment_string:
      s += '/* '
    s += 'extrainfo=%d'%(1 if self._extra_info_cache else 0)
    if not comment_string:
      s += ' */'
    s += '\n'
    s += ','
    if comment_string:
      s += '\n'
      s += '*/'
    return s
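  # Illustrative entry (hypothetical relay details) when dl_speed_ok is True:
  # "192.0.2.1:80 orport=443 id=0123456789ABCDEF0123456789ABCDEF01234567"
  # " ipv6=[2001:db8::1]:443"
  # /* nickname=ExampleRelay */
  # /* extrainfo=0 */
  # ,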
## Fallback Candidate List Class

class CandidateList(dict):
  def __init__(self):
    pass
  def _add_relay(self, details):
    if not 'dir_address' in details: return
    c = Candidate(details)
    self[ c.get_fingerprint() ] = c
  def _add_uptime(self, uptime):
    try:
      fpr = uptime['fingerprint']
    except KeyError:
      raise Exception("Document has no fingerprint field.")
    try:
      c = self[fpr]
    except KeyError:
      logging.debug('Got unknown relay %s in uptime document.'%(fpr,))
      return
    c.add_uptime(uptime)
  def _add_details(self):
    logging.debug('Loading details document.')
    d = fetch('details',
        fields=('fingerprint,nickname,contact,last_changed_address_or_port,' +
                'consensus_weight,advertised_bandwidth,or_addresses,' +
                'dir_address,recommended_version,flags,effective_family,' +
                'platform'))
    logging.debug('Loading details document done.')
    if not 'relays' in d: raise Exception("No relays found in document.")
    for r in d['relays']: self._add_relay(r)
  def _add_uptimes(self):
    logging.debug('Loading uptime document.')
    d = fetch('uptime')
    logging.debug('Loading uptime document done.')
    if not 'relays' in d: raise Exception("No relays found in document.")
    for r in d['relays']: self._add_uptime(r)
  def add_relays(self):
    self._add_details()
    self._add_uptimes()
  def count_guards(self):
    guard_count = 0
    for fpr in self.keys():
      if self[fpr].is_guard():
        guard_count += 1
    return guard_count
  # Find fallbacks that fit the uptime, stability, and flags criteria,
  # and make an array of them in self.fallbacks
  def compute_fallbacks(self):
    self.fallbacks = map(lambda x: self[x],
                         filter(lambda x: self[x].is_candidate(),
                                self.keys()))
  # sort fallbacks by their consensus weight to advertised bandwidth factor,
  # lowest to highest
  # used to find the median cw_to_bw_factor()
  def sort_fallbacks_by_cw_to_bw_factor(self):
    self.fallbacks.sort(key=lambda f: f.cw_to_bw_factor())
  # sort fallbacks by their measured bandwidth, highest to lowest
  # calculate_measured_bandwidth before calling this
  # this is useful for reviewing candidates in priority order
  def sort_fallbacks_by_measured_bandwidth(self):
    self.fallbacks.sort(key=lambda f: f._data['measured_bandwidth'],
                        reverse=True)
  # sort fallbacks by the data field data_field, lowest to highest
  def sort_fallbacks_by(self, data_field):
    self.fallbacks.sort(key=lambda f: f._data[data_field])
  @staticmethod
  def load_relaylist(file_obj):
    """ Read each line in the file, and parse it like a FallbackDir line:
        an IPv4 address and optional port:
          <IPv4 address>:<port>
        which are parsed into dictionary entries:
          ipv4=<IPv4 address>
          dirport=<port>
        followed by a series of key=value entries:
          orport=<port>
          id=<fingerprint>
          ipv6=<IPv6 address>:<IPv6 orport>
        each line's key/value pairs are placed in a dictionary,
        (of string -> string key/value pairs),
        and these dictionaries are placed in an array.
        comments start with # and are ignored """
    file_data = file_obj['data']
    file_name = file_obj['name']
    relaylist = []
    if file_data is None:
      return relaylist
    for line in file_data.split('\n'):
      relay_entry = {}
      # ignore comments
      line_comment_split = line.split('#')
      line = line_comment_split[0]
      # cleanup whitespace
      line = cleanse_whitespace(line)
      line = line.strip()
      if len(line) == 0:
        continue
      for item in line.split(' '):
        item = item.strip()
        if len(item) == 0:
          continue
        key_value_split = item.split('=')
        kvl = len(key_value_split)
        if kvl < 1 or kvl > 2:
          print '#error Bad %s item: %s, format is key=value.'%(
            file_name, item)
        if kvl == 1:
          # assume that entries without a key are the ipv4 address,
          # perhaps with a dirport
          ipv4_maybe_dirport = key_value_split[0]
          ipv4_maybe_dirport_split = ipv4_maybe_dirport.split(':')
          dirl = len(ipv4_maybe_dirport_split)
          if dirl < 1 or dirl > 2:
            print '#error Bad %s IPv4 item: %s, format is ipv4:port.'%(
              file_name, item)
          if dirl >= 1:
            relay_entry['ipv4'] = ipv4_maybe_dirport_split[0]
          if dirl == 2:
            relay_entry['dirport'] = ipv4_maybe_dirport_split[1]
        elif kvl == 2:
          relay_entry[key_value_split[0]] = key_value_split[1]
      relaylist.append(relay_entry)
    return relaylist
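  # Illustrative whitelist line (hypothetical relay):
  #   192.0.2.1:80 orport=443 id=0123456789ABCDEF0123456789ABCDEF01234567 ipv6=[2001:db8::1]:443
  # parses to:
  #   {'ipv4': '192.0.2.1', 'dirport': '80', 'orport': '443',
  #    'id': '0123456789ABCDEF0123456789ABCDEF01234567',
  #    'ipv6': '[2001:db8::1]:443'}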
  # apply the fallback whitelist and blacklist
  def apply_filter_lists(self, whitelist_obj, blacklist_obj):
    excluded_count = 0
    logging.debug('Applying whitelist and blacklist.')
    # parse the whitelist and blacklist
    whitelist = self.load_relaylist(whitelist_obj)
    blacklist = self.load_relaylist(blacklist_obj)
    filtered_fallbacks = []
    for f in self.fallbacks:
      in_whitelist = f.is_in_whitelist(whitelist)
      in_blacklist = f.is_in_blacklist(blacklist)
      if in_whitelist and in_blacklist:
        if BLACKLIST_EXCLUDES_WHITELIST_ENTRIES:
          # exclude
          excluded_count += 1
          logging.warning('Excluding %s: in both blacklist and whitelist.',
                          f._fpr)
        else:
          # include
          filtered_fallbacks.append(f)
      elif in_whitelist:
        # include
        filtered_fallbacks.append(f)
      elif in_blacklist:
        # exclude
        excluded_count += 1
        log_excluded('Excluding %s: in blacklist.', f._fpr)
      else:
        if INCLUDE_UNLISTED_ENTRIES:
          # include
          filtered_fallbacks.append(f)
        else:
          # exclude
          excluded_count += 1
          log_excluded('Excluding %s: in neither blacklist nor whitelist.',
                       f._fpr)
    self.fallbacks = filtered_fallbacks
    return excluded_count
  @staticmethod
  def summarise_filters(initial_count, excluded_count):
    return '/* Whitelist & blacklist excluded %d of %d candidates. */'%(
            excluded_count, initial_count)
  # calculate each fallback's measured bandwidth based on the median
  # consensus weight to advertised bandwidth ratio
  def calculate_measured_bandwidth(self):
    self.sort_fallbacks_by_cw_to_bw_factor()
    median_fallback = self.fallback_median(True)
    if median_fallback is not None:
      median_cw_to_bw_factor = median_fallback.cw_to_bw_factor()
    else:
      # this will never be used, because there are no fallbacks
      median_cw_to_bw_factor = None
    for f in self.fallbacks:
      f.set_measured_bandwidth(median_cw_to_bw_factor)
  # remove relays with low measured bandwidth from the fallback list
  # calculate_measured_bandwidth for each relay before calling this
  def remove_low_bandwidth_relays(self):
    if MIN_BANDWIDTH is None:
      return
    above_min_bw_fallbacks = []
    for f in self.fallbacks:
      if f._data['measured_bandwidth'] >= MIN_BANDWIDTH:
        above_min_bw_fallbacks.append(f)
      else:
        # the bandwidth we log here is limited by the relay's consensus weight
        # as well as its advertised bandwidth. See set_measured_bandwidth
        # for details
        log_excluded('%s not a candidate: bandwidth %.1fMByte/s too low, ' +
                     'must be at least %.1fMByte/s', f._fpr,
                     f._data['measured_bandwidth']/(1024.0*1024.0),
                     MIN_BANDWIDTH/(1024.0*1024.0))
    self.fallbacks = above_min_bw_fallbacks
  # the minimum fallback in the list
  # call one of the sort_fallbacks_* functions before calling this
  def fallback_min(self):
    if len(self.fallbacks) > 0:
      return self.fallbacks[-1]
    else:
      return None
  # the median fallback in the list
  # call one of the sort_fallbacks_* functions before calling this
  def fallback_median(self, require_advertised_bandwidth):
    # use the low-median when there are an even number of fallbacks,
    # for consistency with the bandwidth authorities
    if len(self.fallbacks) > 0:
      median_position = (len(self.fallbacks) - 1) / 2
      if not require_advertised_bandwidth:
        return self.fallbacks[median_position]
      # if we need advertised_bandwidth but this relay doesn't have it,
      # move to a fallback with greater consensus weight until we find one
      while not self.fallbacks[median_position]._data['advertised_bandwidth']:
        median_position += 1
        if median_position >= len(self.fallbacks):
          return None
      return self.fallbacks[median_position]
    else:
      return None
  # the maximum fallback in the list
  # call one of the sort_fallbacks_* functions before calling this
  def fallback_max(self):
    if len(self.fallbacks) > 0:
      return self.fallbacks[0]
    else:
      return None
  # return a new bag suitable for storing attributes
  @staticmethod
  def attribute_new():
    return dict()
  # get the count of attribute in attribute_bag
  # if attribute is None or the empty string, return 0
  @staticmethod
  def attribute_count(attribute, attribute_bag):
    if attribute is None or attribute == '':
      return 0
    if attribute not in attribute_bag:
      return 0
    return attribute_bag[attribute]
  # does attribute_bag contain more than max_count instances of attribute?
  # if so, return False
  # if not, return True
  # if attribute is None or the empty string, or max_count is invalid,
  # always return True
  @staticmethod
  def attribute_allow(attribute, attribute_bag, max_count=1):
    if attribute is None or attribute == '' or max_count <= 0:
      return True
    elif CandidateList.attribute_count(attribute, attribute_bag) >= max_count:
      return False
    else:
      return True
  # add attribute to attribute_bag, incrementing the count if it is already
  # present
  # if attribute is None or the empty string, or count is invalid,
  # do nothing
  @staticmethod
  def attribute_add(attribute, attribute_bag, count=1):
    if attribute is None or attribute == '' or count <= 0:
      return
    attribute_bag.setdefault(attribute, 0)
    attribute_bag[attribute] += count
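  # Together, attribute_new/attribute_count/attribute_allow/attribute_add
  # implement a simple counting bag (multiset) used by the
  # limit_fallbacks_same_* checks below: a fallback is allowed only while the
  # bag holds fewer than max_count instances of its IP address, ContactInfo,
  # or family fingerprint, and is then added to the bag.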
  # make sure there are only MAX_FALLBACKS_PER_IP fallbacks per IPv4 address,
  # and per IPv6 address
  # there is only one IPv4 address on each fallback: the IPv4 DirPort address
  # (we choose the IPv4 ORPort which is on the same IPv4 as the DirPort)
  # there is at most one IPv6 address on each fallback: the IPv6 ORPort address
  # we try to match the IPv4 ORPort, but will use any IPv6 address if needed
  # (clients only use the IPv6 ORPort)
  # if there is no IPv6 address, only the IPv4 address is checked
  # return the number of candidates we excluded
  def limit_fallbacks_same_ip(self):
    ip_limit_fallbacks = []
    ip_list = CandidateList.attribute_new()
    for f in self.fallbacks:
      if (CandidateList.attribute_allow(f.dirip, ip_list,
                                        MAX_FALLBACKS_PER_IPV4)
          and CandidateList.attribute_allow(f.ipv6addr, ip_list,
                                            MAX_FALLBACKS_PER_IPV6)):
        ip_limit_fallbacks.append(f)
        CandidateList.attribute_add(f.dirip, ip_list)
        if f.has_ipv6():
          CandidateList.attribute_add(f.ipv6addr, ip_list)
      elif not CandidateList.attribute_allow(f.dirip, ip_list,
                                             MAX_FALLBACKS_PER_IPV4):
        log_excluded('Eliminated %s: already have %d fallback(s) on IPv4 %s'
                     %(f._fpr, CandidateList.attribute_count(f.dirip, ip_list),
                       f.dirip))
      elif (f.has_ipv6() and
            not CandidateList.attribute_allow(f.ipv6addr, ip_list,
                                              MAX_FALLBACKS_PER_IPV6)):
        log_excluded('Eliminated %s: already have %d fallback(s) on IPv6 %s'
                     %(f._fpr, CandidateList.attribute_count(f.ipv6addr,
                                                             ip_list),
                       f.ipv6addr))
    original_count = len(self.fallbacks)
    self.fallbacks = ip_limit_fallbacks
    return original_count - len(self.fallbacks)
  # make sure there are only MAX_FALLBACKS_PER_CONTACT fallbacks for each
  # ContactInfo
  # if there is no ContactInfo, allow the fallback
  # this check can be gamed by providing no ContactInfo, or by setting the
  # ContactInfo to match another fallback
  # However, given the likelihood that relays with the same ContactInfo will
  # go down at similar times, its usefulness outweighs the risk
  def limit_fallbacks_same_contact(self):
    contact_limit_fallbacks = []
    contact_list = CandidateList.attribute_new()
    for f in self.fallbacks:
      if CandidateList.attribute_allow(f._data['contact'], contact_list,
                                       MAX_FALLBACKS_PER_CONTACT):
        contact_limit_fallbacks.append(f)
        CandidateList.attribute_add(f._data['contact'], contact_list)
      else:
        log_excluded(
          'Eliminated %s: already have %d fallback(s) on ContactInfo %s'
          %(f._fpr, CandidateList.attribute_count(f._data['contact'],
                                                  contact_list),
            f._data['contact']))
    original_count = len(self.fallbacks)
    self.fallbacks = contact_limit_fallbacks
    return original_count - len(self.fallbacks)
  # make sure there are only MAX_FALLBACKS_PER_FAMILY fallbacks per effective
  # family
  # if there is no family, allow the fallback
  # we use effective family, which ensures mutual family declarations
  # but the check can be gamed by not declaring a family at all
  # if any indirect families exist, the result depends on the order in which
  # fallbacks are sorted in the list
  def limit_fallbacks_same_family(self):
    family_limit_fallbacks = []
    fingerprint_list = CandidateList.attribute_new()
    for f in self.fallbacks:
      if CandidateList.attribute_allow(f._fpr, fingerprint_list,
                                       MAX_FALLBACKS_PER_FAMILY):
        family_limit_fallbacks.append(f)
        CandidateList.attribute_add(f._fpr, fingerprint_list)
        for family_fingerprint in f._data['effective_family']:
          CandidateList.attribute_add(family_fingerprint, fingerprint_list)
      else:
        # we already have a fallback that lists this fallback in its
        # effective family
        log_excluded(
          'Eliminated %s: already have %d fallback(s) in effective family'
          %(f._fpr, CandidateList.attribute_count(f._fpr, fingerprint_list)))
    original_count = len(self.fallbacks)
    self.fallbacks = family_limit_fallbacks
    return original_count - len(self.fallbacks)
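  # The counts returned by the three limit_fallbacks_same_* methods are summed
  # into operator_count in list_fallbacks(), so the final summary can report
  # how many candidates were excluded as "Same Operator".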
  # try once to get the descriptors for fingerprint_list using stem
  # returns an empty list on exception
  @staticmethod
  def get_fallback_descriptors_once(fingerprint_list):
    desc_list = get_server_descriptors(fingerprints=fingerprint_list).run(suppress=True)
    return desc_list
  # try up to max_retries times to get the descriptors for fingerprint_list
  # using stem. Stops retrying when all descriptors have been retrieved.
  # returns a list containing the descriptors that were retrieved
  @staticmethod
  def get_fallback_descriptors(fingerprint_list, max_retries=5):
    # we can't use stem's retries=, because we want to support more than 96
    # descriptors
    #
    # add an attempt for every MAX_FINGERPRINTS (or part thereof) in the list
    max_retries += (len(fingerprint_list) + MAX_FINGERPRINTS - 1) / MAX_FINGERPRINTS
    remaining_list = fingerprint_list
    desc_list = []
    for _ in xrange(max_retries):
      if len(remaining_list) == 0:
        break
      new_desc_list = CandidateList.get_fallback_descriptors_once(remaining_list[0:MAX_FINGERPRINTS])
      for d in new_desc_list:
        try:
          remaining_list.remove(d.fingerprint)
        except ValueError:
          # warn and ignore if a directory mirror returned a bad descriptor
          logging.warning("Directory mirror returned unwanted descriptor %s, ignoring",
                          d.fingerprint)
          continue
        desc_list.append(d)
    return desc_list
  # find the fallbacks that cache extra-info documents
  # Onionoo doesn't know this, so we have to use stem
  def mark_extra_info_caches(self):
    fingerprint_list = [ f._fpr for f in self.fallbacks ]
    logging.info("Downloading fallback descriptors to find extra-info caches")
    desc_list = CandidateList.get_fallback_descriptors(fingerprint_list)
    for d in desc_list:
      self[d.fingerprint]._extra_info_cache = d.extra_info_cache
    missing_descriptor_list = [ f._fpr for f in self.fallbacks
                                if f._extra_info_cache is None ]
    for f in missing_descriptor_list:
      logging.warning("No descriptor for {}. Assuming extrainfo=0.".format(f))
  # try a download check on each fallback candidate in order
  # stop after max_count successful downloads
  # but don't remove any candidates from the array
  def try_download_consensus_checks(self, max_count):
    dl_ok_count = 0
    for f in self.fallbacks:
      f.try_fallback_download_consensus()
      if f.get_fallback_download_consensus():
        # this fallback downloaded a consensus ok
        dl_ok_count += 1
        if dl_ok_count >= max_count:
          # we have enough fallbacks
          return
  # put max_count successful candidates in the fallbacks array:
  # - perform download checks on each fallback candidate
  # - retry failed candidates if CONSENSUS_DOWNLOAD_RETRY is set
  # - eliminate failed candidates
  # - if there are more than max_count candidates, eliminate lowest bandwidth
  # - if there are fewer than max_count candidates, leave only successful
  # Return the number of fallbacks that failed the consensus check
  def perform_download_consensus_checks(self, max_count):
    self.sort_fallbacks_by_measured_bandwidth()
    self.try_download_consensus_checks(max_count)
    if CONSENSUS_DOWNLOAD_RETRY:
      # try unsuccessful candidates again
      # we could end up with more than max_count successful candidates here
      self.try_download_consensus_checks(max_count)
    # now we have at least max_count successful candidates,
    # or we've tried them all
    original_count = len(self.fallbacks)
    self.fallbacks = filter(lambda x: x.get_fallback_download_consensus(),
                            self.fallbacks)
    # some of these failed the check, others skipped the check,
    # if we already had enough successful downloads
    failed_count = original_count - len(self.fallbacks)
    self.fallbacks = self.fallbacks[:max_count]
    return failed_count
  # return a string that describes a/b as a percentage
  @staticmethod
  def describe_percentage(a, b):
    if b != 0:
      return '%d/%d = %.0f%%'%(a, b, (a*100.0)/b)
    else:
      # technically, 0/0 is undefined, but 0.0% is a sensible result
      return '%d/%d = %.0f%%'%(a, b, 0.0)
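  # For example, describe_percentage(3, 4) returns '3/4 = 75%'.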
  # return a dictionary of lists of fallbacks by IPv4 netblock
  # the dictionary is keyed by the fingerprint of an arbitrary fallback
  # in each netblock
  # mask_bits is the size of the netblock
  def fallbacks_by_ipv4_netblock(self, mask_bits):
    netblocks = {}
    for f in self.fallbacks:
      found_netblock = False
      for b in netblocks.keys():
        # we found an existing netblock containing this fallback
        if f.ipv4_netblocks_equal(self[b], mask_bits):
          # add it to the list
          netblocks[b].append(f)
          found_netblock = True
          break
      # make a new netblock based on this fallback's fingerprint
      if not found_netblock:
        netblocks[f._fpr] = [f]
    return netblocks
  # return a dictionary of lists of fallbacks by IPv6 netblock
  # where mask_bits is the size of the netblock
  def fallbacks_by_ipv6_netblock(self, mask_bits):
    netblocks = {}
    for f in self.fallbacks:
      # skip fallbacks without IPv6 addresses
      if not f.has_ipv6():
        continue
      found_netblock = False
      for b in netblocks.keys():
        # we found an existing netblock containing this fallback
        if f.ipv6_netblocks_equal(self[b], mask_bits):
          # add it to the list
          netblocks[b].append(f)
          found_netblock = True
          break
      # make a new netblock based on this fallback's fingerprint
      if not found_netblock:
        netblocks[f._fpr] = [f]
    return netblocks
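  # Both grouping helpers compare each fallback against one representative
  # fallback per existing netblock, so grouping is quadratic in the number of
  # fallbacks; that is acceptable for the list sizes this script handles.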
  # log a message about the proportion of fallbacks in each IPv4 netblock,
  # where mask_bits is the size of the netblock
  def describe_fallback_ipv4_netblock_mask(self, mask_bits):
    fallback_count = len(self.fallbacks)
    shared_netblock_fallback_count = 0
    most_frequent_netblock = None
    netblocks = self.fallbacks_by_ipv4_netblock(mask_bits)
    for b in netblocks.keys():
      if len(netblocks[b]) > 1:
        # how many fallbacks are in a netblock with other fallbacks?
        shared_netblock_fallback_count += len(netblocks[b])
        # what's the netblock with the most fallbacks?
        if (most_frequent_netblock is None
            or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
          most_frequent_netblock = b
        logging.debug('Fallback IPv4 addresses in the same /%d:'%(mask_bits))
        for f in netblocks[b]:
          logging.debug('%s - %s', f.dirip, f._fpr)
    if most_frequent_netblock is not None:
      logging.warning('There are %s fallbacks in the IPv4 /%d containing %s'%(
                      CandidateList.describe_percentage(
                        len(netblocks[most_frequent_netblock]),
                        fallback_count),
                      mask_bits,
                      self[most_frequent_netblock].dirip))
    if shared_netblock_fallback_count > 0:
      logging.warning(('%s of fallbacks are in an IPv4 /%d with other ' +
                       'fallbacks')%(CandidateList.describe_percentage(
                                       shared_netblock_fallback_count,
                                       fallback_count),
                                     mask_bits))
  # log a message about the proportion of fallbacks in each IPv6 netblock,
  # where mask_bits is the size of the netblock
  def describe_fallback_ipv6_netblock_mask(self, mask_bits):
    fallback_count = len(self.fallbacks_with_ipv6())
    shared_netblock_fallback_count = 0
    most_frequent_netblock = None
    netblocks = self.fallbacks_by_ipv6_netblock(mask_bits)
    for b in netblocks.keys():
      if len(netblocks[b]) > 1:
        # how many fallbacks are in a netblock with other fallbacks?
        shared_netblock_fallback_count += len(netblocks[b])
        # what's the netblock with the most fallbacks?
        if (most_frequent_netblock is None
            or len(netblocks[b]) > len(netblocks[most_frequent_netblock])):
          most_frequent_netblock = b
        logging.debug('Fallback IPv6 addresses in the same /%d:'%(mask_bits))
        for f in netblocks[b]:
          logging.debug('%s - %s', f.ipv6addr, f._fpr)
    if most_frequent_netblock is not None:
      logging.warning('There are %s fallbacks in the IPv6 /%d containing %s'%(
                      CandidateList.describe_percentage(
                        len(netblocks[most_frequent_netblock]),
                        fallback_count),
                      mask_bits,
                      self[most_frequent_netblock].ipv6addr))
    if shared_netblock_fallback_count > 0:
      logging.warning(('%s of fallbacks are in an IPv6 /%d with other ' +
                       'fallbacks')%(CandidateList.describe_percentage(
                                       shared_netblock_fallback_count,
                                       fallback_count),
                                     mask_bits))
  # log a message about the proportion of fallbacks in each IPv4 /8, /16,
  # and /24
  def describe_fallback_ipv4_netblocks(self):
    # this doesn't actually tell us anything useful
    #self.describe_fallback_ipv4_netblock_mask(8)
    self.describe_fallback_ipv4_netblock_mask(16)
    self.describe_fallback_ipv4_netblock_mask(24)
  # log a message about the proportion of fallbacks in each IPv6 /12 (RIR),
  # /23 (smaller RIR blocks), /32 (LIR), /48 (Customer), and /64 (Host)
  # https://www.iana.org/assignments/ipv6-unicast-address-assignments/
  def describe_fallback_ipv6_netblocks(self):
    # these don't actually tell us anything useful
    #self.describe_fallback_ipv6_netblock_mask(12)
    #self.describe_fallback_ipv6_netblock_mask(23)
    self.describe_fallback_ipv6_netblock_mask(32)
    self.describe_fallback_ipv6_netblock_mask(48)
    self.describe_fallback_ipv6_netblock_mask(64)
  # log a message about the proportion of fallbacks in each IPv4 and IPv6
  # netblock
  def describe_fallback_netblocks(self):
    self.describe_fallback_ipv4_netblocks()
    self.describe_fallback_ipv6_netblocks()
  # return a list of fallbacks which are on the IPv4 ORPort port
  def fallbacks_on_ipv4_orport(self, port):
    return filter(lambda x: x.orport == port, self.fallbacks)
  # return a list of fallbacks which are on the IPv6 ORPort port
  def fallbacks_on_ipv6_orport(self, port):
    return filter(lambda x: x.ipv6orport == port, self.fallbacks_with_ipv6())
  # return a list of fallbacks which are on the DirPort port
  def fallbacks_on_dirport(self, port):
    return filter(lambda x: x.dirport == port, self.fallbacks)
  # log a message about the proportion of fallbacks on IPv4 ORPort port
  # and return that count
  def describe_fallback_ipv4_orport(self, port):
    port_count = len(self.fallbacks_on_ipv4_orport(port))
    fallback_count = len(self.fallbacks)
    logging.warning('%s of fallbacks are on IPv4 ORPort %d'%(
                    CandidateList.describe_percentage(port_count,
                                                      fallback_count),
                    port))
    return port_count
  # log a message about the proportion of IPv6 fallbacks on IPv6 ORPort port
  # and return that count
  def describe_fallback_ipv6_orport(self, port):
    port_count = len(self.fallbacks_on_ipv6_orport(port))
    fallback_count = len(self.fallbacks_with_ipv6())
    logging.warning('%s of IPv6 fallbacks are on IPv6 ORPort %d'%(
                    CandidateList.describe_percentage(port_count,
                                                      fallback_count),
                    port))
    return port_count
  # log a message about the proportion of fallbacks on DirPort port
  # and return that count
  def describe_fallback_dirport(self, port):
    port_count = len(self.fallbacks_on_dirport(port))
    fallback_count = len(self.fallbacks)
    logging.warning('%s of fallbacks are on DirPort %d'%(
                    CandidateList.describe_percentage(port_count,
                                                      fallback_count),
                    port))
    return port_count
  # log a message about the proportion of fallbacks on each dirport,
  # each IPv4 orport, and each IPv6 orport
  def describe_fallback_ports(self):
    fallback_count = len(self.fallbacks)
    ipv4_or_count = fallback_count
    ipv4_or_count -= self.describe_fallback_ipv4_orport(443)
    ipv4_or_count -= self.describe_fallback_ipv4_orport(9001)
    logging.warning('%s of fallbacks are on other IPv4 ORPorts'%(
                    CandidateList.describe_percentage(ipv4_or_count,
                                                      fallback_count)))
    ipv6_fallback_count = len(self.fallbacks_with_ipv6())
    ipv6_or_count = ipv6_fallback_count
    ipv6_or_count -= self.describe_fallback_ipv6_orport(443)
    ipv6_or_count -= self.describe_fallback_ipv6_orport(9001)
    logging.warning('%s of IPv6 fallbacks are on other IPv6 ORPorts'%(
                    CandidateList.describe_percentage(ipv6_or_count,
                                                      ipv6_fallback_count)))
    dir_count = fallback_count
    dir_count -= self.describe_fallback_dirport(80)
    dir_count -= self.describe_fallback_dirport(9030)
    logging.warning('%s of fallbacks are on other DirPorts'%(
                    CandidateList.describe_percentage(dir_count,
                                                      fallback_count)))
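  # The breakdown above singles out ORPorts 443 and 9001 and DirPorts 80 and
  # 9030: 9001 and 9030 are tor's default ORPort and DirPort, and 443 and 80
  # are the common firewall-friendly alternatives; everything else is grouped
  # under "other".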
  # return a list of fallbacks which cache extra-info documents
  def fallbacks_with_extra_info_cache(self):
    return filter(lambda x: x._extra_info_cache, self.fallbacks)
  # log a message about the proportion of fallbacks that cache extra-info docs
  def describe_fallback_extra_info_caches(self):
    extra_info_fallback_count = len(self.fallbacks_with_extra_info_cache())
    fallback_count = len(self.fallbacks)
    logging.warning('%s of fallbacks cache extra-info documents'%(
                    CandidateList.describe_percentage(extra_info_fallback_count,
                                                      fallback_count)))
  # return a list of fallbacks which have the Exit flag
  def fallbacks_with_exit(self):
    return filter(lambda x: x.is_exit(), self.fallbacks)
  # log a message about the proportion of fallbacks with an Exit flag
  def describe_fallback_exit_flag(self):
    exit_fallback_count = len(self.fallbacks_with_exit())
    fallback_count = len(self.fallbacks)
    logging.warning('%s of fallbacks have the Exit flag'%(
                    CandidateList.describe_percentage(exit_fallback_count,
                                                      fallback_count)))
  # return a list of fallbacks which have an IPv6 address
  def fallbacks_with_ipv6(self):
    return filter(lambda x: x.has_ipv6(), self.fallbacks)
  # log a message about the proportion of fallbacks on IPv6
  def describe_fallback_ip_family(self):
    ipv6_fallback_count = len(self.fallbacks_with_ipv6())
    fallback_count = len(self.fallbacks)
    logging.warning('%s of fallbacks are on IPv6'%(
                    CandidateList.describe_percentage(ipv6_fallback_count,
                                                      fallback_count)))
  def summarise_fallbacks(self, eligible_count, operator_count, failed_count,
                          guard_count, target_count):
    s = ''
    s += '/* To comment-out entries in this file, use C comments, and add *'
    s += ' to the start of each line. (stem finds fallback entries using "'
    s += ' at the start of a line.) */'
    s += '\n'
    # Report:
    #  whether we checked consensus download times
    #  the number of fallback directories (and limits/exclusions, if relevant)
    #  min & max fallback bandwidths
    #  #error if below minimum count
    if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
      s += '/* Checked %s%s%s DirPorts served a consensus within %.1fs. */'%(
            'IPv4' if PERFORM_IPV4_DIRPORT_CHECKS else '',
            ' and ' if (PERFORM_IPV4_DIRPORT_CHECKS
                        and PERFORM_IPV6_DIRPORT_CHECKS) else '',
            'IPv6' if PERFORM_IPV6_DIRPORT_CHECKS else '',
            CONSENSUS_DOWNLOAD_SPEED_MAX)
    else:
      s += '/* Did not check IPv4 or IPv6 DirPort consensus downloads. */'
    s += '\n'
    # Multiline C comment with #error if things go bad
    s += '/*'
    s += '\n'
    # Integers don't need escaping in C comments
    fallback_count = len(self.fallbacks)
    if FALLBACK_PROPORTION_OF_GUARDS is None:
      fallback_proportion = ''
    else:
      fallback_proportion = ', Target %d (%d * %.2f)'%(target_count,
                                                       guard_count,
                                                       FALLBACK_PROPORTION_OF_GUARDS)
    s += 'Final Count: %d (Eligible %d%s'%(fallback_count, eligible_count,
                                           fallback_proportion)
    if MAX_FALLBACK_COUNT is not None:
      s += ', Max %d'%(MAX_FALLBACK_COUNT)
    s += ')\n'
    if eligible_count != fallback_count:
      removed_count = eligible_count - fallback_count
      excess_to_target_or_max = (eligible_count - operator_count - failed_count
                                 - fallback_count)
      # some 'Failed' failed the check, others 'Skipped' the check,
      # if we already had enough successful downloads
      s += ('Excluded: %d (Same Operator %d, Failed/Skipped Download %d, ' +
            'Excess %d)')%(removed_count, operator_count, failed_count,
                           excess_to_target_or_max)
      s += '\n'
    min_fb = self.fallback_min()
    min_bw = min_fb._data['measured_bandwidth']
    max_fb = self.fallback_max()
    max_bw = max_fb._data['measured_bandwidth']
    s += 'Bandwidth Range: %.1f - %.1f MByte/s'%(min_bw/(1024.0*1024.0),
                                                 max_bw/(1024.0*1024.0))
    s += '\n'
    s += '*/'
    if fallback_count < MIN_FALLBACK_COUNT:
      # We must have a minimum number of fallbacks so they are always
      # reachable, and are in diverse locations
      s += '\n'
      s += '#error Fallback Count %d is too low. '%(fallback_count)
      s += 'Must be at least %d for diversity. '%(MIN_FALLBACK_COUNT)
      s += 'Try adding entries to the whitelist, '
      s += 'or setting INCLUDE_UNLISTED_ENTRIES = True.'
    return s
def process_existing():
  logging.basicConfig(level=logging.INFO)
  logging.getLogger('stem').setLevel(logging.INFO)
  whitelist = {'data': parse_fallback_file(FALLBACK_FILE_NAME),
               'name': FALLBACK_FILE_NAME}
  blacklist = {'data': read_from_file(BLACKLIST_FILE_NAME, MAX_LIST_FILE_SIZE),
               'name': BLACKLIST_FILE_NAME}
  list_fallbacks(whitelist, blacklist)

def process_default():
  logging.basicConfig(level=logging.WARNING)
  logging.getLogger('stem').setLevel(logging.WARNING)
  whitelist = {'data': read_from_file(WHITELIST_FILE_NAME, MAX_LIST_FILE_SIZE),
               'name': WHITELIST_FILE_NAME}
  blacklist = {'data': read_from_file(BLACKLIST_FILE_NAME, MAX_LIST_FILE_SIZE),
               'name': BLACKLIST_FILE_NAME}
  list_fallbacks(whitelist, blacklist)

## Main Function

def main():
  if get_command() == 'check_existing':
    process_existing()
  else:
    process_default()

def get_command():
  if len(sys.argv) == 2:
    return sys.argv[1]
  else:
    return None

def log_excluded(msg, *args):
  if get_command() == 'check_existing':
    logging.warning(msg, *args)
  else:
    logging.info(msg, *args)
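# Running the script with the check_existing argument selects
# process_existing(): the current fallback file is parsed as the whitelist,
# logging is more verbose (INFO), and exclusions are logged as warnings via
# log_excluded().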
def list_fallbacks(whitelist, blacklist):
  """ Fetches required onionoo documents and evaluates the
      fallback directory criteria for each of the relays """
  print "/* type=fallback */"
  print ("/* version={} */"
         .format(cleanse_c_multiline_comment(FALLBACK_FORMAT_VERSION)))
  logging.warning('Downloading and parsing Onionoo data. ' +
                  'This may take some time.')
  # find relays that could be fallbacks
  candidates = CandidateList()
  candidates.add_relays()
  # work out how many fallbacks we want
  guard_count = candidates.count_guards()
  if FALLBACK_PROPORTION_OF_GUARDS is None:
    target_count = guard_count
  else:
    target_count = int(guard_count * FALLBACK_PROPORTION_OF_GUARDS)
  # the maximum number of fallbacks is the least of:
  # - the target fallback count (FALLBACK_PROPORTION_OF_GUARDS * guard count)
  # - the maximum fallback count (MAX_FALLBACK_COUNT)
  if MAX_FALLBACK_COUNT is None:
    max_count = target_count
  else:
    max_count = min(target_count, MAX_FALLBACK_COUNT)
  candidates.compute_fallbacks()
  prefilter_fallbacks = copy.copy(candidates.fallbacks)
  # filter with the whitelist and blacklist
  # if a relay has changed IPv4 address or ports recently, it will be excluded
  # as ineligible before we call apply_filter_lists, and so there will be no
  # warning that the details have changed from those in the whitelist.
  # instead, there will be an info-level log during the eligibility check.
  initial_count = len(candidates.fallbacks)
  excluded_count = candidates.apply_filter_lists(whitelist, blacklist)
  print candidates.summarise_filters(initial_count, excluded_count)
  eligible_count = len(candidates.fallbacks)
  # calculate the measured bandwidth of each relay,
  # then remove low-bandwidth relays
  candidates.calculate_measured_bandwidth()
  candidates.remove_low_bandwidth_relays()
  # print the raw fallback list
  #for x in candidates.fallbacks:
  #  print x.fallbackdir_line(True)
  #  print json.dumps(candidates[x]._data, sort_keys=True, indent=4,
  #                   separators=(',', ': '), default=json_util.default)
  # impose mandatory conditions here, like one per contact, family, IP
  # in measured bandwidth order
  candidates.sort_fallbacks_by_measured_bandwidth()
  operator_count = 0
  # only impose these limits on the final list - operators can nominate
  # multiple candidate fallbacks, and then we choose the best set
  if not OUTPUT_CANDIDATES:
    operator_count += candidates.limit_fallbacks_same_ip()
    operator_count += candidates.limit_fallbacks_same_contact()
    operator_count += candidates.limit_fallbacks_same_family()
  # check if each candidate can serve a consensus
  # there's a small risk we've eliminated relays from the same operator that
  # can serve a consensus, in favour of one that can't
  # but given it takes up to 15 seconds to check each consensus download,
  # the risk is worth it
  if PERFORM_IPV4_DIRPORT_CHECKS or PERFORM_IPV6_DIRPORT_CHECKS:
    logging.warning('Checking consensus download speeds. ' +
                    'This may take some time.')
  failed_count = candidates.perform_download_consensus_checks(max_count)
  # work out which fallbacks cache extra-infos
  candidates.mark_extra_info_caches()
  # analyse and log interesting diversity metrics
  # like netblock, ports, exit, IPv4-only
  # (we can't easily analyse AS, and it's hard to accurately analyse country)
  candidates.describe_fallback_ip_family()
  # if we can't import the ipaddress module, we can't do netblock analysis
  if HAVE_IPADDRESS:
    candidates.describe_fallback_netblocks()
  candidates.describe_fallback_ports()
  candidates.describe_fallback_extra_info_caches()
  candidates.describe_fallback_exit_flag()
  # output C comments summarising the fallback selection process
  if len(candidates.fallbacks) > 0:
    print candidates.summarise_fallbacks(eligible_count, operator_count,
                                         failed_count, guard_count,
                                         target_count)
  else:
    print '/* No Fallbacks met criteria */'
  # output C comments specifying the OnionOO data used to create the list
  for s in fetch_source_list():
    print describe_fetch_source(s)
  # sort the list differently depending on why we've created it:
  # if we're outputting the final fallback list, sort by fingerprint
  # this makes diffs much more stable
  # otherwise, if we're trying to find a bandwidth cutoff, or we want to
  # contact operators in priority order, sort by bandwidth (not yet
  # implemented)
  # otherwise, if we're contacting operators, sort by contact
  candidates.sort_fallbacks_by(OUTPUT_SORT_FIELD)
  for x in candidates.fallbacks:
    print x.fallbackdir_line(candidates.fallbacks, prefilter_fallbacks)

if __name__ == "__main__":
  main()