#!/usr/bin/env python3

import random # For simulation, not cryptography!
import math
import sys
import logging
import resource

import network
import dirauth
import relay
import client

class Simulator:
    def __init__(self, relaytarget, clienttarget, statslogger):
        self.relaytarget = relaytarget
        self.clienttarget = clienttarget
        self.statslogger = statslogger

        # Some (for now) hard-coded parameters

        # The number of directory authorities
        numdirauths = 9
        # The fraction of relays that are fallback relays
        fracfallbackrelays = 0.05
        # Mean number of circuits created per client per epoch
        self.gamma = 8.9

        # Churn is controlled by three parameters:
        #   newmean: the mean number of new arrivals per epoch
        #   newstddev: the stddev of the number of new arrivals per epoch
        #   oldprob: the probability any given existing one leaves per epoch
        # If target is the desired steady-state number, then it should
        # be the case that target * oldprob = newmean.  That way, if the
        # current number is below target, on average you add more than
        # you remove, and if the current number is above target, on
        # average you add fewer than you remove.

        # For relays, looking at all the consensuses for Nov and Dec
        # 2019, newmean is about 1.0% of the network size, and newstddev
        # is about 0.3% of the network size.
        self.relay_newmean = 0.010 * self.relaytarget
        self.relay_newstddev = 0.003 * self.relaytarget
        self.relay_oldprob = 0.010
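        # (Illustrative arithmetic only: at full scale relaytarget = 6500,
        # so on average relay_newmean = 65 new relays arrive per epoch,
        # while an expected relaytarget * relay_oldprob = 65 existing
        # relays leave, keeping the relay count near its target.)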

        # For clients, looking at how many clients request a consensus
        # with an if-modified-since date more than 3 hours old (and so
        # we treat them as "new") over several days in late Dec 2019,
        # newmean is about 16% of all clients, and newstddev is about 4%
        # of all clients.
        self.client_newmean = 0.16 * self.clienttarget
        self.client_newstddev = 0.04 * self.clienttarget
        self.client_oldprob = 0.16

        # Start some dirauths
        self.dirauthaddrs = []
        self.dirauths = []
        for i in range(numdirauths):
            dira = dirauth.DirAuth(i, numdirauths)
            self.dirauths.append(dira)
            self.dirauthaddrs.append(dira.netaddr)

        # Start some relays
        self.relays = []
        for i in range(self.relaytarget):
            # Relay bandwidths (at least the ones fast enough to get used)
            # in the live Tor network (as of Dec 2019) are well approximated
            # by (200000-(200000-25000)/3*log10(x)) where x is a
            # uniform integer in [1,2500]
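            # (That formula yields bandwidths from 200000 at x=1 down to
            # roughly 1800 at x=2500.)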
            x = random.randint(1,2500)
            bw = int(200000-(200000-25000)/3*math.log10(x))
            self.relays.append(relay.Relay(self.dirauthaddrs, bw, 0))

        # The fallback relays are a hardcoded list of a small fraction
        # of the relays, used by clients for bootstrapping
        numfallbackrelays = int(self.relaytarget * fracfallbackrelays) + 1
        fallbackrelays = random.sample(self.relays, numfallbackrelays)
        for r in fallbackrelays:
            r.set_is_fallbackrelay()
        network.thenetwork.setfallbackrelays(fallbackrelays)

        # Tick the epoch to build the first consensus
        network.thenetwork.nextepoch()

        # Start some clients
        self.clients = []
        for i in range(clienttarget):
            self.clients.append(client.Client(self.dirauthaddrs))

        # Throw away all the performance statistics to this point
        for d in self.dirauths: d.perfstats.reset()
        for r in self.relays: r.perfstats.reset()
        for c in self.clients: c.perfstats.reset()

        # Tick the epoch to bootstrap the clients
        network.thenetwork.nextepoch()

    def one_epoch(self):
        """Simulate one epoch."""
        epoch = network.thenetwork.getepoch()

        # Each client will start a random number of circuits in a
        # Poisson distribution with mean gamma.  To randomize the order
        # of the clients creating each circuit, we actually use a
        # Poisson distribution with mean (gamma*num_clients), and assign
        # each event to a uniformly random client.  (This does in fact
        # give the required distribution.)
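        # (Why this works: the superposition of num_clients independent
        # Poisson processes of rate gamma is a single Poisson process of
        # rate gamma*num_clients, and assigning each of its events to a
        # uniformly random client recovers independent Poisson(gamma)
        # circuit counts per client.)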
        numclients = len(self.clients)

        # simtime is the simulated time, measured in epochs (i.e.,
        # 0=start of this epoch; 1=end of this epoch)
        simtime = 0
        numcircs = 0
        allcircs = []
        lastpercent = -1
        while simtime < 1.0:
            allcircs.append(
                random.choice(self.clients).channelmgr.new_circuit())
            simtime += random.expovariate(self.gamma * numclients)
            numcircs += 1
            percent = int(100*simtime)
            #if percent != lastpercent:
            if numcircs % 100 == 0:
                logging.info("Creating circuits in epoch %s: %d%% (%d circuits)",
                    epoch, percent, numcircs)
                lastpercent = percent

        # gather stats
        totsent = 0
        totrecv = 0
        dirasent = 0
        dirarecv = 0
        relaysent = 0
        relayrecv = 0
        clisent = 0
        clirecv = 0

        dirastats = network.PerfStatsStats()
        for d in self.dirauths:
            logging.debug("%s", d.perfstats)
            dirasent += d.perfstats.bytes_sent
            dirarecv += d.perfstats.bytes_received
            dirastats.accum(d.perfstats)
        totsent += dirasent
        totrecv += dirarecv

        relaystats = network.PerfStatsStats(True)
        relaybstats = network.PerfStatsStats(True)
        relaynbstats = network.PerfStatsStats(True)
        for r in self.relays:
            logging.debug("%s", r.perfstats)
            relaysent += r.perfstats.bytes_sent
            relayrecv += r.perfstats.bytes_received
            relaystats.accum(r.perfstats)
            if r.perfstats.is_bootstrapping:
                relaybstats.accum(r.perfstats)
            else:
                relaynbstats.accum(r.perfstats)
        totsent += relaysent
        totrecv += relayrecv

        clistats = network.PerfStatsStats()
        clibstats = network.PerfStatsStats()
        clinbstats = network.PerfStatsStats()
        for c in self.clients:
            logging.debug("%s", c.perfstats)
            clisent += c.perfstats.bytes_sent
            clirecv += c.perfstats.bytes_received
            clistats.accum(c.perfstats)
            if c.perfstats.is_bootstrapping:
                clibstats.accum(c.perfstats)
            else:
                clinbstats.accum(c.perfstats)
        totsent += clisent
        totrecv += clirecv

        self.statslogger.info("DirAuths sent=%s recv=%s" % (dirasent, dirarecv))
        self.statslogger.info("Relays sent=%s recv=%s" % (relaysent, relayrecv))
        self.statslogger.info("Client sent=%s recv=%s" % (clisent, clirecv))
        self.statslogger.info("Total sent=%s recv=%s" % (totsent, totrecv))

        numdirauths = len(self.dirauths)
        numrelays = len(self.relays)
        numclients = len(self.clients)

        self.statslogger.info("Dirauths %s", dirastats)
        self.statslogger.info("Relays %s", relaystats)
        self.statslogger.info("Relays(B) %s", relaybstats)
        self.statslogger.info("Relays(NB) %s", relaynbstats)
        self.statslogger.info("Clients %s", clistats)
        self.statslogger.info("Clients(B) %s", clibstats)
        self.statslogger.info("Clients(NB) %s", clinbstats)

        # Close circuits
        for c in allcircs:
            c.close()

        # Reset stats
        for d in self.dirauths: d.perfstats.reset()
        for r in self.relays: r.perfstats.reset()
        for c in self.clients: c.perfstats.reset()

        # Churn relays
        # Stop some of the (non-fallback) relays
        relays_remaining = []
        numrelays = len(self.relays)
        numrelaysterminated = 0
        lastpercent = 0
        logging.info("Terminating some relays")
        for i, r in enumerate(self.relays):
            percent = int(100*(i+1)/numrelays)
            if not r.is_fallbackrelay and \
                    random.random() < self.relay_oldprob:
                r.terminate()
                numrelaysterminated += 1
            else:
                # Keep this relay
                relays_remaining.append(r)
            if percent != lastpercent:
                lastpercent = percent
                logging.info("%d%% relays considered, %d terminated",
                    percent, numrelaysterminated)
        self.relays = relays_remaining

        # Start some new relays
        relays_new = int(random.normalvariate(self.relay_newmean,
            self.relay_newstddev))
        logging.info("Starting %d new relays", relays_new)
        if relays_new > 0:
            for i in range(relays_new):
                x = random.randint(1,2500)
                bw = int(200000-(200000-25000)/3*math.log10(x))
                self.relays.append(relay.Relay(self.dirauthaddrs, bw, 0))

        # Churn clients
        # Stop some of the clients
        clients_remaining = []
        numclients = len(self.clients)
        numclientsterminated = 0
        lastpercent = 0
        logging.info("Terminating some clients")
        for i, c in enumerate(self.clients):
            percent = int(100*(i+1)/numclients)
            if random.random() < self.client_oldprob:
                c.terminate()
                numclientsterminated += 1
            else:
                # Keep this client
                clients_remaining.append(c)
            if percent != lastpercent:
                lastpercent = percent
                logging.info("%d%% clients considered, %d terminated",
                    percent, numclientsterminated)
        self.clients = clients_remaining

        # Start some new clients
        clients_new = int(random.normalvariate(self.client_newmean,
            self.client_newstddev))
        logging.info("Starting %d new clients", clients_new)
        if clients_new > 0:
            for i in range(clients_new):
                self.clients.append(client.Client(self.dirauthaddrs))

        # Tick the epoch
        network.thenetwork.nextepoch()

if __name__ == '__main__':
    # Args: womode snipauthmode networkscale numepochs randseed logdir
    if len(sys.argv) != 7:
        sys.stderr.write("Usage: womode snipauthmode networkscale numepochs randseed logdir\n")
        sys.exit(1)
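
    # Example invocation (a sketch; the womode and snipauthmode names
    # must match members of network.WOMode and network.SNIPAuthMode,
    # which are defined in network.py, not here):
    #   ./simulator.py <womode> <snipauthmode> 0.1 10 1 logs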

    womode = network.WOMode[sys.argv[1].upper()]
    snipauthmode = network.SNIPAuthMode[sys.argv[2].upper()]
    networkscale = float(sys.argv[3])
    numepochs = int(sys.argv[4])
    randseed = int(sys.argv[5])

    logfile = "%s/%s_%s_%f_%s_%s.log" % (sys.argv[6], womode.name,
        snipauthmode.name, networkscale, numepochs, randseed)

    # Seed the PRNG.  On Ubuntu 18.04, this in fact makes future calls
    # to (non-cryptographic) random numbers deterministic.  On Ubuntu
    # 16.04, it does not.
    random.seed(randseed)

    loglevel = logging.INFO
    # Uncomment to see all the debug messages
    # loglevel = logging.DEBUG
    logging.basicConfig(level=loglevel,
        format="%(asctime)s:%(levelname)s:%(message)s")

    # The gathered statistics get logged separately
    statslogger = logging.getLogger("simulator")
    handler = logging.FileHandler(logfile)
    handler.setFormatter(logging.Formatter("%(asctime)s:%(message)s"))
    statslogger.addHandler(handler)
    statslogger.setLevel(logging.INFO)
    statslogger.info("Starting simulation %s", logfile)

    # Set the Walking Onions style to use
    network.thenetwork.set_wo_style(womode, snipauthmode)

    # The steady-state numbers of relays and clients
    relaytarget = math.ceil(6500 * networkscale)
    clienttarget = math.ceil(2500000 * networkscale)
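    # (Illustrative arithmetic only: networkscale = 0.1 gives
    # relaytarget = 650 and clienttarget = 250000.)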

    # Create the simulation
    simulator = Simulator(relaytarget, clienttarget, statslogger)

    for e in range(numepochs):
        statslogger.info("Starting epoch %s simulation", e+3)
        simulator.one_epoch()
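
    # ru_maxrss is reported in KiB on Linux, so dividing by 1024 gives MiB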
    maxmemmib = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024
    statslogger.info("%d MiB used", maxmemmib)