
\documentclass[times,10pt,twocolumn]{article}
\usepackage{latex8}
\usepackage{times}
\usepackage{url}
\usepackage{graphics}
\usepackage{amsmath}
\pagestyle{empty}
\renewcommand\url{\begingroup \def\UrlLeft{<}\def\UrlRight{>}\urlstyle{tt}\Url}
\newcommand\emailaddr{\begingroup \def\UrlLeft{<}\def\UrlRight{>}\urlstyle{tt}\Url}
% If an URL ends up with '%'s in it, that's because the line *in the .bib/.tex
% file* is too long, so break it there (it doesn't matter if the next line is
% indented with spaces). -DH
%\newif\ifpdf
%\ifx\pdfoutput\undefined
% \pdffalse
%\else
% \pdfoutput=1
% \pdftrue
%\fi
\newenvironment{tightlist}{\begin{list}{$\bullet$}{
\setlength{\itemsep}{0mm}
\setlength{\parsep}{0mm}
% \setlength{\labelsep}{0mm}
% \setlength{\labelwidth}{0mm}
% \setlength{\topsep}{0mm}
}}{\end{list}}
\begin{document}
%% Use dvipdfm instead. --DH
%\ifpdf
% \pdfcompresslevel=9
% \pdfpagewidth=\the\paperwidth
% \pdfpageheight=\the\paperheight
%\fi
\title{Tor: The Second-Generation Onion Router}
% Putting the 'Private' back in 'Virtual Private Network'
%\author{Roger Dingledine \\ The Free Haven Project \\ arma@freehaven.net \and
%Nick Mathewson \\ The Free Haven Project \\ nickm@freehaven.net \and
%Paul Syverson \\ Naval Research Lab \\ syverson@itd.nrl.navy.mil}
\maketitle
\thispagestyle{empty}
\begin{abstract}
We present Tor, a circuit-based low-latency anonymous communication
system. Tor is the successor to Onion Routing
and addresses various limitations in the original Onion Routing design.
Tor works on the real-world Internet, requires no special
privileges such as root- or kernel-level access,
requires little synchronization or coordination between nodes, and
provides a reasonable trade-off between anonymity, usability, and efficiency.
We include a new, more practical design for rendezvous points, and
close with a list of open problems in anonymous communication systems
today.
% Which other innovations from section 1 should we mention in the abstract?
\end{abstract}
%\begin{center}
%\textbf{Keywords:} anonymity, peer-to-peer, remailer, nymserver, reply block
%\end{center}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\Section{Overview}
\label{sec:intro}
Onion Routing is a distributed overlay network designed to anonymize
low-latency TCP-based applications such as web browsing, secure shell,
and instant messaging. Clients choose a path through the network and
build a \emph{virtual circuit}, in which each node (or ``onion router'')
in the path knows its
predecessor and successor, but no other nodes in the circuit.
Traffic flowing down the circuit
is sent in fixed-size \emph{cells}, which are unwrapped by a symmetric key
at each node (like the layers of an onion) and relayed downstream. The
original Onion Routing project published several design and analysis
papers
\cite{or-ih96,or-jsac98,or-discex00,or-pet00}. While
a wide-area Onion Routing network was deployed for some weeks,
the only long-running and publicly accessible
implementation of the original design was a fragile proof-of-concept
that ran on a single machine. Even this simple deployment processed tens
of thousands of connections daily from thousands of users worldwide. But
many critical design and deployment issues were never resolved, and the
design has not been updated in several years. Here we describe Tor, a
protocol for asynchronous, loosely federated onion routers that provides
the following improvements over the old Onion Routing design:
\begin{tightlist}
\item \textbf{Perfect forward secrecy:} The original Onion Routing
design was vulnerable to a single hostile node recording traffic and later
compromising successive nodes in the circuit and forcing them to
decrypt it.
Rather than using a single onion to lay each circuit,
Tor now uses an incremental or \emph{telescoping}
path-building design, where the initiator negotiates session keys with
each successive hop in the circuit. Once these keys are deleted,
subsequently compromised nodes cannot decrypt old traffic.
As a side benefit, onion replay detection is no longer
necessary, and the process of building circuits is more reliable, since
the initiator knows when a hop fails and can then try extending to a new node.
% Perhaps mention that not all of these are things that we invented. -NM
\item \textbf{Separation of protocol cleaning from anonymity:}
The original Onion Routing design required a separate ``application
proxy'' for each
supported application protocol---most
of which were never written, so many applications were never supported.
Tor uses the standard and near-ubiquitous SOCKS
\cite{socks4} proxy interface, allowing us to support most TCP-based
programs without modification. This design change allows Tor to
use the filtering features of privacy-enhancing
application-level proxies such as Privoxy \cite{privoxy} without having to
incorporate those features itself.
\item \textbf{Many TCP streams can share one circuit:} The original
Onion Routing design built a separate circuit for each application-level
request.
This hurt performance by requiring multiple public key operations for
every request, and also presented
a threat to anonymity from building so many different circuits; see
Section~\ref{sec:maintaining-anonymity}.
Tor multiplexes multiple TCP streams along each virtual
circuit, to improve efficiency and anonymity.
\item \textbf{Leaky-pipe circuit topology:} Through in-band signalling
within the circuit, Tor initiators can direct traffic to nodes partway
down the circuit. This allows for long-range padding to frustrate traffic
shape and volume attacks at the initiator \cite{defensive-dropping}.
Because circuits are used by more than one application, it also allows
traffic to exit the circuit from the middle---thus frustrating traffic
shape and volume attacks based on observing the end of the circuit.
\item \textbf{No mixing, padding, or traffic shaping:} The original
Onion Routing design called for batching and reordering the cells arriving
from each circuit. It also included padding between onion routers and,
in a later design, between onion
proxies (that is, users) and onion routers \cite{or-ih96,or-jsac98}.
The trade-off between padding protection and cost was discussed, but no
general padding scheme was suggested. In
\cite{or-pet00} it was theorized that \emph{traffic shaping} would generally
be used, but details were not provided.
Recent research \cite{econymics} and deployment
experience \cite{freedom21-security} suggest that this level of resource
use is not practical or economical; and even full link padding is still
vulnerable \cite{defensive-dropping}. Thus, until we have a proven and
convenient design for traffic shaping or low-latency mixing that
will improve anonymity against a realistic adversary, we leave these
strategies out.
\item \textbf{Congestion control:} Earlier anonymity designs do not
address traffic bottlenecks. Unfortunately, typical approaches to load
balancing and flow control in overlay networks involve inter-node control
communication and global views of traffic. Tor's decentralized congestion
control uses end-to-end acks to maintain reasonable anonymity while
allowing nodes
at the edges of the network to detect congestion or flooding attacks
and send less data until the congestion subsides.
\item \textbf{Directory servers:} The original Onion Routing design
planned to flood link-state information through the network---an
approach that can be unreliable and
open to partitioning attacks or outright deception. Tor takes a simplified
view toward distributing link-state information. Certain more trusted
onion routers also act as directory servers: they provide signed
\emph{directories} that describe known routers and their availability.
Users periodically download these directories via HTTP.
\item \textbf{End-to-end integrity checking:} The original Onion Routing
design did no integrity checking on data. Any onion router on the circuit
could change the contents of data cells as they passed by---for example, to
alter a
connection request on the fly so it would connect to a different
webserver, or to
`tag' encrypted traffic and look for corresponding corrupted traffic
at the network
edges \cite{minion-design}. Tor hampers these attacks by checking data
integrity before it leaves the network.
\item \textbf{Improved robustness to failed nodes:} A failed node in
the old design
meant that circuit-building failed, but thanks to Tor's step-by-step
circuit building, users can notice failed
nodes while building circuits and route around them. Additionally,
liveness information from directories allows users to avoid
unreliable nodes in the first place.
\item \textbf{Variable exit policies:} Tor provides a consistent
mechanism for
each node to specify and advertise a policy describing the hosts and
ports to which it will connect. These exit policies
are critical in a volunteer-based distributed infrastructure, because
each operator is comfortable with allowing different types of traffic
to exit the Tor network from his node.
\item \textbf{Implementable in user-space:} Unlike other anonymity systems
like Freedom \cite{freedom2-arch}, Tor only attempts to anonymize TCP
streams. Thus it does not require patches to an operating system's network
stack (or built-in support) to operate. Although this approach is less
flexible, it has proven valuable to Tor's portability and deployability.
\item \textbf{Rendezvous points and location-protected servers:}
Tor provides an integrated mechanism for responder anonymity via
location-protected servers. Previous Onion Routing designs included
long-lived ``reply onions'' that could be used to build virtual circuits
to a hidden server, but these reply onions did not provide forward
security, and would become useless if any node in
the path went down or rotated its keys.
In Tor's current design, clients negotiate {\it
rendezvous points} to connect with hidden servers; reply onions are no
longer required.
\end{tightlist}
We have implemented most of the above features. Our source code is
available under a free license, and is not (as far as we can tell)
encumbered by patents. We have
recently begun deploying a widespread alpha network to test
the design in practice, to get more experience with usability and users,
and to provide a research platform for experimenting with new ideas.
We review previous work in Section~\ref{sec:related-work}, describe
our goals and assumptions in Section~\ref{sec:assumptions},
and then address the above list of improvements in
Sections~\ref{sec:design}--\ref{sec:rendezvous}. We
summarize in Section~\ref{sec:analysis}
how our design stands up to known attacks, and conclude with a list of
open problems in Section~\ref{sec:maintaining-anonymity} and future
work for the Onion Routing project in Section~\ref{sec:conclusion}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\Section{Related work}
\label{sec:related-work}
Modern anonymity systems date to Chaum's Mix-Net~\cite{chaum-mix} design of
1981. Chaum proposed hiding sender-recipient connections by wrapping
messages in layers of public key cryptography, and relaying them
through a path composed of ``Mixes.'' These mixes in turn decrypt, delay,
and re-order messages, before relaying them along the sender-selected
path towards their destinations.
Subsequent relay-based anonymity designs have diverged in two
principal directions. Some have attempted to maximize anonymity at
the cost of introducing comparatively large and variable latencies,
including Babel~\cite{babel}, Mixmaster~\cite{mixmaster-spec}, and
Mixminion~\cite{minion-design}. Because of this
decision, these \emph{high-latency} networks are well-suited for anonymous
email, but introduce too much lag for interactive tasks such as web browsing,
Internet chat, or SSH connections.
Tor belongs to the second category: \emph{low-latency} designs that
attempt to anonymize interactive network traffic. These systems handle
a variety of bidirectional protocols. They also provide more convenient
mail delivery than the high-latency fire-and-forget anonymous email
networks, because the remote mail server provides explicit delivery
confirmation. But because these designs typically
involve many packets that must be delivered quickly, it is
difficult for them to prevent an attacker who can eavesdrop both ends of the
communication from correlating the timing and volume
of traffic entering the anonymity network with traffic leaving it. These
protocols are also vulnerable to active attacks in which an
adversary introduces timing patterns into traffic entering the network, and
looks
for correlated patterns among exiting traffic.
Although some work has been done to frustrate
these attacks,\footnote{
The most common approach is to pad and limit communication to a constant
rate, or to limit
the variation in traffic shape. Doing so can have prohibitive bandwidth
costs and/or performance limitations.
} most designs protect primarily against traffic analysis rather than traffic
confirmation \cite{or-jsac98}---that is, they assume that the attacker is
attempting to learn who is talking to whom, not to confirm a prior suspicion
about who is talking to whom.
The simplest low-latency designs are single-hop proxies such as the
Anonymizer \cite{anonymizer}, wherein a single trusted server strips the
data's origin before relaying it. These designs are easy to
analyze, but require end-users to trust the anonymizing proxy.
Concentrating the traffic to a single point increases the anonymity set
(the set of people a given user is hiding among), but it can make traffic
analysis easier: an adversary need only eavesdrop on the proxy to observe
the entire system.
More complex are distributed-trust, circuit-based anonymizing systems.
In these designs, a user establishes one or more medium-term bidirectional
end-to-end circuits, and tunnels TCP streams in fixed-size cells.
Establishing circuits is computationally expensive and typically
requires public-key
cryptography, whereas relaying cells is comparatively inexpensive and
typically requires only symmetric encryption.
Because a circuit crosses several servers, and each server only knows
the adjacent servers in the circuit, no single server can link a
user to her communication partners.
The Java Anon Proxy (also known as JAP or Web MIXes) uses fixed shared
routes known as \emph{cascades}. As with a single-hop proxy, this
approach aggregates users into larger anonymity sets, but again an
attacker only needs to observe both ends of the cascade to bridge all
the system's traffic. The Java Anon Proxy's design provides
protection by padding between end users and the head of the cascade
\cite{web-mix}. However, it is not demonstrated whether the current
implementation's padding policy improves anonymity.
PipeNet \cite{back01, pipenet}, another low-latency design proposed at
about the same time as the original Onion Routing design, provided
stronger anonymity at the cost of allowing a single user to shut
down the network simply by not sending. Low-latency anonymous
communication has also been designed for other environments such as
ISDN \cite{isdn-mixes}.
In P2P designs like Tarzan \cite{tarzan:ccs02} and MorphMix
\cite{morphmix:fc04}, all participants both generate traffic and relay
traffic for others. Rather than aiming to hide the originator within a
group of other originators, these systems instead aim to prevent a peer
or observer from knowing whether a given peer originated the request
or just relayed it from another peer. While Tarzan and MorphMix use
layered encryption as above, Crowds \cite{crowds-tissec} simply assumes
an adversary who cannot observe the initiator: it uses no public-key
encryption, so nodes on a circuit can read that circuit's traffic. The
anonymity of the initiator relies on filtering all identifying information
from the data stream.
Hordes \cite{hordes-jcs} is based on Crowds but also uses multicast
responses to hide the initiator. Herbivore \cite{herbivore} and P5
\cite{p5} go even further, requiring broadcast. They make anonymity
and efficiency trade-offs to make broadcast more practical.
These systems are designed primarily for communication between peers,
although Herbivore users can make external connections by
requesting a peer to serve as a proxy. Allowing easy connections to
nonparticipating responders or recipients is important for usability,
for example so users can visit nonparticipating Web sites or exchange
mail with nonparticipating recipients.
Systems like Freedom and the original Onion Routing build the circuit
all at once, using a layered ``onion'' of public-key encrypted messages,
each layer of which provides a set of session keys and the address of the
next server in the circuit. Tor as described herein, Tarzan, MorphMix,
Cebolla \cite{cebolla}, and AnonNet \cite{anonnet} build the circuit
in stages, extending it one hop at a time. This approach makes perfect
forward secrecy feasible.
Circuit-based anonymity designs must choose which protocol layer
to anonymize. They may choose to intercept IP packets directly, and
relay them whole (stripping the source address) as the contents of
the circuit \cite{freedom2-arch,tarzan:ccs02}. Alternatively, like
Tor, they may accept TCP streams and relay the data in those streams
along the circuit, ignoring the breakdown of that data into TCP frames
\cite{morphmix:fc04,anonnet}. Finally, they may accept application-level
protocols (such as HTTP) and relay the application requests themselves
along the circuit.
Making this protocol-layer decision requires a compromise between flexibility
and anonymity. For example, a system that understands HTTP can strip
identifying information from those requests, can take advantage of caching
to limit the number of requests that leave the network, and can batch
or encode those requests in order to minimize the number of connections.
On the other hand, an IP-level anonymizer can handle nearly any protocol,
even ones unforeseen by its designers (though these systems require
kernel-level modifications to some operating systems, and so are more
complex and less portable). TCP-level anonymity networks like Tor present
a middle approach: they are fairly application neutral (so long as the
application supports, or can be tunneled across, TCP), but by treating
application connections as data streams rather than raw TCP packets,
they avoid the well-known inefficiencies of tunneling TCP over TCP
\cite{tcp-over-tcp-is-bad}.
Distributed-trust anonymizing systems need to prevent attackers from
adding too many servers and thus compromising too many user paths.
Tor relies on a small set of well-known directory servers, run by
independent parties, to make
decisions about which nodes can join. Tarzan
and MorphMix allow unknown users to run servers, and limit an attacker
from controlling too much of the network by relying on a limited resource
such as the number of IP addresses controlled. Crowds suggests requiring
written, notarized
requests from potential crowd members.
Anonymous communication is essential for censorship-resistant
systems like Eternity \cite{eternity}, Free~Haven \cite{freehaven-berk},
Publius \cite{publius}, and Tangler \cite{tangler}. Tor's rendezvous
points enable connections between mutually anonymous entities; they
are a building block for location-hidden servers, which are needed by
Eternity and Free~Haven.
% didn't include rewebbers. No clear place to put them, so I'll leave
% them out for now. -RD
\Section{Design goals and assumptions}
\label{sec:assumptions}
\SubSection{Goals}
Like other low-latency anonymity designs, Tor seeks to frustrate
attackers from linking communication partners, or from linking
multiple communications to or from a single user. Within this
main goal, however, several design considerations have directed
Tor's evolution.
\textbf{Deployability:} The design must be one that can be implemented,
deployed, and used in the real world. This requirement precludes designs
that are expensive to run (for example, by requiring more bandwidth
than volunteers are willing to provide); designs that place a heavy
liability burden on operators (for example, by allowing attackers to
implicate onion routers in illegal activities); and designs that are
difficult or expensive to implement (for example, by requiring kernel
patches, or separate proxies for every protocol). This requirement also
precludes systems in which users who do not benefit from anonymity are
required to run special software in order to communicate with anonymous
parties. (We do not meet this goal for the current rendezvous design,
however; see Section~\ref{sec:rendezvous}.)
\textbf{Usability:} A hard-to-use system has fewer users---and because
anonymity systems hide users among users, a system with fewer users
provides less anonymity. Usability is not only a convenience for Tor:
it is a security requirement \cite{econymics,back01}. Tor should not
require modifying applications; should not introduce prohibitive delays;
and should require the user to make as few configuration decisions
as possible.
\textbf{Flexibility:} The protocol must be flexible and well-specified,
so that it can serve as a test-bed for future research in low-latency
anonymity systems. Many of the open problems in low-latency anonymity
networks, such as generating dummy traffic or preventing Sybil attacks
\cite{sybil}, may be solvable independently from the issues solved by
Tor. Hopefully future systems will not need to reinvent Tor's design.
(But note that while a flexible design benefits researchers,
there is a danger that differing choices of extensions will make users
distinguishable. Experiments should be run on a separate network.)
\textbf{Simple design:} The protocol's design and security
parameters must be well-understood. Additional features impose implementation
and complexity costs; adding unproven techniques to the design threatens
deployability, readability, and ease of security analysis. Tor aims to
deploy a simple and stable system that integrates the best well-understood
approaches to protecting anonymity.
\SubSection{Non-goals}
\label{subsec:non-goals}
In favoring simple, deployable designs, we have explicitly deferred
several possible goals, either because they are solved elsewhere, or because
they remain open research questions.
\textbf{Not peer-to-peer:} Tarzan and MorphMix aim to scale to completely
decentralized peer-to-peer environments with thousands of short-lived
servers, many of which may be controlled by an adversary. This approach
is appealing, but still has many open problems
\cite{tarzan:ccs02,morphmix:fc04}.
\textbf{Not secure against end-to-end attacks:} Tor does not claim
to provide a definitive solution to end-to-end timing or intersection
attacks. Some approaches, such as running an onion router, may help;
see Section~\ref{sec:analysis} for more discussion.
\textbf{No protocol normalization:} Tor does not provide \emph{protocol
normalization} like Privoxy or the Anonymizer. For complex and variable
protocols such as HTTP, Tor must be layered with a filtering proxy such
as Privoxy to hide differences between clients, and expunge protocol
features that leak identity. Similarly, Tor does not currently integrate
tunneling for non-stream-based protocols like UDP; this too must be
provided by an external service.
% Actually, tunneling udp over tcp is probably horrible for some apps.
% Should this get its own non-goal bulletpoint? The motivation for
% non-goal-ness would be burden on clients / portability. -RD
% No, leave it as is. -RD
\textbf{Not steganographic:} Tor does not try to conceal which users are
sending or receiving communications; it only tries to conceal with whom
they communicate.
\SubSection{Threat Model}
\label{subsec:threat-model}
A global passive adversary is the most commonly assumed threat when
analyzing theoretical anonymity designs. But like all practical
low-latency systems, Tor does not protect against such a strong
adversary. Instead, we expect an adversary who can observe some fraction
of network traffic; who can generate, modify, delete, or delay traffic
on the network; who can operate onion routers of its own; and who can
compromise some fraction of the onion routers on the network.
In low-latency anonymity systems that use layered encryption, the
adversary's typical goal is to observe both the initiator and the
receiver. Passive attackers can confirm a suspicion that Alice is
talking to Bob if the timing and volume properties of the traffic on the
connection are unique enough; active attackers are even more effective
because they can induce timing signatures on the traffic. Tor provides
some defenses against these \emph{traffic confirmation} attacks, for
example by encouraging users to run their own onion routers, but it does
not provide complete protection. Rather, we aim to prevent \emph{traffic
analysis} attacks, where the adversary uses traffic patterns to learn
which points in the network he should attack.
Our adversary might try to link an initiator Alice with any of her
communication partners, or he might try to build a profile of Alice's
behavior. He might mount passive attacks by observing the edges of the
network and correlating traffic entering and leaving the network---either
because of relationships in packet timing; relationships in the volume
of data sent; or relationships in any externally visible user-selected
options. The adversary can also mount active attacks by compromising
routers or keys; by replaying traffic; by selectively denying service
to trustworthy routers to encourage users to send their traffic through
compromised routers, or denying service to users to see if the traffic
elsewhere in the
network stops; or by introducing patterns into traffic that can later be
detected. The adversary might attack the directory servers to give users
differing views of network state. Additionally, he can try to decrease
the network's reliability by attacking nodes or by performing antisocial
activities from reliable servers and trying to get them taken down;
making the network unreliable flushes users to other less anonymous
systems, where they may be easier to attack.
We consider each of these attacks in more detail below, and summarize
in Section~\ref{sec:attacks} how well the Tor design defends against
each of them.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\Section{The Tor Design}
\label{sec:design}
The Tor network is an overlay network; each node is called an onion router
(OR). Onion routers run as normal user-level processes without needing
any special
privileges. Currently, each OR maintains a long-term TLS \cite{TLS}
connection to every other
OR. (We examine some ways to relax this clique-topology assumption in
Section~\ref{subsec:restricted-routes}.) A subset of the ORs also act as
directory servers, tracking which routers are currently in the network;
see Section~\ref{subsec:dirservers} for directory server details. Users
run local software called an onion proxy (OP) to fetch directories,
establish paths (called \emph{virtual circuits}) across the network,
and handle connections from user applications. Onion proxies accept
TCP streams and multiplex them across the virtual circuit. The onion
router on the other side
% I don't mean other side, I mean wherever it is on the circuit. But
% don't want to introduce complexity this early? Hm. -RD
of the circuit connects to the destinations of
the TCP streams and relays data.
Each onion router uses three public keys: a long-term identity key, a
short-term onion key, and a short-term link key. The identity
(signing) key is used to sign TLS certificates, to sign its router
descriptor (a summary of its keys, address, bandwidth, exit policy,
etc.), and to sign directories if it is a directory server. Changing
the identity key of a router is considered equivalent to creating a
new router. The onion (decryption) key is used for decrypting requests
from users to set up a circuit and negotiate ephemeral keys. Finally,
link keys are used by the TLS protocol when communicating between
onion routers. We discuss rotating these keys in
Section~\ref{subsec:rotating-keys}.
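To make these roles concrete, the sketch below models the three keys and a
signed router descriptor in Python; the field names and the hash-based
``signature'' are illustrative stand-ins, not the formats or algorithms used
by the Tor implementation.
\begin{verbatim}
# Illustrative sketch only: the three keys described above, plus a router
# descriptor "signed" with the long-term identity key.  Field names and the
# hash-based signature are hypothetical stand-ins for real public-key
# signatures.
import hashlib
from dataclasses import dataclass

@dataclass
class OnionRouterKeys:
    identity_key: bytes   # long-term signing key; changing it = a new router
    onion_key: bytes      # short-term key for decrypting circuit-setup requests
    link_key: bytes       # short-term key used by TLS between routers

@dataclass
class RouterDescriptor:
    nickname: str
    address: str
    bandwidth: int
    exit_policy: list     # e.g. ["accept *:80", "reject *:*"]
    onion_key: bytes      # published so clients can encrypt create cells to it
    signature: bytes = b""

def sign_descriptor(desc, keys):
    """Stand-in for signing the descriptor with the identity key."""
    blob = repr((desc.nickname, desc.address, desc.bandwidth,
                 desc.exit_policy)).encode()
    desc.signature = hashlib.sha1(keys.identity_key + blob).digest()
\end{verbatim}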
Section~\ref{subsec:cells} discusses the structure of the fixed-size
\emph{cells} that are the unit of communication in Tor. We describe
in Section~\ref{subsec:circuits} how virtual circuits are
built, extended, truncated, and destroyed. Section~\ref{subsec:tcp}
describes how TCP streams are routed through the network, and finally
Section~\ref{subsec:congestion} talks about congestion control and
fairness issues.
\SubSection{Cells}
\label{subsec:cells}
% I think we should describe connections before cells. -NM
Traffic passes from one OR to another, or between a user's OP and an OR,
in fixed-size cells. Each cell is 256 bytes (but see
Section~\ref{sec:conclusion}
for a discussion of allowing large cells and small cells on the same
network), and consists of a header and a payload. The header includes an
anonymous circuit identifier (ACI) that specifies which circuit the
% Should we replace ACI with circID ? What is this 'anonymous circuit'
% thing anyway? -RD
cell refers to
(many circuits can be multiplexed over the single TCP connection between
ORs or between an OP and an OR), and a command to describe what to do
with the cell's payload. Cells are either \emph{control} cells, which are
interpreted by the node that receives them, or \emph{relay} cells,
which carry end-to-end stream data. Control cells can be one of:
\emph{padding} (currently used for keepalive, but also usable for link
padding); \emph{create} or \emph{created} (used to set up a new circuit);
or \emph{destroy} (to tear down a circuit).
% We need to say that ACIs are connection-specific: each circuit has
% a different ACI along each connection. -NM
% agreed -RD
Relay cells have an additional header (the relay header) after the
cell header, containing the stream identifier (many streams can
be multiplexed over a circuit); an end-to-end checksum for integrity
checking; the length of the relay payload; and a relay command. Relay
commands can be one of: \emph{relay
data} (for data flowing down the stream), \emph{relay begin} (to open a
stream), \emph{relay end} (to close a stream cleanly), \emph{relay
teardown} (to close a broken stream), \emph{relay connected}
(to notify the OP that a relay begin has succeeded), \emph{relay
extend} and \emph{relay extended} (to extend the circuit by a hop,
and to acknowledge), \emph{relay truncate} and \emph{relay truncated}
(to tear down only part of the circuit, and to acknowledge), \emph{relay
sendme} (used for congestion control), and \emph{relay drop} (used to
implement long-range dummies).
We describe each of these cell types in more detail below.
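As a rough illustration of this layout, the Python sketch below packs a relay
cell into a fixed-size buffer. The 256-byte cell size comes from the text
above; the individual field widths and command codes are assumptions made
only for illustration, not the actual wire format.
\begin{verbatim}
# Illustrative packing of the cell fields named above (field widths assumed).
import struct

CELL_LEN = 256
CMD_RELAY = 3                        # hypothetical command code

def pack_relay_cell(aci, stream_id, checksum, relay_cmd, data):
    # Cell header: ACI (2 bytes assumed) + command (1 byte assumed).
    header = struct.pack(">HB", aci, CMD_RELAY)
    # Relay header: stream ID (2), end-to-end checksum (4), length (2),
    # relay command (1) -- widths assumed for illustration.
    relay_header = struct.pack(">H4sHB", stream_id, checksum,
                               len(data), relay_cmd)
    body = relay_header + data
    assert len(header) + len(body) <= CELL_LEN
    return header + body.ljust(CELL_LEN - len(header), b"\x00")

cell = pack_relay_cell(aci=0x0A01, stream_id=7, checksum=b"\x00" * 4,
                       relay_cmd=1, data=b"GET / HTTP/1.0\r\n\r\n")
assert len(cell) == CELL_LEN
\end{verbatim}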
\SubSection{Circuits and streams}
\label{subsec:circuits}
% I think when we say ``the user,'' maybe we should say ``the user's OP.''
The original Onion Routing design built one circuit for each
TCP stream. Because building a circuit can take several tenths of a
second (due to public-key cryptography delays and network latency),
this design imposed high costs on applications like web browsing that
open many TCP streams.
In Tor, each circuit can be shared by many TCP streams. To avoid
delays, users construct circuits preemptively. To limit linkability
among the streams, users rotate connections by building a new circuit
periodically if the previous one has been used,
and expire old used circuits that are no longer in use. Tor considers
making a new circuit once a minute: thus
even heavy users spend a negligible amount of time and CPU in
building circuits, but only a limited number of requests can be linked
to each other by a given exit node. Also, because circuits are built
in the background, failed routers do not affect user experience.
\subsubsection{Constructing a circuit}
Users construct a circuit incrementally, negotiating a symmetric key with
each hop one at a time. To begin creating a new circuit, the user
(call her Alice) sends a \emph{create} cell to the first node in her
chosen path. The cell's payload is the first half of the
Diffie-Hellman handshake, encrypted to the onion key of the OR (call
him Bob). Bob responds with a \emph{created} cell containing the second
half of the DH handshake, along with a hash of the negotiated key
$K=g^{xy}$.
To extend a circuit past the first hop, Alice sends a \emph{relay extend}
cell to the last node in the circuit, specifying the address of the new
OR and an encrypted $g^x$ for it. That node copies the half-handshake
into a \emph{create} cell, and passes it to the new OR to extend the
circuit. When it responds with a \emph{created} cell, the penultimate OR
copies the payload into a \emph{relay extended} cell and passes it back.
% Nick: please fix my "that OR" pronouns -RD
The onion-level handshake protocol achieves unilateral entity
authentication (Alice knows she's handshaking with Bob, Bob doesn't
care who is opening the circuit---Alice has no key and is trying to
remain anonymous) and unilateral key authentication (Alice and Bob
agree on a key, and Alice knows Bob is the only other person who should
know it). We also want perfect forward secrecy and key freshness.
\begin{equation}
\begin{aligned}
\mathrm{Alice} \rightarrow \mathrm{Bob}&: E_{PK_{Bob}}(g^x) \\
\mathrm{Bob} \rightarrow \mathrm{Alice}&: g^y, H(K | \mbox{``handshake''}) \\
\end{aligned}
\end{equation}
The second step shows both that it was Bob
who received $g^x$, and that it was Bob who came up with $y$. We use
PK encryption in the first step (rather than, say, using the first two
steps of STS, which has a signature in the second step) because we
don't have enough room in a single cell for a public key and also a
signature. Preliminary analysis with the NRL protocol analyzer \cite{meadows96}
shows the above protocol to be secure (including providing PFS) under the
traditional Dolev-Yao model.
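To make the handshake concrete, the following sketch walks through the two
messages above with a toy Diffie-Hellman group; the tiny modulus and the
omission of the onion-key encryption step are simplifications for
illustration only.
\begin{verbatim}
# Toy sketch of the create/created handshake.  In Tor, g^x would be
# encrypted to Bob's onion key; that step is omitted here.
import hashlib, secrets

p = 0xFFFFFFFFFFFFFFC5          # toy 64-bit prime; real groups are far larger
g = 2

def H(k_bytes):
    return hashlib.sha1(k_bytes + b"handshake").digest()

# Alice -> Bob: E_{PK_Bob}(g^x)
x = secrets.randbelow(p - 2) + 1
gx = pow(g, x, p)

# Bob -> Alice: g^y, H(K | "handshake") with K = g^{xy}
y = secrets.randbelow(p - 2) + 1
gy = pow(g, y, p)
K_bob = pow(gx, y, p)
reply = (gy, H(K_bob.to_bytes(8, "big")))

# Alice derives the same K and checks that the responder knows it.
K_alice = pow(reply[0], x, p)
assert H(K_alice.to_bytes(8, "big")) == reply[1]
\end{verbatim}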
\subsubsection{Relay cells}
Once Alice has established the circuit (so she shares a key with each
OR on the circuit), she can send relay cells.
The stream ID in the relay header indicates to which stream the cell belongs.
A relay cell can be addressed to any of the ORs on the circuit. To
construct a relay cell addressed to a given OR, Alice iteratively
encrypts the cell payload (that is, the relay header and payload)
with the symmetric key of each hop up to that OR. Then, at each hop
down the circuit, the OR decrypts the cell payload and checks whether
it recognizes the stream ID. A stream ID is recognized either if it
is an already open stream at that OR, or if it is equal to zero. The
zero stream ID is treated specially, and is used for control messages,
e.g.\ starting a new stream. If the stream ID is unrecognized, the OR
passes the relay cell downstream. This \emph{leaky pipe} circuit topology
allows Alice's streams to exit at different ORs on a single circuit.
Alice may choose different exit points because of their exit policies,
or to keep the ORs from knowing that two streams
originate from the same person.
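The sketch below illustrates this leaky-pipe processing: Alice wraps the
relay payload once per hop up to her chosen exit, and each OR removes one
layer and checks whether it recognizes the stream ID. The keystream cipher
here is a toy stand-in for the symmetric encryption Tor actually uses, and
the recognition check is simplified (the special zero stream ID is noted but
not handled).
\begin{verbatim}
# Sketch of leaky-pipe relay-cell handling with a toy per-hop cipher.
import hashlib

def xor_layer(key, data):
    # Toy keystream cipher: the same call both adds and removes a layer.
    stream, counter = b"", 0
    while len(stream) < len(data):
        stream += hashlib.sha1(key + counter.to_bytes(4, "big")).digest()
        counter += 1
    return bytes(a ^ b for a, b in zip(data, stream))

hop_keys = [b"key-hop0", b"key-hop1", b"key-hop2"]   # negotiated per hop
recognized = [set(), {7}, set()]                     # stream 7 exits at hop 1

def alice_send(payload, exit_hop):
    cell = payload
    for key in reversed(hop_keys[:exit_hop + 1]):    # innermost layer = exit
        cell = xor_layer(key, cell)
    return cell

def relay_forward(cell):
    # Each OR removes one layer; if it recognizes the stream ID (or sees the
    # special zero ID, not modeled here), it handles the cell; otherwise it
    # passes the cell downstream.
    for hop, key in enumerate(hop_keys):
        cell = xor_layer(key, cell)
        stream_id = int.from_bytes(cell[:2], "big")
        if stream_id in recognized[hop]:
            return hop, cell
    raise ValueError("no hop recognized the stream ID")

wrapped = alice_send((7).to_bytes(2, "big") + b"hello", exit_hop=1)
hop, plain = relay_forward(wrapped)
assert hop == 1 and plain[2:] == b"hello"
\end{verbatim}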
To tear down a circuit, Alice sends a destroy control cell. Each OR
in the circuit receives the destroy cell, closes all open streams on
that circuit, and passes a new destroy cell forward. But since circuits
can be built incrementally, they can also be torn down incrementally:
Alice can instead send a relay truncate cell to a node along the circuit. That
node will send a destroy cell forward, and reply with an acknowledgment
(relay truncated). Alice might truncate her circuit so she can extend it
to different nodes without signaling to the first few nodes (or somebody
observing them) that she is changing her circuit. That is, nodes in the
middle are not even aware that the circuit was truncated, because the
relay cells are encrypted. Similarly, if a node on the circuit goes down,
the adjacent node can send a relay truncated back to Alice. Thus the
``break a node and see which circuits go down'' attack is weakened.
\SubSection{Opening and closing streams}
\label{subsec:tcp}
When Alice's application wants to open a TCP connection to a given
address and port, it asks the OP (via SOCKS) to make the connection. The
OP chooses the newest open circuit (or creates one if none is available),
chooses a suitable OR on that circuit to be the exit node (usually the
last node, but maybe others due to exit policy conflicts; see
Section~\ref{sec:exit-policies}), chooses a new random stream ID for
this stream,
and delivers a relay begin cell to that exit node. It uses a stream ID
of zero for the begin cell (so the OR will recognize it), and the relay
payload lists the new stream ID and the destination address and port.
Once the exit node completes the connection to the remote host, it
responds with a relay connected cell through the circuit. Upon receipt,
the OP notifies the application that it can begin talking.
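A sketch of the onion proxy's bookkeeping for this step appears below; the
circuit object, the exit-policy check, and the cell-sending helper are
hypothetical stand-ins for the corresponding pieces of a real implementation.
\begin{verbatim}
# Sketch of opening a stream over an existing circuit.  The circuit object,
# ExitPolicy.allows(), and send_relay_cell() are hypothetical stand-ins.
import secrets

RELAY_BEGIN = 1                       # illustrative relay-command code

def open_stream(circuit, app_socket, host, port, send_relay_cell):
    # Usually the last OR is the exit, unless its exit policy refuses.
    exit_or = next(r for r in reversed(circuit.hops)
                   if r.exit_policy.allows(host, port))
    # Pick a new random, nonzero stream ID for this stream.
    stream_id = secrets.randbelow(0xFFFF) + 1
    circuit.streams[stream_id] = app_socket
    # The begin cell itself travels on stream ID zero so the exit OR will
    # recognize it; its payload names the new stream ID and destination.
    payload = ("%d:%s:%d" % (stream_id, host, port)).encode()
    send_relay_cell(circuit, exit_or, stream_id=0,
                    relay_cmd=RELAY_BEGIN, data=payload)
    return stream_id
\end{verbatim}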
There's a catch to using SOCKS, though---some applications hand the
alphanumeric address to the proxy, while others resolve it into an IP
address first and then hand the IP to the proxy. When the application
does the DNS resolution first, Alice broadcasts her destination. Common
applications like Mozilla and ssh have this flaw.
In the case of Mozilla, we're fine: the filtering web proxy called Privoxy
does the SOCKS call safely, and Mozilla talks to Privoxy safely. But a
portable general solution, such as for ssh, is an open problem. We can
modify the local nameserver, but this approach is invasive, brittle, and
not portable. We can encourage the resolver library to do resolution
via TCP rather than UDP, but this approach is hard to do right, and also
has portability problems. We can provide a tool similar to \emph{dig} that
can do a private lookup through the Tor network. Our current answer is to
encourage the use of privacy-aware proxies like Privoxy wherever possible.
Ending a Tor stream is analogous to ending a TCP stream: it uses a
two-step handshake for normal operation, or a one-step handshake for
errors. If one side of the stream closes abnormally, that node simply
sends a relay teardown cell, and tears down the stream. If one side
of the stream closes the connection normally, that node sends a relay
end cell down the circuit. When the other side has sent back its own
relay end, the stream can be torn down. This two-step handshake allows
for TCP-based applications that, for example, close a socket for writing
but are still willing to read. Remember that all relay cells use layered
encryption, so only the destination OR knows what type of relay cell
it is.
\SubSection{Integrity checking on streams}
Because the old Onion Routing design used a stream cipher, traffic was
vulnerable to a malleability attack: even though the attacker could not
decrypt cells, he could make changes to an encrypted
cell to create corresponding changes to the data leaving the network.
(Even an external adversary could do this, despite link encryption!)
This weakness allowed an adversary to change a padding cell to a destroy
cell; change the destination address in a relay begin cell to the
adversary's webserver; or change a user on an ftp connection from
typing ``dir'' to typing ``delete~*''. Any node or external adversary
along the circuit could introduce such corruption in a stream.
Tor prevents external adversaries from mounting this attack simply by
using TLS. Addressing the insider malleability attack, however, is
more complex.
We could do integrity checking of the relay cells at each hop, either
by including hashes or by using a cipher mode like EAX \cite{eax},
but we don't want the added message-expansion overhead at each hop, and
we don't want to leak the path length or pad to some maximum path length.
Because we've already accepted that our design is vulnerable to end-to-end
timing attacks, we can perform integrity checking only at the edges of
the circuit without introducing any new anonymity attacks. When Alice
negotiates a key
with each hop, they both start a SHA-1 with some derivative of that key,
% Not just the exit hop, but each hop: any hop can be an exit node. -RD
thus starting out with randomness that only the two of them know. From
then on they each incrementally add to the SHA-1 all the data bytes
entering or exiting from the circuit, and each such relay cell includes
the first 4 bytes of the current value of the hash.
The attacker must be able to guess all previous bytes between Alice
and Bob on that circuit (including the pseudorandomness from the key
negotiation), plus the bytes in the current cell, to remove or modify the
cell. Attacks on SHA-1 where the adversary can incrementally add to a
hash to produce a new valid hash don't work,
because all hashes are end-to-end encrypted across the circuit.
The computational overhead isn't so bad, compared to doing an AES
crypt at each hop in the circuit. We use only four bytes per cell to
minimize overhead; the chance that an adversary will correctly guess a
valid hash, plus the payload of the current cell, is acceptably low, given
that Alice or Bob tears down the circuit upon receiving a bad hash.
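The running-digest check can be sketched as follows: both endpoints seed a
SHA-1 state with material derived from the negotiated key, fold every relay
payload into it, and compare 4-byte truncations of the current digest. The
key derivation shown is an assumption made only for illustration.
\begin{verbatim}
# Sketch of the end-to-end integrity check described above.
import hashlib

class RunningDigest:
    def __init__(self, key_seed):
        self._h = hashlib.sha1(key_seed)   # randomness only the two ends know

    def add(self, payload):
        # Fold the cell payload in; return the 4-byte value carried in the cell.
        self._h.update(payload)
        return self._h.digest()[:4]

seed = b"derived-from-the-negotiated-key"  # illustrative key derivative
alice = RunningDigest(seed)
exit_or = RunningDigest(seed)

cell = b"relay data: GET / HTTP/1.0"
check = alice.add(cell)                    # Alice sends the cell plus this value
assert exit_or.add(cell) == check          # honest delivery: digests stay in sync
# Had any node altered the cell, the two 4-byte values would disagree (except
# with negligible probability) and the endpoint would tear down the circuit.
\end{verbatim}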
\SubSection{Rate limiting and fairness}
Volunteers are generally more willing to run services that can limit
their bandwidth usage. To accommodate them, Tor servers use a token
bucket approach to limit the number of bytes they
% XXX cite token bucket?
receive. Tokens are added to the bucket each second (when the bucket is
full, new tokens are discarded). Each token represents permission to
receive one byte from the network---to receive a byte, the connection
must remove a token from the bucket. Thus if the bucket is empty, that
connection must wait until more tokens arrive. The number of tokens we
add enforces a long-term average rate of incoming bytes, while still
permitting short-term bursts above the allowed bandwidth. Current bucket
sizes are set to ten seconds' worth of traffic.
Further, we want to avoid starving any Tor streams. Entire circuits
could starve if we read greedily from connections and one connection
uses all the remaining bandwidth. We solve this by dividing the number
of tokens in the bucket by the number of connections that want to read,
and reading at most that number of bytes from each connection. We iterate
this procedure until the number of tokens in the bucket is under some
threshold (e.g., 10KB), at which point we greedily read from connections.
Because the Tor protocol generates roughly the same number of outgoing
bytes as incoming bytes, it is sufficient in practice to rate-limit
incoming bytes.
% Is it? Fun attack: I send you lots of 1-byte-at-a-time TCP frames.
% In response, you send lots of 256 byte cells. Can I use this to
% make you exceed your outgoing bandwidth limit by a factor of 256? -NM
% Can we resolve this by, when reading from edge connections, rounding up
% the bytes read (wrt buckets) to the nearest multiple of 256? -RD
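The refill-and-read loop described above can be sketched as follows; the
refill rate, ten-second burst, and 10KB threshold come from the text, while
the connection interface is a hypothetical stand-in.
\begin{verbatim}
# Sketch of token-bucket rate limiting with fair reads across connections.
class TokenBucket:
    def __init__(self, rate_bytes_per_sec):
        self.rate = rate_bytes_per_sec
        self.capacity = 10 * rate_bytes_per_sec   # ten seconds' worth of traffic
        self.tokens = self.capacity

    def refill(self):
        # Called once per second; tokens beyond the bucket size are discarded.
        self.tokens = min(self.capacity, self.tokens + self.rate)

    def read_round(self, connections, threshold=10 * 1024):
        # Divide the available tokens among connections that want to read,
        # and repeat until the bucket drops below the threshold.
        while self.tokens > threshold and connections:
            share = max(1, self.tokens // len(connections))
            for conn in list(connections):
                n = len(conn.recv_up_to(share))   # hypothetical bounded read
                self.tokens -= n
                if n == 0:
                    connections.remove(conn)      # nothing more to read now
        # Below the threshold we would read greedily, as described above.
\end{verbatim}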
Further, inspired by Rennhard et al.'s design in \cite{anonnet}, a
circuit's edges heuristically distinguish interactive streams from bulk
streams by comparing the frequency with which they supply cells. We can
provide good latency for interactive streams by giving them preferential
service, while still getting good overall throughput to the bulk
streams. Such preferential treatment presents a possible end-to-end
attack, but an adversary who can observe both
ends of the stream can already learn this information through timing
attacks.
\SubSection{Congestion control}
\label{subsec:congestion}
Even with bandwidth rate limiting, we still need to worry about
congestion, either accidental or intentional. If enough users choose the
same OR-to-OR connection for their circuits, that connection can become
saturated. For example, an adversary could make a large HTTP PUT request
through the onion routing network to a webserver he runs, and then
refuse to read any of the bytes at the webserver end of the
circuit. Without some congestion control mechanism, these bottlenecks
can propagate back through the entire network. We describe our
responses below.
  749. \subsubsection{Circuit-level}
  750. To control a circuit's bandwidth usage, each OR keeps track of two
  751. windows. The \emph{package window} tracks how many relay data cells the OR is
  752. allowed to package (from outside streams) for transmission back to the OP,
  753. and the \emph{deliver window} tracks how many relay data cells it is willing
  754. to deliver to streams outside the network. Each window is initialized
  755. (say, to 1000 data cells). When a data cell is packaged or delivered,
  756. the appropriate window is decremented. When an OR has received enough
  757. data cells (currently 100), it sends a relay sendme cell towards the OP,
  758. with stream ID zero. When an OR receives a relay sendme cell with stream
  759. ID zero, it increments its packaging window. Either of these cells
  760. increments the corresponding window by 100. If the packaging window
  761. reaches 0, the OR stops reading from TCP connections for all streams
  762. on the corresponding circuit, and sends no more relay data cells until
  763. receiving a relay sendme cell.
  764. The OP behaves identically, except that it must track a packaging window
  765. and a delivery window for every OR in the circuit. If a packaging window
  766. reaches 0, it stops reading from streams destined for that OR.
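A rough sketch of this window bookkeeping follows (Python, for
illustration only; an OP would keep one such pair of windows for every
OR in the circuit):
\begin{verbatim}
CIRCWINDOW_START = 1000   # initial window sizes from the text
SENDME_INCREMENT = 100    # cells per relay sendme

class CircuitWindows:
    def __init__(self):
        self.packaging_window = CIRCWINDOW_START  # cells we may still package
        self.delivery_window = CIRCWINDOW_START   # cells we may still deliver

    def can_package(self):
        """When False, stop reading from TCP for every stream on the
        circuit until a relay sendme arrives."""
        return self.packaging_window > 0

    def packaged_cell(self):
        self.packaging_window -= 1

    def delivered_cell(self):
        """Return True when a relay sendme (stream ID zero) should be
        sent back towards the packaging end of the circuit."""
        self.delivery_window -= 1
        if self.delivery_window <= CIRCWINDOW_START - SENDME_INCREMENT:
            self.delivery_window += SENDME_INCREMENT
            return True
        return False

    def received_sendme(self):
        self.packaging_window += SENDME_INCREMENT
\end{verbatim}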
  767. \subsubsection{Stream-level}
  768. The stream-level congestion control mechanism is similar to the
  769. circuit-level mechanism above. ORs and OPs use relay sendme cells
  770. to implement end-to-end flow control for individual streams across
circuits. Each stream begins with a packaging window (e.g., 500 cells),
  772. and increments the window by a fixed value (50) upon receiving a relay
  773. sendme cell. Rather than always returning a relay sendme cell as soon
  774. as enough cells have arrived, the stream-level congestion control also
  775. has to check whether data has been successfully flushed onto the TCP
  776. stream; it sends a relay sendme only when the number of bytes pending
to be flushed is under some threshold (currently 10 cells' worth).
Currently, non-data relay cells do not affect the windows. Thus we
avoid potential deadlock issues, such as a stream being unable to send a
relay sendme cell because its packaging window is empty.
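The stream-level sendme decision might be sketched as follows, using the
constants quoted above and the 256-byte cells assumed elsewhere in this
draft:
\begin{verbatim}
STREAM_SENDME_INCREMENT = 50   # cells per stream-level sendme
CELL_SIZE = 256                # cell size used elsewhere in this draft
FLUSH_THRESHOLD = 10 * CELL_SIZE

def should_send_stream_sendme(cells_since_last_sendme, bytes_awaiting_flush):
    """Send a stream-level relay sendme only once enough cells have
    arrived *and* nearly all of them have been flushed onto the TCP
    stream."""
    return (cells_since_last_sendme >= STREAM_SENDME_INCREMENT
            and bytes_awaiting_flush < FLUSH_THRESHOLD)
\end{verbatim}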
  781. \subsubsection{Needs more research}
  782. We don't need to reimplement full TCP windows (with sequence numbers,
the ability to drop cells when we're full and retransmit later, etc.),
  784. because the TCP streams already guarantee in-order delivery of each
  785. cell. But we need to investigate further the effects of the current
  786. parameters on throughput and latency, while also keeping privacy in mind;
  787. see Section~\ref{sec:maintaining-anonymity} for more discussion.
  788. \Section{Other design decisions}
  789. \SubSection{Resource management and denial-of-service}
  790. \label{subsec:dos}
Providing Tor as a public service creates many opportunities for an
attacker to mount denial-of-service attacks against the network. While
  793. flow control and rate limiting (discussed in
  794. Section~\ref{subsec:congestion}) prevent users from consuming more
  795. bandwidth than routers are willing to provide, opportunities remain for
  796. users to
  797. consume more network resources than their fair share, or to render the
  798. network unusable for other users.
  799. First of all, there are several CPU-consuming denial-of-service
  800. attacks wherein an attacker can force an OR to perform expensive
  801. cryptographic operations. For example, an attacker who sends a
  802. \emph{create} cell full of junk bytes can force an OR to perform an RSA
  803. decrypt. Similarly, an attacker can
  804. fake the start of a TLS handshake, forcing the OR to carry out its
  805. (comparatively expensive) half of the handshake at no real computational
  806. cost to the attacker.
  807. Several approaches exist to address these attacks. First, ORs may
  808. require clients to solve a puzzle \cite{puzzles-tls} while beginning new
TLS handshakes or accepting \emph{create} cells. So long as these
puzzles are easy to verify and computationally expensive to solve, this
  811. approach limits the attack multiplier. Additionally, ORs may limit
  812. the rate at which they accept create cells and TLS connections, so that
  813. the computational work of processing them does not drown out the (comparatively
  814. inexpensive) work of symmetric cryptography needed to keep cells
  815. flowing. This rate limiting could, however, allow an attacker
  816. to slow down other users when they build new circuits.
  817. % What about link-to-link rate limiting?
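To illustrate the easy-to-verify, expensive-to-produce property (though
not the particular scheme of \cite{puzzles-tls}), a hashcash-style
puzzle could look like this:
\begin{verbatim}
import hashlib
import itertools
import os

def make_challenge():
    return os.urandom(16)

def solve(challenge, difficulty_bits=20):
    """Client-side work: find a nonce whose hash has 'difficulty_bits'
    leading zero bits (cost grows exponentially with the difficulty)."""
    for nonce in itertools.count():
        digest = hashlib.sha256(challenge + str(nonce).encode()).digest()
        if int.from_bytes(digest, "big") >> (256 - difficulty_bits) == 0:
            return nonce

def verify(challenge, nonce, difficulty_bits=20):
    """Server-side check: a single hash, whatever the difficulty."""
    digest = hashlib.sha256(challenge + str(nonce).encode()).digest()
    return int.from_bytes(digest, "big") >> (256 - difficulty_bits) == 0
\end{verbatim}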
  818. Attackers also have an opportunity to attack the Tor network by mounting
  819. attacks on its hosts and network links. Disrupting a single circuit or
  820. link breaks all currently open streams passing along that part of the
  821. circuit. Indeed, this same loss of service occurs when a router crashes
  822. or its operator restarts it. The current Tor design treats such attacks
  823. as intermittent network failures, and depends on users and applications
  824. to respond or recover as appropriate. A future design could use an
  825. end-to-end TCP-like acknowledgment protocol, so that no streams are
  826. lost unless the entry or exit point itself is disrupted. This solution
  827. would require more buffering at the network edges, however, and the
  828. performance and anonymity implications from this extra complexity still
  829. require investigation.
  830. \SubSection{Exit policies and abuse}
  831. \label{subsec:exitpolicies}
  832. Exit abuse is a serious barrier to wide-scale Tor deployment. Anonymity
  833. presents would-be vandals and abusers with an opportunity to hide
  834. the origins of their activities. Attackers can harm the Tor network by
  835. implicating exit servers for their abuse. Also, applications that commonly
  836. use IP-based authentication (such as institutional mail or web servers)
  837. can be fooled by the fact that anonymous connections appear to originate
  838. at the exit OR.
  839. We stress that Tor does not enable any new class of abuse. Spammers
  840. and other attackers already have access to thousands of misconfigured
  841. systems worldwide, and the Tor network is far from the easiest way
  842. to launch these antisocial or illegal attacks. Indeed, Tor's limited
  843. anonymity may be a benefit here, because large determined adversaries
  844. may still be able to track down criminals. In any case, because the
  845. %XXX
  846. onion routers can easily be mistaken for the originators of the abuse,
  847. and the volunteers who run them may not want to deal with the hassle of
  848. repeatedly explaining anonymity networks, we must block or limit attacks
  849. and other abuse that travel through the Tor network.
  850. To mitigate abuse issues, in Tor, each onion router's \emph{exit policy}
  851. describes to which external addresses and ports the router will permit
  852. stream connections. On one end of the spectrum are \emph{open exit}
  853. nodes that will connect anywhere. On the other end are \emph{middleman}
  854. nodes that only relay traffic to other Tor nodes, and \emph{private exit}
  855. nodes that only connect to a local host or network. Using a private
  856. exit (if one exists) is a more secure way for a client to connect to a
given host or network---an external adversary cannot eavesdrop on traffic
  858. between the private exit and the final destination, and so is less sure of
  859. Alice's destination and activities. Most onion routers will function as
  860. \emph{restricted exits} that permit connections to the world at large,
  861. but prevent access to certain abuse-prone addresses and services. In
  862. general, nodes can require a variety of forms of traffic authentication
  863. \cite{or-discex00}.
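One plausible way to represent an exit policy is as an ordered list of
accept and reject rules, where the first matching rule decides; the rule
format below is invented for illustration and is not Tor's configuration
syntax:
\begin{verbatim}
import ipaddress

def exit_allowed(policy, addr, port):
    for action, network, (low, high) in policy:
        if ipaddress.ip_address(addr) in ipaddress.ip_network(network) \
                and low <= port <= high:
            return action == "accept"
    return False          # default-reject if no rule matches

# A hypothetical "restricted exit": allow web traffic, refuse the local
# network and the abuse-prone SMTP port, and reject everything else.
policy = [
    ("reject", "10.0.0.0/8", (1, 65535)),
    ("reject", "0.0.0.0/0",  (25, 25)),
    ("accept", "0.0.0.0/0",  (80, 80)),
    ("accept", "0.0.0.0/0",  (443, 443)),
    ("reject", "0.0.0.0/0",  (1, 65535)),
]
\end{verbatim}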
  864. %The abuse issues on closed (e.g. military) networks are different
  865. %from the abuse on open networks like the Internet. While these IP-based
  866. %access controls are still commonplace on the Internet, on closed networks,
  867. %nearly all participants will be honest, and end-to-end authentication
  868. %can be assumed for important traffic.
  869. Many administrators will use port restrictions to support only a
  870. limited set of well-known services, such as HTTP, SSH, or AIM.
  871. This is not a complete solution, since abuse opportunities for these
  872. protocols are still well known. Nonetheless, the benefits are real,
  873. since administrators seem used to the concept of port 80 abuse not
  874. coming from the machine's owner.
  875. A further solution may be to use proxies to clean traffic for certain
  876. protocols as it leaves the network. For example, much abusive HTTP
  877. behavior (such as exploiting buffer overflows or well-known script
  878. vulnerabilities) can be detected in a straightforward manner.
  879. Similarly, one could run automatic spam filtering software (such as
  880. SpamAssassin) on email exiting the OR network.
  881. ORs may also choose to rewrite exiting traffic in order to append
  882. headers or other information to indicate that the traffic has passed
  883. through an anonymity service. This approach is commonly used
  884. by email-only anonymity systems. When possible, ORs can also
  885. run on servers with hostnames such as {\it anonymous}, to further
  886. alert abuse targets to the nature of the anonymous traffic.
  887. A mixture of open and restricted exit nodes will allow the most
  888. flexibility for volunteers running servers. But while many
  889. middleman nodes help provide a large and robust network,
  890. having only a few exit nodes reduces the number of points
  891. an adversary needs to monitor for traffic analysis, and places a
  892. greater burden on the exit nodes. This tension can be seen in the
  893. Java Anon Proxy
  894. cascade model, wherein only one node in each cascade needs to handle
  895. abuse complaints---but an adversary only needs to observe the entry
  896. and exit of a cascade to perform traffic analysis on all that
  897. cascade's users. The Hydra model (many entries, few exits) presents a
  898. different compromise: only a few exit nodes are needed, but an
  899. adversary needs to work harder to watch all the clients; see
  900. Section~\ref{sec:conclusion}.
  901. Finally, we note that exit abuse must not be dismissed as a peripheral
  902. issue: when a system's public image suffers, it can reduce the number
  903. and diversity of that system's users, and thereby reduce the anonymity
  904. of the system itself. Like usability, public perception is also a
  905. security parameter. Sadly, preventing abuse of open exit nodes is an
  906. unsolved problem, and will probably remain an arms race for the
foreseeable future. The abuse problems faced by Princeton's CoDeeN
  908. project \cite{darkside} give us a glimpse of likely issues.
\SubSection{Directory servers}
  910. \label{subsec:dirservers}
  911. First-generation Onion Routing designs \cite{freedom2-arch,or-jsac98} used
  912. in-band network status updates: each router flooded a signed statement
  913. to its neighbors, which propagated it onward. But anonymizing networks
  914. have different security goals than typical link-state routing protocols.
  915. For example, delays (accidental or intentional)
  916. that can cause different parts of the network to have different pictures
  917. of link-state and topology are not only inconvenient---they give
  918. attackers an opportunity to exploit differences in client knowledge.
  919. We also worry about attacks to deceive a
  920. client about the router membership list, topology, or current network
  921. state. Such \emph{partitioning attacks} on client knowledge help an
  922. adversary to efficiently deploy resources
  923. when attacking a target.
  924. Tor uses a small group of redundant, well-known onion routers to
  925. track changes in network topology and node state, including keys and
  926. exit policies. Each such \emph{directory server} also acts as an HTTP
  927. server, so participants can fetch current network state and router
  928. lists (a \emph{directory}), and so other onion routers can upload
  929. their router descriptors. Onion routers periodically publish signed
  930. statements of their state to each directory server, which combines this
  931. state information with its own view of network liveness, and generates
  932. a signed description of the entire network state. Client software is
  933. pre-loaded with a list of the directory servers and their keys; it uses
  934. this information to bootstrap each client's view of the network.
  935. When a directory server receives a signed statement from an onion
  936. router, it recognizes the onion router by its identity key. Directory
  937. servers do not automatically advertise unrecognized ORs. (If they did,
  938. an adversary could take over the network by creating many servers
  939. \cite{sybil}.) Instead, new nodes must be approved by the directory
  940. server administrator before they are included. Mechanisms for automated
  941. node approval are an area of active research, and are discussed more
  942. in Section~\ref{sec:maintaining-anonymity}.
  943. Of course, a variety of attacks remain. An adversary who controls
  944. a directory server can track certain clients by providing different
  945. information---perhaps by listing only nodes under its control, or by
  946. informing only certain clients about a given node. Even an external
  947. adversary can exploit differences in client knowledge: clients who use
  948. a node listed on one directory server but not the others are vulnerable.
  949. Thus these directory servers must be synchronized and redundant.
  950. Valid directories are those signed by a threshold of the directory
  951. servers.
  952. The directory servers in Tor are modeled after those in Mixminion
  953. \cite{minion-design}, but our situation is easier. First, we make the
  954. simplifying assumption that all participants agree on the set of
  955. directory servers. Second, while Mixminion needs to predict node
  956. behavior, Tor only needs a threshold consensus of the current
  957. state of the network.
  958. Tor directory servers build a consensus directory through a simple
  959. four-round broadcast protocol. In round one, each server dates and
  960. signs its current opinion, and broadcasts it to the other directory
  961. servers; then in round two, each server rebroadcasts all the signed
  962. opinions it has received. At this point all directory servers check
  963. to see whether any server has signed multiple opinions in the same
  964. period. Such a server is either broken or cheating, so the protocol
  965. stops and notifies the administrators, who either remove the cheater
  966. or wait for the broken server to be fixed. If there are no
discrepancies, each directory server then locally applies an algorithm
(described below)
to the set of opinions, resulting in a uniform shared directory. In
round three, servers sign this directory and broadcast it; and finally
  971. in round four the servers rebroadcast the directory and all the
  972. signatures. If any directory server drops out of the network, its
  973. signature is not included on the final directory.
  974. The rebroadcast steps ensure that a directory server is heard by
  975. either all of the other servers or none of them, even when some links
  976. are down (assuming that any two directory servers can talk directly or
  977. via a third). Broadcasts are feasible because there are relatively few
  978. directory servers (currently 3, but we expect as many as 9 as the network
  979. scales). Computing the shared directory locally is a straightforward
  980. threshold voting process: we include an OR if a majority of directory
  981. servers believe it to be good.
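This local computation is small enough to sketch directly (the directory
server and OR names are hypothetical):
\begin{verbatim}
def compute_shared_directory(opinions):
    """'opinions' maps each directory server to the set of OR identity
    keys that server currently believes to be good; include an OR if a
    majority of directory servers list it."""
    n = len(opinions)
    counts = {}
    for ors in opinions.values():
        for or_id in ors:
            counts[or_id] = counts.get(or_id, 0) + 1
    return sorted(o for o, c in counts.items() if c > n // 2)

# With three directory servers:
opinions = {"dir1": {"A", "B", "C"},
            "dir2": {"A", "C"},
            "dir3": {"B", "C", "D"}}
assert compute_shared_directory(opinions) == ["A", "B", "C"]
\end{verbatim}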
  982. To avoid attacks where a router connects to all the directory servers
  983. but refuses to relay traffic from other routers, the directory servers
  984. must build circuits and use them to anonymously test router reliability
  985. \cite{mix-acc}.
  986. Using directory servers is simpler and more flexible than flooding.
  987. For example, flooding complicates the analysis when we
  988. start experimenting with non-clique network topologies. And because
  989. the directories are signed, they can be cached by other onion routers.
  990. Thus directory servers are not a performance
  991. bottleneck when we have many users, and do not aid traffic analysis by
  992. forcing clients to periodically announce their existence to any
  993. central point.
  994. \Section{Rendezvous points: location privacy}
  995. \label{sec:rendezvous}
  996. Rendezvous points are a building block for \emph{location-hidden
  997. services} (also known as ``responder anonymity'') in the Tor
  998. network. Location-hidden services allow Bob to offer a TCP
service, such as a webserver, without revealing its IP address.
We are also motivated by protection against distributed DoS attacks:
attackers are forced to attack the onion routing network as a whole
rather than just Bob's IP address.
  1003. Our design for location-hidden servers has the following goals.
  1004. \textbf{Flood-proof:} An attacker should not be able to flood Bob
  1005. with traffic simply by sending many requests to talk to Bob. Thus,
  1006. Bob needs a way to filter incoming requests. \textbf{Robust:} Bob
  1007. should be able to maintain a long-term pseudonymous identity even
  1008. in the presence of router failure. Thus, Bob's service must not be
  1009. tied to a single OR, and Bob must be able to tie his service to new
  1010. ORs. \textbf{Smear-resistant:} An attacker should not be able to use
  1011. rendezvous points to smear an OR. That is, if a social attacker tries
  1012. to host a location-hidden service that is illegal or disreputable, it
  1013. should not appear---even to a casual observer---that the OR is hosting
  1014. that service. \textbf{Application-transparent:} Although we are willing to
  1015. require users to run special software to access location-hidden servers,
  1016. we are not willing to require them to modify their applications.
  1017. \subsection{Rendezvous design}
  1018. We provide location-hiding for Bob by allowing him to advertise
  1019. several onion routers (his \emph{Introduction Points}) as his public
location. (He may do this on any robust, efficient distributed
  1021. key-value lookup system with authenticated updates, such as CFS
  1022. \cite{cfs:sosp01}\footnote{
  1023. Each onion router could run a node in this lookup
  1024. system; also note that as a stopgap measure, we can start by running a
  1025. simple lookup system on the directory servers.})
Alice, the client, chooses a node for her
\emph{Rendezvous Point}. She connects to one of Bob's introduction
  1028. points, informs him about her rendezvous point, and then waits for him
  1029. to connect to the rendezvous point. This extra level of indirection
  1030. helps Bob's introduction points avoid problems associated with serving
  1031. unpopular files directly, as could occur, for example, if Bob chooses
  1032. an introduction point in Texas to serve anti-ranching propaganda,
  1033. or if Bob's service tends to get attacked by network vandals.
  1034. The extra level of indirection also allows Bob to respond to some requests
  1035. and ignore others.
The steps of a rendezvous are as follows. These steps are performed on
  1037. behalf of Alice and Bob by their local onion proxies, which they both
  1038. must run; application integration is described more fully below.
  1039. \begin{tightlist}
\item Bob chooses some introduction points, and advertises them via
  1041. CFS (or some other distributed key-value publication system).
  1042. \item Bob establishes a Tor virtual circuit to each of his
  1043. Introduction Points, and waits.
  1044. \item Alice learns about Bob's service out of band (perhaps Bob told her,
  1045. or she found it on a website). She looks up the details of Bob's
  1046. service from CFS.
  1047. \item Alice chooses an OR to serve as a Rendezvous Point (RP) for this
  1048. transaction. She establishes a virtual circuit to her RP, and
  1049. tells it to wait for connections. %[XXX how?]
  1050. \item Alice opens an anonymous stream to one of Bob's Introduction
Points, and gives it a message (encrypted for Bob) which tells him
  1052. about herself, her chosen RP, and the first half of an ephemeral
  1053. key handshake. The Introduction Point sends the message to Bob.
  1054. \item Bob may decide to ignore Alice's request. %[XXX Based on what?]
  1055. Otherwise, he creates a new virtual circuit to Alice's RP, and
  1056. authenticates himself. %[XXX how?]
  1057. \item If the authentication is successful, the RP connects Alice's
virtual circuit to Bob's. Note that the RP can't recognize Alice,
  1059. Bob, or the data they transmit (they share a session key).
  1060. \item Alice now sends a Begin cell along the circuit. It arrives at Bob's
  1061. onion proxy. Bob's onion proxy connects to Bob's webserver.
  1062. \item An anonymous stream has been established, and Alice and Bob
  1063. communicate as normal.
  1064. \end{tightlist}
  1065. %[XXX We need to modify the above to refer people down to these next
  1066. % paragraphs. -NM]
  1067. When establishing an introduction point, Bob provides the onion router
  1068. with a public ``introduction'' key. The hash of this public key
  1069. identifies a unique service, and (since Bob is required to sign his
  1070. messages) prevents anybody else from usurping Bob's introduction point
  1071. in the future. Bob uses the same public key when establishing the other
  1072. introduction points for that service.
  1073. The message that Alice gives the introduction point includes a hash of Bob's
  1074. public key to identify the service, an optional initial authentication
token (the introduction point can do prescreening, e.g., to block replays),
  1076. and (encrypted to Bob's public key) the location of the rendezvous point,
a rendezvous cookie Bob should present to the RP so that he gets connected to
Alice, an optional authentication token so Bob can choose whether to respond,
and the first half of a DH key exchange. When Bob connects to the RP
  1080. and gets connected to Alice's pipe, his first cell contains the
  1081. other half of the DH key exchange.
  1082. The authentication tokens can be used to provide selective access to users
proportional to how important it is that they maintain uninterrupted access
  1084. to the service. During normal situations, Bob's service might simply be
  1085. offered directly from mirrors; Bob can also give out authentication cookies
  1086. to high-priority users. If those mirrors are knocked down by
  1087. distributed DoS attacks,
  1088. those users can switch to accessing Bob's service via the Tor
  1089. rendezvous system.
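Putting the pieces above together, Alice's introduction message might be
represented roughly as follows; the field names and layout are invented
for illustration:
\begin{verbatim}
from dataclasses import dataclass
from typing import Optional

@dataclass
class IntroductionRequest:
    service_pk_hash: bytes             # which of Bob's services Alice wants
    intro_auth_token: Optional[bytes]  # lets the introduction point prescreen
    # --- the remaining fields are encrypted to Bob's public key ---
    rendezvous_point: str              # the OR where Alice is waiting
    rendezvous_cookie: bytes           # Bob presents this to get connected
    bob_auth_token: Optional[bytes]    # lets Bob decide whether to respond
    dh_first_half: bytes               # g^x of the ephemeral DH handshake

# Bob's first cell over the rendezvous circuit would then carry g^y,
# the other half of the DH key exchange.
\end{verbatim}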
  1090. \SubSection{Integration with user applications}
  1091. For each service Bob offers, he configures his local onion proxy to know
the local IP address and port of the server, a strategy for authorizing Alices,
  1093. and a public key. Bob publishes
  1094. the public key, an expiration
  1095. time (``not valid after''), and the current introduction points for
  1096. his
service into CFS, all indexed by the hash of the public key.
Note that Bob's webserver is unmodified, and doesn't even know
  1099. that it's hidden behind the Tor network.
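The record Bob publishes might look roughly like the following; the
field names and JSON encoding are invented, and Bob's signature over the
descriptor is omitted for brevity:
\begin{verbatim}
import hashlib
import json
import time

def make_service_descriptor(public_key_bytes, introduction_points,
                            lifetime=24 * 3600):
    descriptor = {
        "public-key": public_key_bytes.hex(),
        "not-valid-after": int(time.time()) + lifetime,
        "introduction-points": introduction_points,  # OR addresses/nicknames
    }
    lookup_key = hashlib.sha1(public_key_bytes).hexdigest()  # H(PK) index
    return lookup_key, json.dumps(descriptor)
\end{verbatim}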
  1100. Because Alice's applications must work unchanged, her client interface
  1101. remains a SOCKS proxy. Thus we must encode all of the necessary
  1102. information into the fully qualified domain name Alice uses when
  1103. establishing her connections. Location-hidden services use a virtual
top-level domain called `.onion': thus hostnames take the form
x.y.onion, where x is the authentication cookie and y encodes the hash
of PK. Alice's onion proxy examines hostnames and recognizes when
they're destined for a hidden server. If so, it decodes the hash of the PK and
starts the rendezvous as described in the steps above.
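A sketch of this hostname handling follows; since the text does not fix
an encoding for y, base32 is assumed purely for illustration:
\begin{verbatim}
import base64

def parse_onion_hostname(hostname):
    labels = hostname.lower().rstrip(".").split(".")
    if len(labels) != 3 or labels[-1] != "onion":
        return None                       # not a hidden-service address
    cookie, encoded = labels[0], labels[1]
    padded = encoded.upper() + "=" * (-len(encoded) % 8)
    pk_hash = base64.b32decode(padded)    # y: hash of Bob's public key
    return cookie, pk_hash                # x: authentication cookie
\end{verbatim}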
  1109. \subsection{Previous rendezvous work}
  1110. Ian Goldberg developed a similar notion of rendezvous points for
  1111. low-latency anonymity systems \cite{ian-thesis}. His ``service tags''
  1112. play the same role in his design as the hashes of services' public
  1113. keys play in ours. We use public key hashes so that they can be
  1114. self-authenticating, and so the client can recognize the same service
  1115. with confidence later on. His design also differs from ours in the
  1116. following ways: First, Goldberg suggests that the client should
manually hunt down a current location of the service via Gnutella,
  1118. whereas our use of CFS makes lookup faster, more robust, and
  1119. transparent to the user. Second, in Tor the client and server
  1120. negotiate ephemeral keys via Diffie-Hellman, so at no point in the
  1121. path is the plaintext exposed. Third, our design tries to minimize the
  1122. exposure associated with running the service, so as to make volunteers
  1123. more willing to offer introduction and rendezvous point services.
  1124. Tor's introduction points do not output any bytes to the clients, and
  1125. the rendezvous points don't know the client, the server, or the data
  1126. being transmitted. The indirection scheme is also designed to include
  1127. authentication/authorization---if the client doesn't include the right
  1128. cookie with its request for service, the server need not even
  1129. acknowledge its existence.
  1130. \Section{Analysis}
  1131. \label{sec:analysis}
  1132. In this section, we discuss how well Tor meets our stated design goals
  1133. and its resistance to attacks.
\SubSection{Meeting basic goals}
  1135. % None of these seem to say very much. Should this subsection be removed?
  1136. \begin{tightlist}
\item [Basic Anonymity:] Because traffic is encrypted, changes in
appearance, and can flow from anywhere to anywhere within the
  1139. network, a simple observer that cannot see both the initiator
  1140. activity and the corresponding activity where the responder talks to
  1141. the network will not be able to link the initiator and responder.
  1142. Nor is it possible to directly correlate any two communication
  1143. sessions as coming from a single source without additional
  1144. information. Resistance to more sophisticated anonymity threats is
  1145. discussed below.
  1146. \item[Deployability:] Tor requires no specialized hardware. Tor
  1147. requires no kernel modifications; it runs in user space (currently
  1148. on Linux, various BSDs, and Windows). All of these imply a low
  1149. technical barrier to running a Tor node. There is an assumption that
Tor nodes have good, relatively persistent network connectivity
(currently T1 or better);
  1152. % Is that reasonable to say? We haven't really discussed it -P.S.
  1153. % Roger thinks otherwise; he will fix this. -NM
  1154. however, there is no padding overhead, and operators can limit
  1155. bandwidth on any link. Tor is freely available under the modified
  1156. BSD license, and operators are able to choose their own exit
  1157. policies, thus reducing legal and social barriers to
  1158. running a node.
  1159. \item[Usability:] As noted, Tor runs in user space. So does the onion
  1160. proxy, which is comparatively easy to install and run. SOCKS-aware
  1161. applications require nothing more than to be pointed at the onion
  1162. proxy; other applications can be redirected to use SOCKS for their
  1163. outgoing TCP connections by drop-in libraries such as tsocks.
\item[Flexibility:] Tor's design and implementation are fairly modular,
  1165. so that, for example, a scalable P2P replacement for the directory
  1166. servers would not substantially impact other aspects of the system.
  1167. Tor runs on top of TCP, so design options that could not easily do
  1168. so would be difficult to test on the current network. However, most
  1169. low-latency protocols are designed to run over TCP. We are currently
  1170. working with the designers of MorphMix to render our two systems
interoperable. So far, this seems to be relatively straightforward.
  1172. Interoperability will allow testing and direct comparison of the two
  1173. rather different designs.
  1174. \item[Simple design:] Tor opts for practicality when there is no
  1175. clear resolution of anonymity trade-offs or practical means to
achieve resolution. Thus, we do not currently pad or mix, although
it would be easy to add either of these. Indeed, our system allows
  1178. long-range and variable padding if this should ever be shown to have
  1179. a clear advantage. Similarly, we do not currently attempt to
  1180. resolve such issues as Sybil attacks to dominate the network except
by such direct means as personal familiarity of directory server operators
  1182. with all node operators.
  1183. \end{tightlist}
\SubSection{Attacks and defenses}
  1185. \label{sec:attacks}
  1186. Below we summarize a variety of attacks, and discuss how well our
  1187. design withstands them.
  1188. \subsubsection*{Passive attacks}
  1189. \begin{tightlist}
\item \emph{Observing user traffic patterns.} Observations of the connection
between an end user and the first onion router will not reveal to whom
  1192. the user is connecting or what information is being sent. It will
  1193. reveal patterns of user traffic (both sent and received). Simple
  1194. profiling of user connection patterns is not generally possible,
  1195. however, because multiple application connections (streams) may be
  1196. operating simultaneously or in series over a single circuit. Thus,
  1197. further processing is necessary to try to discern even these usage
  1198. patterns.
  1199. \item \emph{Observing user content.} At the user end, content is
  1200. encrypted; however, connections from the network to arbitrary
  1201. websites may not be. Further, a responding website may itself be
  1202. considered an adversary. Filtering content is not a primary goal of
  1203. Onion Routing; nonetheless, Tor can directly make use of Privoxy and
  1204. related filtering services via SOCKS and thus anonymize their
  1205. application data streams.
  1206. \item \emph{Option distinguishability.} Configuration options can be a
  1207. source of distinguishable patterns. In general there is economic
incentive to allow preferential services \cite{econymics}, and some
degree of configuration choice can help attract the many users needed
to provide anonymity. So far, however, we have
  1211. not found a compelling use case in Tor for any client-configurable
  1212. options. Thus, clients are currently distinguishable only by their
  1213. behavior.
\item \emph{End-to-end timing correlation.} Tor only minimally hides
  1215. end-to-end timing correlations. If an attacker can watch patterns of
  1216. traffic at the initiator end and the responder end, then he will be
  1217. able to confirm the correspondence with high probability. The
  1218. greatest protection currently against such confirmation is if the
  1219. connection between the onion proxy and the first Tor node is hidden,
  1220. possibly because it is local or behind a firewall. This approach
requires an observer to separate traffic originating at the onion
router from traffic passing through it. We do not, however, expect
this approach to present much of an obstacle to an attacker who can
observe traffic at both ends of an application connection.
\item \emph{End-to-end size correlation.} Simple packet counting
without timing consideration will also be effective in confirming the
endpoints of a connection through Onion Routing, although slightly
  1228. less so. This is because, even without padding, the leaky pipe
  1229. topology means different numbers of packets may enter one end of a
  1230. circuit than exit at the other.
  1231. \item \emph{Website fingerprinting.} All the above passive
  1232. attacks that are at all effective are traffic confirmation attacks.
  1233. This puts them outside our general design goals. There is also
  1234. a passive traffic analysis attack that is potentially effective.
  1235. Instead of searching exit connections for timing and volume
correlations, it is possible to build up a database of
  1237. ``fingerprints'' containing file sizes and access patterns for many
  1238. interesting websites. If one now wants to
  1239. monitor the activity of a user, it may be possible to confirm a
  1240. connection to a site simply by consulting the database. This attack has
  1241. been shown to be effective against SafeWeb \cite{hintz-pet02}. Onion
  1242. Routing is not as vulnerable as SafeWeb to this attack: There is the
  1243. possibility that multiple streams are exiting the circuit at
  1244. different places concurrently. Also, fingerprinting will be limited to
  1245. the granularity of cells, currently 256 bytes. Larger cell sizes
  1246. and/or minimal padding schemes that group websites into large sets
  1247. are possible responses. But this remains an open problem. Link
  1248. padding or long-range dummies may also make fingerprints harder to
  1249. detect. (Note that
  1250. such fingerprinting should not be confused with the latency attacks
  1251. of \cite{back01}. Those require a fingerprint of the latencies of
  1252. all circuits through the network, combined with those from the
  1253. network edges to the targeted user and the responder website. While
these are in principle feasible and surprises are always possible,
  1255. these constitute a much more complicated attack, and there is no
  1256. current evidence of their practicality.)
  1257. \item \emph{Content analysis.} Tor explicitly provides no content
  1258. rewriting for any protocol at a higher level than TCP. When
  1259. protocol cleaners are available, however (as Privoxy is for HTTP),
  1260. Tor can integrate them in order to address these attacks.
  1261. \end{tightlist}
  1262. \subsubsection*{Active attacks}
  1263. \begin{tightlist}
  1264. \item \emph{Key compromise.} We consider the impact of a compromise
  1265. for each type of key in turn, from the shortest- to the
  1266. longest-lived. If a circuit session key is compromised, the
  1267. attacker can unwrap a single layer of encryption from the relay
  1268. cells traveling along that circuit. (Only nodes on the circuit can
  1269. see these cells.) If a TLS session key is compromised, an attacker
can view all the cells on that TLS connection until the key is
  1271. renegotiated. (These cells are themselves encrypted.) If a TLS
  1272. private key is compromised, the attacker can fool others into
  1273. thinking that he is the affected OR, but still cannot accept any
  1274. connections. If an onion private key is compromised, the attacker
  1275. can impersonate the OR in circuits, but only if the attacker has
  1276. also compromised the OR's TLS private key, or is running the
  1277. previous OR in the circuit. (This compromise affects newly created
  1278. circuits, but because of perfect forward secrecy, the attacker
  1279. cannot hijack old circuits without compromising their session keys.)
  1280. In any case, an attacker can only take advantage of a compromise in
  1281. these mid-term private keys until they expire. Only by
  1282. compromising a node's identity key can an attacker replace that
  1283. node indefinitely, by sending new forged mid-term keys to the
  1284. directories. Finally, an attacker who can compromise a
  1285. \emph{directory's} identity key can influence every client's view
  1286. of the network---but only to the degree made possible by gaining a
vote with the rest of the directory servers.
  1288. \item \emph{Iterated compromise.} A roving adversary who can
compromise ORs (by system intrusion, legal coercion, or extralegal
coercion) could march down the length of a circuit, compromising the
  1291. nodes until he reaches the end. Unless the adversary can complete
  1292. this attack within the lifetime of the circuit, however, the ORs
  1293. will have discarded the necessary information before the attack can
  1294. be completed. (Thanks to the perfect forward secrecy of session
keys, the attacker cannot force nodes to decrypt recorded
  1296. traffic once the circuits have been closed.) Additionally, building
  1297. circuits that cross jurisdictions can make legal coercion
  1298. harder---this phenomenon is commonly called ``jurisdictional
  1299. arbitrage.'' The Java Anon Proxy project recently experienced this
  1300. issue, when
  1301. the German government successfully ordered them to add a backdoor to
  1302. all of their nodes \cite{jap-backdoor}.
  1303. \item \emph{Run a recipient.} By running a Web server, an adversary
  1304. trivially learns the timing patterns of those connecting to it, and
  1305. can introduce arbitrary patterns in its responses. This can greatly
  1306. facilitate end-to-end attacks: If the adversary can induce certain
users to connect to his webserver (perhaps by providing
content targeted at those users), he now holds one end of their
connection. Additionally, there is a danger that the application
  1310. protocols and associated programs can be induced to reveal
  1311. information about the initiator. This is not directly in Onion
  1312. Routing's protection area, so we are dependent on Privoxy and
  1313. similar protocol cleaners to solve the problem.
  1314. \item \emph{Run an onion proxy.} It is expected that end users will
  1315. nearly always run their own local onion proxy. However, in some
  1316. settings, it may be necessary for the proxy to run
  1317. remotely---typically, in an institutional setting where it was
  1318. necessary to monitor the activity of those connecting to the proxy.
  1319. The drawback, of course, is that if the onion proxy is compromised,
  1320. then all future connections through it are completely compromised.
\item \emph{DoS non-observed nodes.} An observer who can watch some
of the Tor network can increase the value of its traffic analysis
by attacking non-observed nodes to shut them down, reduce
  1324. their reliability, or persuade users that they are not trustworthy.
  1325. The best defense here is robustness.
\item \emph{Run a hostile node.} In addition to the abilities of a
  1327. local observer, an isolated hostile node can create circuits through
  1328. itself, or alter traffic patterns, in order to affect traffic at
  1329. other nodes. Its ability to directly DoS a neighbor is now limited
  1330. by bandwidth throttling. Nonetheless, in order to compromise the
  1331. anonymity of the endpoints of a circuit by its observations, a
  1332. hostile node is only significant if it is immediately adjacent to
  1333. that endpoint.
  1334. \item \emph{Run multiple hostile nodes.} If an adversary is able to
  1335. run multiple ORs, and is able to persuade the directory servers
that those ORs are trustworthy and independent, then occasionally
  1337. some user will choose one of those ORs for the start and another of
  1338. those ORs as the end of a circuit. When this happens, the user's
  1339. anonymity is compromised for those circuits. If an adversary can
control $m$ out of $N$ nodes, he should be able to correlate at most
$\left(\frac{m}{N}\right)^2$ of the traffic in this way---although an adversary
  1342. could possibly attract a disproportionately large amount of traffic
by running an exit node with an unusually permissive exit policy.
  1344. \item \emph{Compromise entire path.} Anyone compromising both
  1345. endpoints of a circuit can confirm this with high probability. If
  1346. the entire path is compromised, this becomes a certainty; however,
  1347. the added benefit to the adversary of such an attack is small in
  1348. relation to the difficulty.
  1349. \item \emph{Run a hostile directory server.} Directory servers control
  1350. admission to the network. However, because the network directory
  1351. must be signed by a majority of servers, the threat of a single
  1352. hostile server is minimized.
  1353. \item \emph{Selectively DoS a Tor node.} As noted, neighbors are
bandwidth limited; however, it is possible to open enough circuits
that converge at a single onion router to
  1356. overwhelm its network connection, its ability to process new
  1357. circuits, or both.
  1358. %OK so I noticed that twins are completely removed from the paper above,
  1359. % but it's after 5 so I'll leave that problem to you guys. -PS
  1360. \item \emph{Introduce timing into messages.} This is simply a stronger
  1361. version of passive timing attacks already discussed above.
  1362. \item \emph{Tagging attacks.} A hostile node could try to ``tag'' a
  1363. cell by altering it. This would render it unreadable, but if the
  1364. connection is, for example, an unencrypted request to a Web site,
  1365. the garbled content coming out at the appropriate time could confirm
  1366. the association. However, integrity checks on cells prevent
  1367. this attack from succeeding.
\item \emph{Replace contents of unauthenticated protocols.} When
relaying an unauthenticated protocol like HTTP, a hostile exit node
  1370. can impersonate the target server. Thus, whenever possible, clients
  1371. should prefer protocols with end-to-end authentication.
  1372. \item \emph{Replay attacks.} Some anonymity protocols are vulnerable
  1373. to replay attacks. Tor is not; replaying one side of a handshake
  1374. will result in a different negotiated session key, and so the rest
  1375. of the recorded session can't be used.
  1376. % ``NonSSL Anonymizer''?
  1377. \item \emph{Smear attacks.} An attacker could use the Tor network to
engage in socially disapproved acts, so as to try to bring the
  1379. entire network into disrepute and get its operators to shut it down.
  1380. Exit policies can help reduce the possibilities for abuse, but
  1381. ultimately, the network will require volunteers who can tolerate
  1382. some political heat.
  1383. \item \emph{Distribute hostile code.} An attacker could trick users
  1384. into running subverted Tor software that did not, in fact, anonymize
  1385. their connections---or worse, trick ORs into running weakened
  1386. software that provided users with less anonymity. We address this
  1387. problem (but do not solve it completely) by signing all Tor releases
with an official public key, and including an entry in the directory
  1389. describing which versions are currently believed to be secure. To
  1390. prevent an attacker from subverting the official release itself
  1391. (through threats, bribery, or insider attacks), we provide all
  1392. releases in source code form, encourage source audits, and
  1393. frequently warn our users never to trust any software (even from
  1394. us!) that comes without source.
  1395. \end{tightlist}
  1396. \subsubsection*{Directory attacks}
  1397. \begin{tightlist}
  1398. \item \emph{Destroy directory servers.} If a single directory
  1399. server drops out of operation, the others still arrive at a final
  1400. directory. So long as any directory servers remain in operation,
  1401. they will still broadcast their views of the network and generate a
  1402. consensus directory. (If more than half are destroyed, this
  1403. directory will not, however, have enough signatures for clients to
  1404. use it automatically; human intervention will be necessary for
  1405. clients to decide whether to trust the resulting directory.)
  1406. \item \emph{Subvert a directory server.} By taking over a directory
  1407. server, an attacker can influence (but not control) the final
  1408. directory. Since ORs are included or excluded by majority vote,
the corrupt directory server can at worst cast a tie-breaking vote to
  1410. decide whether to include marginal ORs. How often such marginal
  1411. cases will occur in practice, however, remains to be seen.
  1412. \item \emph{Subvert a majority of directory servers.} If the
  1413. adversary controls more than half of the directory servers, he can
  1414. decide on a final directory, and thus can include as many
  1415. compromised ORs in the final directory as he wishes. Other than
  1416. trying to ensure that directory server operators are truly
  1417. independent and resistant to attack, Tor does not address this
  1418. possibility.
  1419. \item \emph{Encourage directory server dissent.} The directory
  1420. agreement protocol requires that directory server operators agree on
  1421. the list of directory servers. An adversary who can persuade some
  1422. of the directory server operators to distrust one another could
  1423. split the quorum into mutually hostile camps, thus partitioning
  1424. users based on which directory they used. Tor does not address
  1425. this attack.
  1426. \item \emph{Trick the directory servers into listing a hostile OR.}
  1427. Our threat model explicitly assumes directory server operators will
  1428. be able to filter out most hostile ORs. If this is not true, an
  1429. attacker can flood the directory with compromised servers.
  1430. \item \emph{Convince the directories that a malfunctioning OR is
  1431. working.} In the current Tor implementation, directory servers
assume that if they can start a TLS connection to an OR, that OR
  1433. must be running correctly. It would be easy for a hostile OR to
  1434. subvert this test by only accepting TLS connections from ORs, and
  1435. ignoring all cells. Thus, directory servers must actively test ORs
  1436. by building circuits and streams as appropriate. The benefits and
  1437. hazards of a similar approach are discussed in \cite{mix-acc}.
  1438. \end{tightlist}
  1439. \subsubsection*{Attacks against rendezvous points}
  1440. \begin{tightlist}
  1441. \item \emph{Make many introduction requests.} An attacker could
  1442. attempt to deny Bob service by flooding his Introduction Point with
  1443. requests. Because the introduction point can block requests that
  1444. lack authentication tokens, however, Bob can restrict the volume of
  1445. requests he receives, or require a certain amount of computation for
  1446. every request he receives.
  1447. \item \emph{Attack an introduction point.} An attacker could try to
  1448. disrupt a location-hidden service by disabling its introduction
  1449. point. But because a service's identity is attached to its public
  1450. key, not its introduction point, the service can simply re-advertise
  1451. itself at a different introduction point.
  1452. \item \emph{Compromise an introduction point.} If an attacker controls
  1453. an introduction point for a service, it can flood the service with
  1454. introduction requests, or prevent valid introduction requests from
  1455. reaching the hidden server. The server will notice a flooding
  1456. attempt if it receives many introduction requests. To notice
  1457. blocking of valid requests, however, the hidden server should
periodically test the introduction point by sending it introduction
  1459. requests, and making sure it receives them.
  1460. \item \emph{Compromise a rendezvous point.} Controlling a rendezvous
  1461. point gains an attacker no more than controlling any other OR along
  1462. a circuit, since all data passing along the rendezvous is protected
  1463. by the session key shared by the client and server.
  1464. \end{tightlist}
  1465. \Section{Open Questions in Low-latency Anonymity}
  1466. \label{sec:maintaining-anonymity}
  1467. % There must be a better intro than this! -NM
  1468. In addition to the open problems discussed in
  1469. Section~\ref{subsec:non-goals}, many other questions remain to be
  1470. solved by future research before we can be truly confident that we
  1471. have built a secure low-latency anonymity service.
  1472. Many of these open issues are questions of balance. For example,
  1473. how often should users rotate to fresh circuits? Too-frequent
  1474. rotation is inefficient and expensive, but too-infrequent rotation
  1475. makes the user's traffic linkable. Instead of opening a fresh
circuit, clients can also limit linkability by exiting from a middle point
  1477. of the circuit, or by truncating and re-extending the circuit, but
  1478. more analysis is needed to determine the proper trade-off.
  1479. %[XXX mention predecessor attacks?]
  1480. A similar question surrounds timing of directory operations:
  1481. how often should directories be updated? With too-infrequent
  1482. updates clients receive an inaccurate picture of the network; with
  1483. too-frequent updates the directory servers are overloaded.
  1484. %do different exit policies at different exit nodes trash anonymity sets,
  1485. %or not mess with them much?
  1486. %
  1487. %% Why would they? By routing traffic to certain nodes preferentially?
  1488. %[XXX Choosing paths and path lengths: I'm not writing this bit till
  1489. % Arma's pathselection stuff is in. -NM]
  1490. %%%% Roger said that he'd put a path selection paragraph into section
  1491. %%%% 4 that would replace this.
  1492. %
  1493. %I probably should have noted that this means loops will be on at least
  1494. %five hop routes, which should be rare given the distribution. I'm
  1495. %realizing that this is reproducing some of the thought that led to a
  1496. %default of five hops in the original onion routing design. There were
  1497. %some different assumptions, which I won't spell out now. Note that
  1498. %enclave level protections really change these assumptions. If most
  1499. %circuits are just two hops, then just a single link observer will be
  1500. %able to tell that two enclaves are communicating with high probability.
  1501. %So, it would seem that enclaves should have a four node minimum circuit
  1502. %to prevent trivial circuit insider identification of the whole circuit,
  1503. %and three hop minimum for circuits from an enclave to some nonclave
  1504. %responder. But then... we would have to make everyone obey these rules
  1505. %or a node that through timing inferred it was on a four hop circuit
  1506. %would know that it was probably carrying enclave to enclave traffic.
  1507. %Which... if there were even a moderate number of bad nodes in the
  1508. %network would make it advantageous to break the connection to conduct
  1509. %a reformation intersection attack. Ahhh! I gotta stop thinking
  1510. %about this and work on the paper some before the family wakes up.
  1511. %On Sat, Oct 25, 2003 at 06:57:12AM -0400, Paul Syverson wrote:
  1512. %> Which... if there were even a moderate number of bad nodes in the
  1513. %> network would make it advantageous to break the connection to conduct
  1514. %> a reformation intersection attack. Ahhh! I gotta stop thinking
  1515. %> about this and work on the paper some before the family wakes up.
  1516. %This is the sort of issue that should go in the 'maintaining anonymity
  1517. %with tor' section towards the end. :)
  1518. %Email from between roger and me to beginning of section above. Fix and move.
  1519. Throughout this paper, we have assumed that end-to-end traffic
  1520. analysis will immediately and automatically defeat a low-latency
  1521. anonymity system. Even high-latency anonymity
  1522. systems can be vulnerable to end-to-end traffic analysis, if the
  1523. traffic volumes are high enough, and if users' habits are sufficiently
  1524. distinct \cite{limits-open,statistical-disclosure}. \emph{Can
anything be done to make low-latency systems resist these attacks as
  1526. well as high-latency systems?}
  1527. Tor already makes some effort to conceal the starts and
  1528. ends of streams by wrapping all long-range control commands in
  1529. identical-looking relay cells, but more analysis is needed. Link
  1530. padding could frustrate passive observers who count packets; long-range
  1531. padding could work against observers who own the first hop in a
  1532. circuit. But more research needs to be done in order to find an
  1533. efficient and practical approach. Volunteers prefer not to run
constant-bandwidth padding, but more sophisticated traffic shaping
  1535. approaches remain somewhat unanalyzed.
  1536. %[XXX is this so?]
  1537. Recent work
  1538. on long-range padding \cite{defensive-dropping} shows promise. One
  1539. could also try to reduce correlation in packet timing by batching and
  1540. re-ordering packets, but it is unclear whether this could improve
  1541. anonymity without introducing so much latency as to render the
  1542. network unusable.
  1543. Even if passive timing attacks were wholly solved, active timing
  1544. attacks would remain. \emph{What can
  1545. be done to address attackers who can introduce timing patterns into
  1546. a user's traffic?} % [XXX mention likely approaches]
  1547. %%% I think we cover this by framing the problem as ``Can we make
  1548. %%% end-to-end characteristics of low-latency systems as good as
  1549. %%% those of high-latency systems?'' Eliminating long-term
  1550. %%% intersection is a hard problem.
  1551. %
  1552. %Even regardless of link padding from Alice to the cloud, there will be
  1553. %times when Alice is simply not online. Link padding, at the edges or
  1554. %inside the cloud, does not help for this.
  1555. In order to scale to large numbers of users, and to prevent an
  1556. attacker from observing the whole network at once, it may be necessary
  1557. for low-latency anonymity systems to support far more servers than Tor
  1558. currently anticipates. This introduces several issues. First, if
  1559. approval by a centralized set of directory servers is no longer
  1560. feasible, what mechanism should be used to prevent adversaries from
  1561. signing up many spurious servers?
  1562. Second, if clients can no longer have a complete
picture of the network at all times, how should they perform
  1564. discovery while preventing attackers from manipulating or exploiting
  1565. gaps in client knowledge? Third, if there are too many servers
  1566. for every server to constantly communicate with every other, what kind
  1567. of non-clique topology should the network use? Restricted-route
  1568. topologies promise comparable anonymity with better scalability
  1569. \cite{danezis-pets03}, but whatever topology we choose, we need some
  1570. way to keep attackers from manipulating their position within it.
  1571. Fourth, since no centralized authority is tracking server reliability,
how do we prevent unreliable servers from rendering the network
  1573. unusable? Fifth, do clients receive so much anonymity benefit from
  1574. running their own servers that we should expect them all to do so, or
  1575. do we need to find another incentive structure to motivate them?
  1576. (Tarzan and MorphMix present possible solutions.)
  1577. % [[ XXX how to approve new nodes (advogato, sybil, captcha (RTT));]
  1578. Alternatively, it may be the case that one of these problems proves
  1579. intractable, or that the drawbacks to many-server systems prove
  1580. greater than the benefits. Nevertheless, we may still do well to
  1581. consider non-clique topologies. A cascade topology may provide more
defense against traffic confirmation.
  1583. % XXX Why would it? Cite. -NM
  1584. Does the hydra (many inputs, few outputs) topology work
  1585. better? Are we going to get a hydra anyway because most nodes will be
  1586. middleman nodes?
  1587. As mentioned in Section~\ref{subsec:dos}, Tor could improve its
  1588. robustness against node failure by buffering transmitted stream data
  1589. at the network's edges until the data has been acknowledged by the
  1590. other end of the stream. The efficacy of this approach remains to be
  1591. tested, however, and there may be more effective means for ensuring
  1592. reliable connections in the presence of unreliable nodes.
%%% Keeping this original paragraph for a little while, since it
%%% is not the same as what's written there now.
%
%Because Tor depends on TLS and TCP to provide a reliable transport,
%when one of the servers goes down, all the circuits (and thus streams)
%traveling over that server must break. This reduces anonymity because
%everybody needs to reconnect right then (does it? how much?) and
%because exit connections all break at the same time, and it also harms
%usability. It seems the problem is even worse in a peer-to-peer
%environment, because so far such systems don't really provide an
%incentive for nodes to stay connected when they're done browsing, so
%we would expect a much higher churn rate than for onion routing.
%Are there ways of allowing streams to survive the loss of a node in the
%path?
% Roger or Paul suggested that we say something about incentives,
% too, but I think that's a better candidate for our future work
% section. After all, we will doubtlessly learn very much about why
% people do or don't run and use Tor in the near future. -NM
%We should run a squid at each exit node, to provide comparable anonymity
%to private exit nodes for cache hits, to speed everything up, and to
%have a buffer for funny stuff coming out of port 80.
% on the other hand, it hampers PFS, because ORs have pages in the cache.
%I previously elsewhere suggested bulk transfer proxies to carve
%up big things so that they could be downloaded in less noticeable
%pieces over several normal looking connections. We could suggest
%similarly one or a handful of squid nodes that might serve up
%some of the more sensitive but common material, especially if
%the relevant sites didn't want to or couldn't run their own OR.
%This would be better than having everyone run a squid which would
%just help identify after the fact the different history of that
%node's activity. All this kind of speculation needs to move to
%future work section I guess. -PS]
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\Section{Future Directions}
\label{sec:conclusion}
Tor brings together many innovations into
a unified deployable system. But there are still several attacks that
work quite well, as well as a number of sustainability and run-time
issues remaining to be ironed out. In particular:
% Many of these (Scalability, cover traffic, morphmix)
% are duplicates from open problems.
%
\begin{tightlist}
\item \emph{Scalability:} Tor's emphasis on design simplicity and
deployability has led us to adopt a clique topology, a
semi-centralized model for directories and trust, and a
full-network-visibility model for client knowledge. None of these
properties will scale to more than a few hundred servers, at most.
Promising approaches to better scalability exist (see
Section~\ref{sec:maintaining-anonymity}), but more deployment
experience would be helpful in learning the relative importance of
these bottlenecks.
\item \emph{Cover traffic:} Currently we avoid cover traffic because
of its clear costs in performance and bandwidth, and because its
security benefits have not been well understood. With more research
\cite{SS03,defensive-dropping}, the price/value ratio may change,
both for link-level and for long-range cover traffic.
\item \emph{Better directory distribution:} Even with the threshold
directory agreement algorithm described in Section~\ref{subsec:dirservers},
the directory servers are still trust bottlenecks. We must find more
decentralized yet practical ways to distribute up-to-date snapshots of
network status without introducing new attacks. Also, directory
retrieval presents a scaling problem, since clients currently
download a description of the entire network state every 15
minutes. As the state grows larger and clients more numerous, we
may need to move to a solution in which clients only receive
incremental updates to directory state, or where directories are
cached at the ORs to avoid high loads on the directory servers
(see the illustrative sketch following this list).
\item \emph{Implementing location-hidden servers:} While
Section~\ref{sec:rendezvous} describes a design for rendezvous
points and location-hidden servers, these features have not yet been
implemented. In implementing them, we will likely encounter additional
issues, both in terms of usability and anonymity, that must be
resolved.
\item \emph{Further specification review:} Although we have a public,
byte-level specification for the Tor protocols, this specification has
not received extensive external review. We hope that as Tor
becomes more widely deployed, more people will become interested in
examining our specification.
\item \emph{Wider-scale deployment:} The original goal of Tor was to
gain experience in deploying an anonymizing overlay network, and
learn from having actual users. We are now at the point in design
and development where we can start deploying a wider network. Once
we are ready for actual users, we will doubtlessly be better
able to evaluate some of our design decisions, including our
robustness/latency trade-offs, our performance trade-offs (including
cell size), our abuse-prevention mechanisms, and
our overall usability.
% XXX work with morphmix spec
\end{tightlist}
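To make the directory-distribution item above concrete, here is a minimal,
hypothetical sketch of incremental directory updates. The interface names
(\texttt{get\_delta}, \texttt{get\_full\_directory}, \texttt{current\_version})
are invented for illustration and do not describe the deployed directory
protocol.
\begin{verbatim}
# Hypothetical sketch only: a client refreshes its cached network state
# by fetching a delta since its last known directory version, falling
# back to a full download when no suitable delta is available.
def refresh_directory(client_state, server):
    # client_state: {"version": int, "routers": {nickname: descriptor}}
    delta = server.get_delta(since=client_state["version"])
    if delta is None:
        # The server cannot diff against a snapshot this old.
        client_state["routers"] = server.get_full_directory()
    else:
        for nickname, descriptor in delta["changed"].items():
            client_state["routers"][nickname] = descriptor
        for nickname in delta["removed"]:
            client_state["routers"].pop(nickname, None)
    client_state["version"] = server.current_version()
    return client_state
\end{verbatim}
Caching such deltas at the ORs themselves could further reduce load on the
directory servers, as noted above.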
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% commented out for anonymous submission
%\Section{Acknowledgments}
% Peter Palfrader, Geoff Goodell, Adam Shostack, Joseph Sokol-Margolis
% for editing and comments
% Bram Cohen for congestion control discussions
% Adam Back for suggesting telescoping circuits
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliographystyle{latex8}
\bibliography{tor-design}
\end{document}
% Style guide:
% U.S. spelling
% avoid contractions (it's, can't, etc.)
% prefer ``for example'' or ``such as'' to e.g.
% prefer ``that is'' to i.e.
% 'mix', 'mixes' (as noun)
% 'mix-net'
% 'mix', 'mixing' (as verb)
% 'middleman' [Not with a hyphen; the hyphen has been optional
% since Middle English.]
% 'nymserver'
% 'Cypherpunk', 'Cypherpunks', 'Cypherpunk remailer'
% 'Onion Routing design', 'onion router' [note capitalization]
% 'SOCKS'
% Try not to use \cite as a noun.
% 'Authorizating' sounds great, but it isn't a word.
% 'First, second, third', not 'Firstly, secondly, thirdly'.
% 'circuit', not 'channel'
% Typography: no space on either side of an em dash---ever.
% Hyphens are for multi-part words; en dashes imply movement or
% opposition (The Alice--Bob connection); and em dashes are
% for punctuation---like that.
%
% 'Substitute ``Damn'' every time you're inclined to write ``very;'' your
% editor will delete it and the writing will be just as it should be.'
% -- Mark Twain