  1. \documentclass[times,10pt,twocolumn]{article}
  2. \usepackage{latex8}
  3. \usepackage{times}
  4. \usepackage{url}
  5. \usepackage{graphics}
  6. \usepackage{amsmath}
  7. \pagestyle{empty}
  8. \renewcommand\url{\begingroup \def\UrlLeft{<}\def\UrlRight{>}\urlstyle{tt}\Url}
  9. \newcommand\emailaddr{\begingroup \def\UrlLeft{<}\def\UrlRight{>}\urlstyle{tt}\Url}
  10. % If an URL ends up with '%'s in it, that's because the line *in the .bib/.tex
  11. % file* is too long, so break it there (it doesn't matter if the next line is
  12. % indented with spaces). -DH
  13. %\newif\ifpdf
  14. %\ifx\pdfoutput\undefined
  15. % \pdffalse
  16. %\else
  17. % \pdfoutput=1
  18. % \pdftrue
  19. %\fi
  20. \newenvironment{tightlist}{\begin{list}{$\bullet$}{
  21. \setlength{\itemsep}{0mm}
  22. \setlength{\parsep}{0mm}
  23. % \setlength{\labelsep}{0mm}
  24. % \setlength{\labelwidth}{0mm}
  25. % \setlength{\topsep}{0mm}
  26. }}{\end{list}}
  27. \begin{document}
  28. %% Use dvipdfm instead. --DH
  29. %\ifpdf
  30. % \pdfcompresslevel=9
  31. % \pdfpagewidth=\the\paperwidth
  32. % \pdfpageheight=\the\paperheight
  33. %\fi
  34. \title{Tor: The Second-Generation Onion Router}
  35. % Putting the 'Private' back in 'Virtual Private Network'
  36. %\author{Roger Dingledine \\ The Free Haven Project \\ arma@freehaven.net \and
  37. %Nick Mathewson \\ The Free Haven Project \\ nickm@freehaven.net \and
  38. %Paul Syverson \\ Naval Research Lab \\ syverson@itd.nrl.navy.mil}
  39. \maketitle
  40. \thispagestyle{empty}
  41. \begin{abstract}
  42. We present Tor, a circuit-based low-latency anonymous communication
  43. system. This second-generation Onion Routing system addresses limitations
  44. in the original design. We add perfect forward secrecy, congestion
  45. control, directory servers, integrity checking, variable exit policies,
  46. and a practical design for rendezvous points. Tor works on the real-world
  47. Internet, requires no special privileges or kernel modifications, requires
  48. little synchronization or coordination between nodes, and provides a
  49. reasonable trade-off between anonymity, usability, and efficiency. We
  50. close with a list of open problems in anonymous communication systems.
  51. \end{abstract}
  52. %\begin{center}
  53. %\textbf{Keywords:} anonymity, peer-to-peer, remailer, nymserver, reply block
  54. %\end{center}
  55. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  56. \Section{Overview}
  57. \label{sec:intro}
  58. Onion Routing is a distributed overlay network designed to anonymize
  59. low-latency TCP-based applications such as web browsing, secure shell,
  60. and instant messaging. Clients choose a path through the network and
  61. build a \emph{virtual circuit}, in which each node (or ``onion router'')
  62. in the path knows its predecessor and successor, but no other nodes in
  63. the circuit. Traffic flowing down the circuit is sent in fixed-size
  64. \emph{cells}, which are unwrapped by a symmetric key at each node
  65. (like the layers of an onion) and relayed downstream. The original
  66. Onion Routing project published several design and analysis papers
  67. \cite{or-ih96,or-jsac98,or-discex00,or-pet00}. While a wide area Onion
  68. Routing network was deployed for some weeks, the only long-running and
  69. publicly accessible implementation of the original design was a fragile
  70. proof-of-concept that ran on a single machine. Even this simple deployment
  71. processed tens of thousands of connections daily from thousands of users
  72. worldwide. But many critical design and deployment issues were never
  73. resolved, and the design has not been updated in several years. Here
  74. we describe Tor, a protocol for asynchronous, loosely federated onion
  75. routers that provides the following improvements over the old Onion
  76. Routing design:
  77. \begin{tightlist}
  78. \item \textbf{Perfect forward secrecy:} The original Onion Routing
  79. design was vulnerable to a single hostile node recording traffic and
  80. later compromising successive nodes in the circuit and forcing them
  81. to decrypt it. Rather than using a single onion to lay each circuit,
  82. Tor now uses an incremental or \emph{telescoping} path-building design,
  83. where the initiator negotiates session keys with each successive hop in
  84. the circuit. Once these keys are deleted, subsequently compromised nodes
  85. cannot decrypt old traffic. As a side benefit, onion replay detection
  86. is no longer necessary, and the process of building circuits is more
  87. reliable, since the initiator knows when a hop fails and can then try
  88. extending to a new node.
  89. \item \textbf{Separation of protocol cleaning from anonymity:}
  90. The original Onion Routing design required a separate ``application
  91. proxy'' for each supported application protocol---most of which were
  92. never written, so many applications were never supported. Tor uses the
  93. standard and near-ubiquitous SOCKS \cite{socks4} proxy interface, allowing
  94. us to support most TCP-based programs without modification. This design
  95. change allows Tor to use the filtering features of privacy-enhancing
  96. application-level proxies such as Privoxy \cite{privoxy} without having
  97. to incorporate those features itself.
  98. \item \textbf{Many TCP streams can share one circuit:} The
  99. original Onion Routing design built a separate circuit for each
  100. application-level request. This hurt performance by requiring
  101. multiple public key operations for every request, and also presented
  102. a threat to anonymity from building so many different circuits; see
  103. Section~\ref{sec:maintaining-anonymity}. Tor multiplexes multiple TCP
  104. streams along each virtual circuit, to improve efficiency and anonymity.
  105. \item \textbf{Leaky-pipe circuit topology:} Through in-band signalling
  106. within the circuit, Tor initiators can direct traffic to nodes partway
  107. down the circuit. This novel approach allows both for long-range
  108. padding to frustrate traffic shape and volume attacks at the initiator
  109. \cite{defensive-dropping}, and, because circuits are used by more than one
  110. application, allows traffic to exit the circuit from the middle---thus
  111. frustrating traffic shape and volume attacks based on observing the end
  112. of the circuit.
  113. \item \textbf{No mixing, padding, or traffic shaping:} The original Onion
  114. Routing design called for batching and reordering the cells arriving from
  115. each circuit. It also included padding between onion routers and, in a
  116. later design, between onion proxies (that is, users) and onion routers
  117. \cite{or-ih96,or-jsac98}. The trade-off between padding protection
  118. and cost was discussed, but no general padding scheme was suggested. In
119. \cite{or-pet00} it was theorized that \emph{traffic shaping} would generally
  120. be used, but details were not provided. Recent research \cite{econymics}
  121. and deployment experience \cite{freedom21-security} suggest that this
  122. level of resource use is not practical or economical; and even full link
  123. padding is still vulnerable \cite{defensive-dropping}. Thus, until we
  124. have a proven and convenient design for traffic shaping or low-latency
  125. mixing that will improve anonymity against a realistic adversary, we
  126. leave these strategies out.
  127. \item \textbf{Congestion control:} Earlier anonymity designs do not
  128. address traffic bottlenecks. Unfortunately, typical approaches to
  129. load balancing and flow control in overlay networks involve inter-node
  130. control communication and global views of traffic. Tor's decentralized
  131. congestion control uses end-to-end acks to maintain reasonable anonymity
  132. while allowing nodes at the edges of the network to detect congestion
  133. or flooding attacks and send less data until the congestion subsides.
  134. \item \textbf{Directory servers:} The original Onion Routing design
  135. planned to flood link-state information through the network---an approach
  136. that can be unreliable and open to partitioning attacks or outright
  137. deception. Tor takes a simplified view toward distributing link-state
  138. information. Certain more trusted onion routers also act as directory
  139. servers: they provide signed \emph{directories} that describe known
  140. routers and their availability. Users periodically download these
  141. directories via HTTP.
  142. \item \textbf{Variable exit policies:} Tor provides a consistent mechanism
  143. for each node to specify and advertise a policy describing the hosts
  144. and ports to which it will connect. These exit policies are critical
  145. in a volunteer-based distributed infrastructure, because each operator
  146. is comfortable with allowing different types of traffic to exit the Tor
  147. network from his node.
  148. \item \textbf{End-to-end integrity checking:} The original Onion Routing
  149. design did no integrity checking on data. Any onion router on the
  150. circuit could change the contents of data cells as they passed by---for
  151. example, to alter a connection request on the fly so it would connect
  152. to a different webserver, or to `tag' encrypted traffic and look for
  153. corresponding corrupted traffic at the network edges \cite{minion-design}.
  154. Tor hampers these attacks by checking data integrity before it leaves
  155. the network.
  156. \item \textbf{Improved robustness to failed nodes:} A failed node
  157. in the old design meant that circuit-building failed, but thanks to
  158. Tor's step-by-step circuit building, users can notice failed nodes
  159. while building circuits and route around them. Additionally, liveness
  160. information from directories allows users to avoid unreliable nodes in
  161. the first place.
  162. \item \textbf{Rendezvous points and location-protected servers:}
  163. Tor provides an integrated mechanism for responder anonymity via
  164. location-protected servers. Previous Onion Routing designs included
  165. long-lived ``reply onions'' that could be used to build virtual circuits
  166. to a hidden server, but these reply onions did not provide forward
  167. security, and would become useless if any node in the path went down
  168. or rotated its keys. In Tor, clients negotiate {\it rendezvous points}
  169. to connect with hidden servers; reply onions are no longer required.
  170. \end{tightlist}
171. Unlike anonymity systems such as Freedom \cite{freedom2-arch}, Tor only
  172. attempts to anonymize TCP streams. Because it does not require patches
  173. (or built-in support) in an operating system's network stack, this
  174. approach has proven valuable to Tor's portability and deployability.
  175. We have implemented most of the above features. Our source code is
  176. available under a free license, and we believe it to be unencumbered by
  177. patents. We have recently begun deploying a widespread alpha network
  178. to test the design in practice, to get more experience with usability
  179. and users, and to provide a research platform for experimenting with
  180. new ideas.
  181. We review previous work in Section~\ref{sec:related-work}, describe
  182. our goals and assumptions in Section~\ref{sec:assumptions},
  183. and then address the above list of improvements in
184. Sections~\ref{sec:design}--\ref{sec:rendezvous}. We summarize
  185. in Section~\ref{sec:attacks} how our design stands up to
  186. known attacks, and conclude with a list of open problems in
  187. Section~\ref{sec:maintaining-anonymity} and future work for the Onion
  188. Routing project in Section~\ref{sec:conclusion}.
  189. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  190. \Section{Related work}
  191. \label{sec:related-work}
  192. Modern anonymity systems date to Chaum's Mix-Net design
  193. \cite{chaum-mix}. Chaum
  194. proposed hiding the correspondence between sender and recipient by
  195. wrapping messages in layers of public key cryptography, and relaying them
  196. through a path composed of ``Mixes.'' These mixes in turn decrypt, delay,
  197. and re-order messages, before relaying them along the sender-selected
  198. path towards their destinations.
  199. Subsequent relay-based anonymity designs have diverged in two
  200. principal directions. Some have attempted to maximize anonymity at
  201. the cost of introducing comparatively large and variable latencies,
  202. including Babel \cite{babel}, Mixmaster \cite{mixmaster-spec}, and
  203. Mixminion \cite{minion-design}. Because of this
  204. decision, these \emph{high-latency} networks are well-suited for anonymous
  205. email, but introduce too much lag for interactive tasks such as web browsing,
  206. internet chat, or SSH connections.
  207. Tor belongs to the second category: \emph{low-latency} designs that
  208. attempt to anonymize interactive network traffic. These systems handle
  209. a variety of bidirectional protocols. They also provide more convenient
  210. mail delivery than the high-latency fire-and-forget anonymous email
  211. networks, because the remote mail server provides explicit delivery
  212. confirmation. But because these designs typically
  213. involve many packets that must be delivered quickly, it is
  214. difficult for them to prevent an attacker who can eavesdrop both ends of the
  215. communication from correlating the timing and volume
  216. of traffic entering the anonymity network with traffic leaving it. These
217. protocols are also vulnerable to active attacks in which an
  218. adversary introduces timing patterns into traffic entering the network, and
  219. looks
  220. for correlated patterns among exiting traffic.
  221. Although some work has been done to frustrate
  222. these attacks,\footnote{
  223. The most common approach is to pad and limit communication to a constant
  224. rate, or to limit
  225. the variation in traffic shape. Doing so can have prohibitive bandwidth
  226. costs and/or performance limitations.
  227. } most designs protect primarily against traffic analysis rather than traffic
  228. confirmation \cite{or-jsac98}---that is, they assume that the attacker is
  229. attempting to learn who is talking to whom, not to confirm a prior suspicion
  230. about who is talking to whom.
  231. The simplest low-latency designs are single-hop proxies such as the
  232. Anonymizer \cite{anonymizer}, wherein a single trusted server strips the
  233. data's origin before relaying it. These designs are easy to
  234. analyze, but require end-users to trust the anonymizing proxy.
  235. Concentrating the traffic to a single point increases the anonymity set
  236. (the set of people a given user is hiding among), but it can make traffic
  237. analysis easier: an adversary need only eavesdrop on the proxy to observe
  238. the entire system.
  239. More complex are distributed-trust, circuit-based anonymizing systems.
  240. In these designs, a user establishes one or more medium-term bidirectional
  241. end-to-end circuits, and tunnels TCP streams in fixed-size cells.
  242. Establishing circuits is computationally expensive and typically
  243. requires public-key
  244. cryptography, whereas relaying cells is comparatively inexpensive and
  245. typically requires only symmetric encryption.
  246. Because a circuit crosses several servers, and each server only knows
  247. the adjacent servers in the circuit, no single server can link a
  248. user to her communication partners.
  249. The Java Anon Proxy (also known as JAP or Web MIXes) uses fixed shared
  250. routes known as \emph{cascades}. As with a single-hop proxy, this
  251. approach aggregates users into larger anonymity sets, but again an
  252. attacker only needs to observe both ends of the cascade to bridge all
  253. the system's traffic. The Java Anon Proxy's design provides
  254. protection by padding between end users and the head of the cascade
255. \cite{web-mix}. However, it has not been demonstrated whether the current
  256. implementation's padding policy improves anonymity.
  257. PipeNet \cite{back01, pipenet}, another low-latency design proposed at
  258. about the same time as the original Onion Routing design, provided
  259. stronger anonymity at the cost of allowing a single user to shut
  260. down the network simply by not sending. Low-latency anonymous
  261. communication has also been designed for other environments such as
  262. ISDN \cite{isdn-mixes}.
  263. In P2P designs like Tarzan \cite{tarzan:ccs02} and MorphMix
  264. \cite{morphmix:fc04}, all participants both generate traffic and relay
  265. traffic for others. These systems aim to prevent a peer
  266. or observer from knowing whether a given peer originated a request
  267. or just relayed it from another peer. While Tarzan and MorphMix use
  268. layered encryption as above, Crowds \cite{crowds-tissec} simply assumes
  269. an adversary who cannot observe the initiator: it uses no public-key
  270. encryption, so nodes on a circuit can read that circuit's traffic.
  271. Hordes \cite{hordes-jcs} is based on Crowds but also uses multicast
  272. responses to hide the initiator. Herbivore \cite{herbivore} and P5
  273. \cite{p5} go even further, requiring broadcast. They make anonymity
  274. and efficiency trade-offs to make broadcast more practical.
  275. These systems are designed primarily for communication between peers,
  276. although Herbivore users can make external connections by
  277. requesting a peer to serve as a proxy.
  278. Systems like Freedom and the original Onion Routing build the circuit
  279. all at once, using a layered ``onion'' of public-key encrypted messages,
  280. each layer of which provides a set of session keys and the address of the
  281. next server in the circuit. Tor as described herein, Tarzan, MorphMix,
  282. Cebolla \cite{cebolla}, and Rennhard's Anonymity Network \cite{anonnet}
  283. build the circuit
  284. in stages, extending it one hop at a time.
  285. Section~\ref{subsubsec:constructing-a-circuit} describes how this
  286. approach makes perfect forward secrecy feasible.
  287. Circuit-based anonymity designs must choose which protocol layer
  288. to anonymize. They may choose to intercept IP packets directly, and
  289. relay them whole (stripping the source address) along the circuit
  290. \cite{freedom2-arch,tarzan:ccs02}. Alternatively, like
  291. Tor, they may accept TCP streams and relay the data in those streams
  292. along the circuit, ignoring the breakdown of that data into TCP frames
  293. \cite{morphmix:fc04,anonnet}. Finally, they may accept application-level
  294. protocols (such as HTTP) and relay the application requests themselves
  295. along the circuit.
  296. Making this protocol-layer decision requires a compromise between flexibility
  297. and anonymity. For example, a system that understands HTTP can strip
  298. identifying information from those requests, can take advantage of caching
  299. to limit the number of requests that leave the network, and can batch
  300. or encode those requests in order to minimize the number of connections.
  301. On the other hand, an IP-level anonymizer can handle nearly any protocol,
302. even ones unforeseen by its designers (though these systems require
  303. kernel-level modifications to some operating systems, and so are more
  304. complex and less portable). TCP-level anonymity networks like Tor present
  305. a middle approach: they are fairly application neutral (so long as the
  306. application supports, or can be tunneled across, TCP), but by treating
  307. application connections as data streams rather than raw TCP packets,
  308. they avoid the well-known inefficiencies of tunneling TCP over TCP
  309. \cite{tcp-over-tcp-is-bad}.
  310. Distributed-trust anonymizing systems need to prevent attackers from
  311. adding too many servers and thus compromising too many user paths.
  312. Tor relies on a small set of well-known directory servers, run by
  313. independent parties, to make decisions about which nodes can
  314. join. Tarzan and MorphMix allow unknown users to run servers, and use
  315. a limited resource (like IP addresses) to prevent an attacker from
  316. controlling too much of the network. Crowds suggests requiring
  317. written, notarized requests from potential crowd members.
  318. Anonymous communication is essential for censorship-resistant
  319. systems like Eternity \cite{eternity}, Free~Haven \cite{freehaven-berk},
  320. Publius \cite{publius}, and Tangler \cite{tangler}. Tor's rendezvous
  321. points enable connections between mutually anonymous entities; they
  322. are a building block for location-hidden servers, which are needed by
  323. Eternity and Free~Haven.
  324. % didn't include rewebbers. No clear place to put them, so I'll leave
  325. % them out for now. -RD
  326. \Section{Design goals and assumptions}
  327. \label{sec:assumptions}
  328. \SubSection{Goals}
  329. Like other low-latency anonymity designs, Tor seeks to frustrate
  330. attackers from linking communication partners, or from linking
  331. multiple communications to or from a single user. Within this
  332. main goal, however, several design considerations have directed
  333. Tor's evolution.
  334. \textbf{Deployability:} The design must be one that can be implemented,
  335. deployed, and used in the real world. This requirement precludes designs
  336. that are expensive to run (for example, by requiring more bandwidth
  337. than volunteers are willing to provide); designs that place a heavy
  338. liability burden on operators (for example, by allowing attackers to
  339. implicate onion routers in illegal activities); and designs that are
  340. difficult or expensive to implement (for example, by requiring kernel
  341. patches, or separate proxies for every protocol). This requirement also
  342. precludes systems in which non-anonymous parties (such as websites)
  343. must run our software. (We do not meet this goal for the current
  344. rendezvous design,
  345. however; see Section~\ref{sec:rendezvous}.)
  346. \textbf{Usability:} A hard-to-use system has fewer users---and because
  347. anonymity systems hide users among users, a system with fewer users
  348. provides less anonymity. Usability is thus not only a convenience for Tor:
  349. it is a security requirement \cite{econymics,back01}. Tor should
  350. therefore not
  351. require modifying applications; should not introduce prohibitive delays;
  352. and should require the user to make as few configuration decisions
  353. as possible.
  354. \textbf{Flexibility:} The protocol must be flexible and well-specified,
  355. so that it can serve as a test-bed for future research in low-latency
  356. anonymity systems. Many of the open problems in low-latency anonymity
  357. networks, such as generating dummy traffic or preventing Sybil attacks
  358. \cite{sybil}, may be solvable independently from the issues solved by
  359. Tor. Hopefully future systems will not need to reinvent Tor's design.
  360. (But note that while a flexible design benefits researchers,
  361. there is a danger that differing choices of extensions will make users
  362. distinguishable. Experiments should be run on a separate network.)
  363. \textbf{Simple design:} The protocol's design and security
  364. parameters must be well-understood. Additional features impose implementation
  365. and complexity costs; adding unproven techniques to the design threatens
  366. deployability, readability, and ease of security analysis. Tor aims to
  367. deploy a simple and stable system that integrates the best well-understood
  368. approaches to protecting anonymity.
  369. \SubSection{Non-goals}
  370. \label{subsec:non-goals}
  371. In favoring simple, deployable designs, we have explicitly deferred
  372. several possible goals, either because they are solved elsewhere, or because
  373. their solution is an open research problem.
  374. \textbf{Not Peer-to-peer:} Tarzan and MorphMix aim to scale to completely
  375. decentralized peer-to-peer environments with thousands of short-lived
  376. servers, many of which may be controlled by an adversary. This approach
  377. is appealing, but still has many open problems
  378. \cite{tarzan:ccs02,morphmix:fc04}.
  379. \textbf{Not secure against end-to-end attacks:} Tor does not claim
  380. to provide a definitive solution to end-to-end timing or intersection
  381. attacks. Some approaches, such as running an onion router, may help;
  382. see Section~\ref{sec:attacks} for more discussion.
  383. \textbf{No protocol normalization:} Tor does not provide \emph{protocol
  384. normalization} like Privoxy or the Anonymizer. For complex and variable
  385. protocols such as HTTP, Tor must be layered with a filtering proxy such
  386. as Privoxy to hide differences between clients, and expunge protocol
  387. features that leak identity. Similarly, Tor does not currently integrate
  388. tunneling for non-stream-based protocols like UDP; this too must be
  389. provided by an external service.
  390. % Actually, tunneling udp over tcp is probably horrible for some apps.
  391. % Should this get its own non-goal bulletpoint? The motivation for
  392. % non-goal-ness would be burden on clients / portability. -RD
  393. % No, leave it as is. -RD
  394. \textbf{Not steganographic:} Tor does not try to conceal which users are
  395. sending or receiving communications; it only tries to conceal with whom
  396. they communicate.
  397. \SubSection{Threat Model}
  398. \label{subsec:threat-model}
  399. A global passive adversary is the most commonly assumed threat when
  400. analyzing theoretical anonymity designs. But like all practical
  401. low-latency systems, Tor does not protect against such a strong
  402. adversary. Instead, we assume an adversary who can observe some fraction
  403. of network traffic; who can generate, modify, delete, or delay traffic
  404. on the network; who can operate onion routers of its own; and who can
  405. compromise some fraction of the onion routers on the network.
  406. In low-latency anonymity systems that use layered encryption, the
  407. adversary's typical goal is to observe both the initiator and the
  408. receiver. Passive attackers can confirm a suspicion that Alice is
  409. talking to Bob if the timing and volume patterns of the traffic on the
  410. connection are distinct enough; active attackers can induce timing
  411. signatures on the traffic to \emph{force} distinct patterns. Tor provides
  412. some defenses against these \emph{traffic confirmation} attacks, for
  413. example by encouraging users to run their own onion routers, but it does
  414. not provide complete protection. Rather, we aim to prevent \emph{traffic
  415. analysis} attacks, where the adversary uses traffic patterns to learn
  416. which points in the network he should attack.
  417. Our adversary might try to link an initiator Alice with any of her
  418. communication partners, or he might try to build a profile of Alice's
  419. behavior. He might mount passive attacks by observing the edges of the
  420. network and correlating traffic entering and leaving the network---either
  421. by relationships in packet timing; relationships in the volume
  422. of data sent; or relationships in any externally visible user-selected
  423. options. The adversary can also mount active attacks by compromising
  424. routers or keys; by replaying traffic; by selectively denying service
  425. to trustworthy routers to encourage users to send their traffic through
  426. compromised routers, or denying service to users to see if the traffic
  427. elsewhere in the
  428. network stops; or by introducing patterns into traffic that can later be
  429. detected. The adversary might attack the directory servers to give users
  430. differing views of network state. Additionally, he can try to decrease
  431. the network's reliability by attacking nodes or by performing antisocial
  432. activities from reliable servers and trying to get them taken down;
  433. making the network unreliable flushes users to other less anonymous
  434. systems, where they may be easier to attack.
  435. We consider each of these attacks in more detail below, and summarize
  436. in Section~\ref{sec:attacks} how well the Tor design defends against
  437. each of them.
  438. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  439. \Section{The Tor Design}
  440. \label{sec:design}
  441. The Tor network is an overlay network; each node is called an onion router
  442. (OR). Onion routers run as normal user-level processes without needing
  443. any special
  444. privileges. Currently, each OR maintains a long-term TLS \cite{TLS}
  445. connection to every other
  446. OR. (We examine some ways to relax this clique-topology assumption in
  447. Section~\ref{subsec:restricted-routes}.) A subset of the ORs also act as
  448. directory servers, tracking which routers are in the network;
  449. see Section~\ref{subsec:dirservers} for directory server details.
  450. Each user
  451. runs local software called an onion proxy (OP) to fetch directories,
  452. establish paths (called \emph{virtual circuits}) across the network,
  453. and handle connections from user applications. These onion proxies accept
  454. TCP streams and multiplex them across the virtual circuit. The onion
  455. router on the other side
  456. % I don't mean other side, I mean wherever it is on the circuit. But
  457. % don't want to introduce complexity this early? Hm. -RD
  458. of the circuit connects to the destinations of
  459. the TCP streams and relays data.
  460. Each onion router uses three public keys: a long-term identity key, a
  461. short-term onion key, and a short-term link key. The identity
  462. (signing) key is used to sign TLS certificates, to sign its router
  463. descriptor (a summary of its keys, address, bandwidth, exit policy,
  464. etc), and to sign directories if it is a directory server. Changing
  465. the identity key of a router is considered equivalent to creating a
  466. new router. The onion (decryption) key is used for decrypting requests
  467. from users to set up a circuit and negotiate ephemeral keys. Finally,
  468. link keys are used by the TLS protocol when communicating between
  469. onion routers. Each short-term key is rotated periodically and
  470. independently, to limit the impact of key compromise.
  471. Section~\ref{subsec:cells} discusses the structure of the fixed-size
  472. \emph{cells} that are the unit of communication in Tor. We describe
  473. in Section~\ref{subsec:circuits} how virtual circuits are
  474. built, extended, truncated, and destroyed. Section~\ref{subsec:tcp}
  475. describes how TCP streams are routed through the network, and finally
  476. Section~\ref{subsec:congestion} talks about congestion control and
  477. fairness issues.
  478. \SubSection{Cells}
  479. \label{subsec:cells}
  480. Onion routers communicate with one another, and with users' OPs, via TLS
  481. connections with ephemeral keys. This prevents an attacker from
  482. impersonating an OR, conceals the contents of the connection with
  483. perfect forward secrecy, and prevents an attacker from modifying data
  484. on the wire.
  485. Traffic passes along these connections in fixed-size cells. Each cell
  486. is 256 bytes (but see Section~\ref{sec:conclusion} for a discussion of
  487. allowing large cells and small cells on the same network), and
  488. consists of a header and a payload. The header includes a circuit
  489. identifier (circID) that specifies which circuit the cell refers to
  490. (many circuits can be multiplexed over the single TLS connection), and
  491. a command to describe what to do with the cell's payload. (Circuit
  492. identifiers are connection-specific: each single circuit has one circID
  493. for the forward connection and another for the backward connection.)
  494. % XXX Say that each OR can have many circuits with same circID, so
  495. % XXX long as they're on different connections, and that ORs know
  496. % XXX which circIDs/connection pairs are linked by a circuit.
  497. Based on their command, cells are either \emph{control} cells, which are
  498. always interpreted by the node that receives them, or \emph{relay} cells,
  499. which carry end-to-end stream data. The control cell commands are:
  500. \emph{padding} (currently used for keepalive, but also usable for link
  501. padding); \emph{create} or \emph{created} (used to set up a new circuit);
  502. and \emph{destroy} (to tear down a circuit).
  503. Relay cells have an additional header (the relay header) after the
  504. cell header, containing the stream identifier (many streams can
  505. be multiplexed over a circuit); an end-to-end checksum for integrity
  506. checking; the length of the relay payload; and a relay command.
  507. % XXX Mention _here_ that relay headers are {en|de}crypted as they
  508. % XXX progress along the circuit.
  509. The
  510. relay commands are: \emph{relay
  511. data} (for data flowing down the stream), \emph{relay begin} (to open a
  512. stream), \emph{relay end} (to close a stream cleanly), \emph{relay
  513. teardown} (to close a broken stream), \emph{relay connected}
  514. (to notify the OP that a relay begin has succeeded), \emph{relay
  515. extend} and \emph{relay extended} (to extend the circuit by a hop,
  516. and to acknowledge), \emph{relay truncate} and \emph{relay truncated}
  517. (to tear down only part of the circuit, and to acknowledge), \emph{relay
  518. sendme} (used for congestion control), and \emph{relay drop} (used to
  519. implement long-range dummies).
  520. We describe each of these cell types and commands in more detail below.
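To make the cell layout concrete, the following Python sketch shows one way
such a cell might be packed. It is illustrative only: the field widths,
command values, and function names are assumptions chosen for exposition,
not Tor's actual wire format.
\begin{verbatim}
# Illustrative sketch; field widths and command values are assumed.
import struct

CELL_SIZE = 256                       # fixed-size cells (this draft)
CMD_PADDING, CMD_CREATE, CMD_CREATED, CMD_RELAY, CMD_DESTROY = range(5)

CELL_HEADER  = struct.Struct("!HB")   # circID, command (assumed widths)
RELAY_HEADER = struct.Struct("!BH4sH")# relay cmd, streamID, digest, length

def pack_relay_cell(circ_id, relay_cmd, stream_id, digest, data):
    """Build one fixed-size relay cell: cell header + relay header +
    padded payload."""
    assert len(data) <= CELL_SIZE - CELL_HEADER.size - RELAY_HEADER.size
    body = RELAY_HEADER.pack(relay_cmd, stream_id, digest, len(data)) + data
    body = body.ljust(CELL_SIZE - CELL_HEADER.size, b"\0")
    return CELL_HEADER.pack(circ_id, CMD_RELAY) + body

def unpack_cell(cell):
    circ_id, command = CELL_HEADER.unpack_from(cell)
    return circ_id, command, cell[CELL_HEADER.size:]
\end{verbatim}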
  521. \SubSection{Circuits and streams}
  522. \label{subsec:circuits}
  523. % I think when we say ``the user,'' maybe we should say ``the user's OP.''
  524. The original Onion Routing design built one circuit for each
  525. TCP stream. Because building a circuit can take several tenths of a
  526. second (due to public-key cryptography delays and network latency),
  527. this design imposed high costs on applications like web browsing that
  528. open many TCP streams.
  529. In Tor, each circuit can be shared by many TCP streams. To avoid
  530. delays, users construct circuits preemptively. To limit linkability
  531. among their streams, users' OPs build a new circuit
  532. periodically if the previous one has been used,
  533. and expire old used circuits that no longer have any open streams.
  534. OPs consider making a new circuit once a minute: thus
  535. even heavy users spend a negligible amount of time and CPU in
  536. building circuits, but only a limited number of requests can be linked
  537. to each other through a given exit node. Also, because circuits are built
  538. in the background, OPs can recover from failed circuit creation
  539. without delaying streams and thereby harming user experience.
  540. \subsubsection{Constructing a circuit}
  541. \label{subsubsec:constructing-a-circuit}
  542. %XXXX Discuss what happens with circIDs here.
  543. Users construct a circuit incrementally, negotiating a symmetric key with
  544. each OR on the circuit, one hop at a time. To begin creating a new
  545. circuit, the user
  546. (call her Alice) sends a \emph{create} cell to the first node in her
  547. chosen path. This cell's payload contains the first half of the
  548. Diffie-Hellman handshake ($g^x$), encrypted to the onion key of the OR (call
  549. him Bob). Bob responds with a \emph{created} cell containing the second
  550. half of the DH handshake, along with a hash of the negotiated key
  551. $K=g^{xy}$.
  552. Once the circuit has been established, Alice and Bob can send one
  553. another relay cells encrypted with the negotiated
  554. key.\footnote{Actually, the negotiated key is used to derive two
  555. symmetric keys: one for each direction.} More detail is given in
  556. the next section.
  557. To extend the circuit further, Alice sends a \emph{relay extend} cell
  558. to Bob, specifying the address of the next OR (call her Carol), and
  559. an encrypted $g^{x_2}$ for her. Bob copies the half-handshake into a
  560. \emph{create} cell, and passes it to Carol to extend the circuit.
  561. When Carol responds with a \emph{created} cell, Bob wraps the payload
  562. into a \emph{relay extended} cell and passes it back to Alice. Now
  563. the circuit is extended to Carol, and Alice and Carol share a common key
  564. $K_2 = g^{x_2 y_2}$.
  565. To extend the circuit to a third node or beyond, Alice
  566. proceeds as above, always telling the last node in the circuit to
  567. extend one hop further.
  568. % XXX Briefly mention path selection and path length.
  569. This circuit-level handshake protocol achieves unilateral entity
  570. authentication (Alice knows she's handshaking with the OR, but
  571. the OR doesn't care who is opening the circuit---Alice has no key
  572. and is trying to remain anonymous) and unilateral key authentication
  573. (Alice and the OR agree on a key, and Alice knows the OR is the
  574. only other entity who should know it). It also achieves forward
  575. secrecy and key freshness. More formally, the protocol is as follows
  576. (where $E_{PK_{Bob}}(\cdot)$ is encryption with Bob's public key,
  577. $H$ is a secure hash function, and $|$ is concatenation):
  578. \begin{equation}
  579. \begin{aligned}
  580. \mathrm{Alice} \rightarrow \mathrm{Bob}&: E_{PK_{Bob}}(g^x) \\
  581. \mathrm{Bob} \rightarrow \mathrm{Alice}&: g^y, H(K | \mathrm{``handshake"}) \\
  582. \end{aligned}
  583. \end{equation}
584. In the second step, Bob proves that it was he who received $g^x$,
  585. and who came up with $y$. We use PK encryption in the first step
  586. (rather than, say, using the first two steps of STS, which has a
  587. signature in the second step) because a single cell is too small to
  588. hold both a public key and a signature. Preliminary analysis with the
  589. NRL protocol analyzer \cite{meadows96} shows the above protocol to be
  590. secure (including providing PFS) under the traditional Dolev-Yao
  591. model.
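The exchange can be illustrated with a toy Python sketch. It uses a
deliberately tiny, insecure Diffie-Hellman group ($p=23$, $g=5$) purely for
readability, and it omits the step of encrypting $g^x$ to Bob's onion key;
it is a sketch of the idea, not the deployed handshake.
\begin{verbatim}
# Toy sketch of the create/created handshake (insecure parameters).
import hashlib, secrets

p, g = 23, 5                      # toy group; real deployments use large groups

def H(data: bytes) -> bytes:
    return hashlib.sha1(data).digest()

# Alice -> Bob: E_{PK_Bob}(g^x)   (public-key encryption elided here)
x = secrets.randbelow(p - 2) + 1
gx = pow(g, x, p)

# Bob -> Alice: g^y, H(K | "handshake")
y = secrets.randbelow(p - 2) + 1
gy = pow(g, y, p)
K_bob = pow(gx, y, p)
proof = H(K_bob.to_bytes(4, "big") + b"handshake")

# Alice checks that whoever answered also knows K = g^{xy}.
K_alice = pow(gy, x, p)
assert H(K_alice.to_bytes(4, "big") + b"handshake") == proof
\end{verbatim}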
  592. \subsubsection{Relay cells}
  593. Once Alice has established the circuit (so she shares keys with each
  594. OR on the circuit), she can send relay cells.
  595. % XXX Describe _here_ what happens with relay cells that are not
  596. % XXX targeted at a given node; how they're decrypted; how they're
  597. % XXX encrypted. The easiest expository order should probably be: What ORs
  598. % XXX Do With Unrecognized Streams; What Alice Does To Build Relay
  599. % XXX Cells; What ORs Do With Streams They Recognize.
  600. Recall that every relay cell has a stream ID in the relay header
  601. that indicates to
  602. which stream the cell belongs.
  603. This stream ID allows a relay cell to be addressed to any of the ORs
  604. on the circuit. To
  605. construct a relay cell addressed to a given OR, Alice iteratively
  606. encrypts the cell payload (that is, the relay header and payload)
  607. with the symmetric key of each hop up to that OR. Then, at each hop
  608. down the circuit, the OR decrypts the cell payload and checks whether
609. it recognizes the stream ID. A stream ID is recognized either if it
610. corresponds to a stream already open at that OR, or if it is equal to zero. The
  611. zero stream ID is treated specially, and is used for control messages,
  612. e.g. starting a new stream. If the stream ID is unrecognized, the OR
  613. passes the relay cell downstream. This \emph{leaky pipe} circuit topology
  614. allows Alice's streams to exit at different ORs on a single circuit.
  615. Alice may choose different exit points because of their exit policies,
  616. or to keep the ORs from knowing that two streams
  617. originate at the same person.
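The layering and the ``recognized'' check can be sketched as follows. A
SHA-1-based keystream stands in for the real stream cipher, and recognition
is modeled as a lookup in a per-hop set of open stream IDs; both are
simplifications made only for illustration.
\begin{verbatim}
# Minimal sketch of leaky-pipe layering (illustrative only).
import hashlib

def keystream(key: bytes, length: int) -> bytes:
    out, counter = b"", 0
    while len(out) < length:
        out += hashlib.sha1(key + counter.to_bytes(8, "big")).digest()
        counter += 1
    return out[:length]

def xor_layer(key: bytes, data: bytes) -> bytes:
    return bytes(a ^ b for a, b in zip(data, keystream(key, len(data))))

def alice_build(payload: bytes, hop_keys):
    """Encrypt with the key of each hop up to the target OR."""
    for key in reversed(hop_keys):    # target hop's layer goes on first
        payload = xor_layer(key, payload)
    return payload

def or_handle(cell: bytes, key: bytes, open_streams):
    """Each OR strips one layer; it keeps the cell only if it recognizes
    the stream ID (here, the first two bytes), else relays it downstream."""
    cell = xor_layer(key, cell)
    stream_id = int.from_bytes(cell[:2], "big")
    if stream_id == 0 or stream_id in open_streams:
        return ("deliver", cell)
    return ("relay_downstream", cell)
\end{verbatim}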
  618. To tear down a whole circuit, Alice sends a \emph{destroy} control
  619. cell. Each OR
  620. in the circuit receives the \emph{destroy} cell, closes all open streams on
  621. that circuit, and passes a new \emph{destroy} cell forward. But since circuits
  622. can be built incrementally, they can also be torn down incrementally:
  623. Alice can instead send a relay truncate cell to a node along the circuit. That
  624. node will send a \emph{destroy} cell forward, and reply with an acknowledgment
  625. (a \emph{relay truncated} cell). Alice might truncate her circuit so
  626. she can extend it
  627. to different nodes without signalling to the first few nodes (or somebody
  628. observing them) that she is changing her circuit. That is, nodes in the
  629. middle of a circuit are not even aware that the circuit has been
  630. truncated, because they see only the encrypted relay cells.
  631. Similarly, if a node on the circuit goes down,
  632. the adjacent node can send a \emph{relay truncated} cell back to
  633. Alice. Thus the
  634. ``break a node and see which circuits go down'' attack is weakened.
  635. \SubSection{Opening and closing streams}
  636. \label{subsec:tcp}
  637. When Alice's application wants to open a TCP connection to a given
  638. address and port, it asks the OP (via SOCKS) to make the connection. The
  639. OP chooses the newest open circuit (or creates one if none is available),
  640. chooses a suitable OR on that circuit to be the exit node (usually the
  641. last node, but maybe others due to exit policy conflicts; see
  642. Section~\ref{sec:exit-policies}), chooses a new random stream ID for
  643. this stream,
  644. and delivers a relay begin cell to that exit node. It uses a stream ID
  645. of zero for the begin cell (so the OR will recognize it), and the relay
  646. payload lists the new stream ID and the destination address and port.
  647. Once the exit node completes the connection to the remote host, it
  648. responds with a relay connected cell through the circuit. Upon receipt,
  649. the OP notifies the application that it can begin talking.
650. There's a catch to using SOCKS, though: some applications hand the
651. alphanumeric hostname to the proxy, while others resolve it into an IP
652. address first and then hand the IP to the proxy. When the application
653. does the DNS resolution first, Alice reveals her destination to her DNS server (and to anyone observing her DNS traffic). Common
  654. applications like Mozilla and ssh have this flaw.
  655. In the case of Mozilla, we're fine: the filtering web proxy called Privoxy
  656. does the SOCKS call safely, and Mozilla talks to Privoxy safely. But a
  657. portable general solution, such as for ssh, is an open problem. We can
  658. modify the local nameserver, but this approach is invasive, brittle, and
  659. not portable. We can encourage the resolver library to do resolution
  660. via TCP rather than UDP, but this approach is hard to do right, and also
  661. has portability problems. We can provide a tool similar to \emph{dig} that
  662. can do a private lookup through the Tor network. Our current answer is to
663. encourage the use of privacy-aware proxies like Privoxy wherever possible.
  664. Ending a Tor stream is analogous to ending a TCP stream: it uses a
  665. two-step handshake for normal operation, or a one-step handshake for
  666. errors. If one side of the stream closes abnormally, that node simply
  667. sends a relay teardown cell, and tears down the stream. If one side
  668. of the stream closes the connection normally, that node sends a relay
  669. end cell down the circuit. When the other side has sent back its own
  670. relay end, the stream can be torn down. This two-step handshake allows
  671. for TCP-based applications that, for example, close a socket for writing
  672. but are still willing to read. Remember that all relay cells use layered
  673. encryption, so only the destination OR knows what type of relay cell
  674. it is.
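A greatly simplified sketch of how an exit node might dispatch these relay
commands is shown below. The names (such as \texttt{parse\_begin}) and the
dispatch structure are hypothetical illustrations, not drawn from the Tor
source.
\begin{verbatim}
# Simplified sketch of stream open/close handling at an exit node.
import socket

def handle_relay_cell(streams, relay_cmd, stream_id, payload, reply):
    if relay_cmd == "begin":                      # arrives with stream ID zero
        new_id, host, port = parse_begin(payload) # hypothetical parser
        streams[new_id] = socket.create_connection((host, port))
        reply("connected", new_id)                # OP may now start talking
    elif relay_cmd == "end":                      # clean close: two-step handshake
        streams[stream_id].shutdown(socket.SHUT_WR)
        # the peer may still send data; once we send our own 'relay end'
        # back, the stream is torn down
    elif relay_cmd == "teardown":                 # abnormal close: one step
        streams.pop(stream_id).close()
\end{verbatim}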
  675. \SubSection{Integrity checking on streams}
  676. Because the old Onion Routing design used a stream cipher, traffic was
  677. vulnerable to a malleability attack: even though the attacker could not
  678. decrypt cells, he could make changes to an encrypted
  679. cell to create corresponding changes to the data leaving the network.
  680. (Even an external adversary could do this, despite link encryption!)
  681. This weakness allowed an adversary to change a padding cell to a destroy
  682. cell; change the destination address in a relay begin cell to the
  683. adversary's webserver; or change a user on an ftp connection from
  684. typing ``dir'' to typing ``delete~*''. Any node or external adversary
  685. along the circuit could introduce such corruption in a stream.
  686. Tor prevents external adversaries from mounting this attack simply by
  687. using TLS. Addressing the insider malleability attack, however, is
  688. more complex.
  689. We could do integrity checking of the relay cells at each hop, either
  690. by including hashes or by using a cipher mode like EAX \cite{eax},
  691. but we don't want the added message-expansion overhead at each hop, and
  692. we don't want to leak the path length or pad to some max path length.
  693. Because we've already accepted that our design is vulnerable to end-to-end
  694. timing attacks, we can perform integrity checking only at the edges of
  695. the circuit without introducing any new anonymity attacks. When Alice
  696. negotiates a key
  697. with each hop, they both start a SHA-1 with some derivative of that key,
  698. % Not just the exit hop, but each hop: any hop can be an exit node. -RD
  699. thus starting out with randomness that only the two of them know. From
  700. then on they each incrementally add to the SHA-1 all the data bytes
  701. entering or exiting from the circuit, and each such relay cell includes
  702. the first 4 bytes of the current value of the hash.
  703. The attacker must be able to guess all previous bytes between Alice
  704. and Bob on that circuit (including the pseudorandomness from the key
  705. negotiation), plus the bytes in the current cell, to remove or modify the
  706. cell. Attacks on SHA-1 where the adversary can incrementally add to a
  707. hash to produce a new valid hash don't work,
  708. because all hashes are end-to-end encrypted across the circuit.
709. The computational overhead is modest compared to doing an AES
710. crypt at each hop in the circuit. We use only four bytes per cell to
711. minimize overhead; the chance that an adversary will correctly guess a
712. valid hash, plus the payload of the current cell, is acceptably low, given
713. that Alice or Bob will tear down the circuit upon receiving a bad hash.
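The mechanism amounts to a running digest at each endpoint, as in the
following sketch (the seed value is a placeholder; in practice it is derived
from the negotiated key):
\begin{verbatim}
# Sketch of the end-to-end integrity check: both endpoints seed a running
# SHA-1, fold in every byte crossing the circuit edge, and place the first
# four bytes of the current digest in each relay cell.
import hashlib

class RunningDigest:
    def __init__(self, key_material: bytes):
        self.h = hashlib.sha1(key_material)   # randomness only the endpoints know

    def add(self, data: bytes) -> bytes:
        self.h.update(data)
        return self.h.digest()[:4]            # 4-byte tag carried in the relay header

seed = b"derived-from-negotiated-key"         # placeholder value
alice, exit_or = RunningDigest(seed), RunningDigest(seed)

cell_payload = b"GET / HTTP/1.0\r\n\r\n"
tag = alice.add(cell_payload)                 # sent along with the cell
assert exit_or.add(cell_payload) == tag       # mismatch => tear down the circuit
\end{verbatim}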
  714. \SubSection{Rate limiting and fairness}
  715. Volunteers are generally more willing to run services that can limit
716. their bandwidth usage. To accommodate them, Tor servers use a token
  717. bucket approach to limit the number of bytes they
  718. % XXX cite token bucket?
  719. receive. Tokens are added to the bucket each second (when the bucket is
720. full, new tokens are discarded). Each token represents permission to
  721. receive one byte from the network---to receive a byte, the connection
  722. must remove a token from the bucket. Thus if the bucket is empty, that
  723. connection must wait until more tokens arrive. The number of tokens we
  724. add enforces a long-term average rate of incoming bytes, while still
  725. permitting short-term bursts above the allowed bandwidth. Current bucket
  726. sizes are set to ten seconds worth of traffic.
  727. Further, we want to avoid starving any Tor streams. Entire circuits
  728. could starve if we read greedily from connections and one connection
  729. uses all the remaining bandwidth. We solve this by dividing the number
  730. of tokens in the bucket by the number of connections that want to read,
  731. and reading at most that number of bytes from each connection. We iterate
  732. this procedure until the number of tokens in the bucket is under some
733. threshold (e.g.\ 10KB), at which point we greedily read from connections.
  734. Because the Tor protocol generates roughly the same number of outgoing
  735. bytes as incoming bytes, it is sufficient in practice to rate-limit
  736. incoming bytes.
  737. % Is it? Fun attack: I send you lots of 1-byte-at-a-time TCP frames.
  738. % In response, you send lots of 256 byte cells. Can I use this to
  739. % make you exceed your outgoing bandwidth limit by a factor of 256? -NM
  740. % Can we resolve this by, when reading from edge connections, rounding up
  741. % the bytes read (wrt buckets) to the nearest multiple of 256? -RD
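The token bucket and the fair-read rule can be sketched as follows. The
parameter values are examples, and the connection object is assumed to
expose a hypothetical \texttt{read\_at\_most(n)} method returning the number
of bytes actually read; neither is Tor's actual configuration or API.
\begin{verbatim}
# Illustrative token-bucket reader with fair division among connections.
class TokenBucket:
    def __init__(self, rate_bytes_per_sec, burst_secs=10):
        self.rate = rate_bytes_per_sec
        self.capacity = rate_bytes_per_sec * burst_secs  # ten seconds of traffic
        self.tokens = self.capacity

    def refill(self):                                    # called once per second
        self.tokens = min(self.capacity, self.tokens + self.rate)

    def read_round(self, connections, low_water=10 * 1024):
        """Split the available tokens evenly among readable connections,
        iterating until the bucket is nearly drained."""
        while self.tokens > low_water and connections:
            share = max(1, self.tokens // len(connections))
            for conn in list(connections):
                n = conn.read_at_most(min(share, self.tokens))  # hypothetical method
                self.tokens -= n
                if n == 0:
                    connections.remove(conn)
        # below low_water, reads proceed greedily (not shown)
\end{verbatim}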
742. Further, inspired by Rennhard et al.'s design in \cite{anonnet}, a
  743. circuit's edges heuristically distinguish interactive streams from bulk
  744. streams by comparing the frequency with which they supply cells. We can
  745. provide good latency for interactive streams by giving them preferential
  746. service, while still getting good overall throughput to the bulk
  747. streams. Such preferential treatment presents a possible end-to-end
  748. attack, but an adversary who can observe both
  749. ends of the stream can already learn this information through timing
  750. attacks.
  751. \SubSection{Congestion control}
  752. \label{subsec:congestion}
  753. Even with bandwidth rate limiting, we still need to worry about
  754. congestion, either accidental or intentional. If enough users choose the
  755. same OR-to-OR connection for their circuits, that connection can become
  756. saturated. For example, an adversary could make a large HTTP PUT request
  757. through the onion routing network to a webserver he runs, and then
  758. refuse to read any of the bytes at the webserver end of the
  759. circuit. Without some congestion control mechanism, these bottlenecks
  760. can propagate back through the entire network. We describe our
  761. responses below.
  762. \subsubsection{Circuit-level}
  763. To control a circuit's bandwidth usage, each OR keeps track of two
  764. windows. The \emph{package window} tracks how many relay data cells the OR is
  765. allowed to package (from outside streams) for transmission back to the OP,
and the \emph{delivery window} tracks how many relay data cells it is willing
  767. to deliver to streams outside the network. Each window is initialized
  768. (say, to 1000 data cells). When a data cell is packaged or delivered,
  769. the appropriate window is decremented. When an OR has received enough
  770. data cells (currently 100), it sends a relay sendme cell towards the OP,
  771. with stream ID zero. When an OR receives a relay sendme cell with stream
  772. ID zero, it increments its packaging window. Either of these cells
  773. increments the corresponding window by 100. If the packaging window
  774. reaches 0, the OR stops reading from TCP connections for all streams
  775. on the corresponding circuit, and sends no more relay data cells until
  776. receiving a relay sendme cell.
  777. The OP behaves identically, except that it must track a packaging window
  778. and a delivery window for every OR in the circuit. If a packaging window
  779. reaches 0, it stops reading from streams destined for that OR.
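The window arithmetic above can be summarized as in the following
sketch (Python; the initial value of 1000 and the increment of 100 come
from the text, while the surrounding structure is hypothetical):
\begin{verbatim}
CIRCWINDOW_START = 1000      # initial package and delivery windows
CIRCWINDOW_INCREMENT = 100   # granted by each circuit-level sendme

class CircuitWindows:
    def __init__(self):
        self.package_window = CIRCWINDOW_START
        self.deliver_window = CIRCWINDOW_START

    def package_data_cell(self):
        # Called when packaging a data cell from an outside stream.
        if self.package_window <= 0:
            raise RuntimeError("stop reading until a sendme arrives")
        self.package_window -= 1

    def deliver_data_cell(self):
        # Called when delivering a data cell; returns True when a
        # circuit-level relay sendme (stream ID zero) should be sent.
        self.deliver_window -= 1
        if self.deliver_window <= CIRCWINDOW_START - CIRCWINDOW_INCREMENT:
            self.deliver_window += CIRCWINDOW_INCREMENT
            return True
        return False

    def received_sendme(self):
        # A relay sendme with stream ID zero re-opens the package window.
        self.package_window += CIRCWINDOW_INCREMENT
\end{verbatim}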
  780. \subsubsection{Stream-level}
  781. The stream-level congestion control mechanism is similar to the
  782. circuit-level mechanism above. ORs and OPs use relay sendme cells
  783. to implement end-to-end flow control for individual streams across
circuits. Each stream begins with a package window (for example, 500 cells),
  785. and increments the window by a fixed value (50) upon receiving a relay
  786. sendme cell. Rather than always returning a relay sendme cell as soon
  787. as enough cells have arrived, the stream-level congestion control also
  788. has to check whether data has been successfully flushed onto the TCP
  789. stream; it sends a relay sendme only when the number of bytes pending
  790. to be flushed is under some threshold (currently 10 cells worth).
  791. Currently, non-data relay cells do not affect the windows. Thus we
avoid potential deadlock issues, such as a stream being unable to send a
relay sendme cell because its packaging window is empty.
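The stream-level variant differs only in its constants and in the extra
flush check; a sketch (Python, with an illustrative pending-flush
measure counted in cells):
\begin{verbatim}
STREAMWINDOW_START = 500     # initial stream-level window
STREAMWINDOW_INCREMENT = 50  # granted by each stream-level sendme

class StreamWindow:
    def __init__(self):
        self.deliver_window = STREAMWINDOW_START

    def deliver_data_cell(self, cells_awaiting_flush):
        # Send a stream-level sendme only once enough cells have
        # arrived *and* most of the data has been flushed onto the
        # outgoing TCP stream (threshold of ten cells, per the text).
        self.deliver_window -= 1
        window_low = (self.deliver_window <=
                      STREAMWINDOW_START - STREAMWINDOW_INCREMENT)
        if window_low and cells_awaiting_flush < 10:
            self.deliver_window += STREAMWINDOW_INCREMENT
            return True
        return False
\end{verbatim}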
\subsubsection{Open issues in congestion control}
We do not need to reimplement full TCP windows (with sequence numbers,
the ability to drop cells when buffers are full and retransmit later, and so on),
  798. because the TCP streams already guarantee in-order delivery of each
  799. cell. But we need to investigate further the effects of the current
  800. parameters on throughput and latency, while also keeping privacy in mind;
  801. see Section~\ref{sec:maintaining-anonymity} for more discussion.
  802. \Section{Other design decisions}
  803. \SubSection{Resource management and denial-of-service}
  804. \label{subsec:dos}
Providing Tor as a public service creates many opportunities for
denial-of-service attacks against the network. While
  807. flow control and rate limiting (discussed in
  808. Section~\ref{subsec:congestion}) prevent users from consuming more
  809. bandwidth than routers are willing to provide, opportunities remain for
  810. users to
  811. consume more network resources than their fair share, or to render the
  812. network unusable for other users.
  813. First of all, there are several CPU-consuming denial-of-service
  814. attacks wherein an attacker can force an OR to perform expensive
  815. cryptographic operations. For example, an attacker who sends a
  816. \emph{create} cell full of junk bytes can force an OR to perform an RSA
  817. decrypt. Similarly, an attacker can
  818. fake the start of a TLS handshake, forcing the OR to carry out its
  819. (comparatively expensive) half of the handshake at no real computational
  820. cost to the attacker.
  821. Several approaches exist to address these attacks. First, ORs may
  822. require clients to solve a puzzle \cite{puzzles-tls} while beginning new
TLS handshakes or accepting \emph{create} cells. So long as these
puzzle solutions are easy to verify and computationally expensive to produce, this
  825. approach limits the attack multiplier. Additionally, ORs may limit
  826. the rate at which they accept create cells and TLS connections, so that
  827. the computational work of processing them does not drown out the (comparatively
  828. inexpensive) work of symmetric cryptography needed to keep cells
  829. flowing. This rate limiting could, however, allow an attacker
  830. to slow down other users when they build new circuits.
  831. % What about link-to-link rate limiting?
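As one concrete illustration of the puzzle idea (a hashcash-style
construction, sketched here for intuition rather than as the scheme of
\cite{puzzles-tls}), the server can hand out a random challenge and
require a nonce whose hash has a given number of leading zero bits; the
solution is expensive to find but takes only one hash to verify:
\begin{verbatim}
import hashlib
import os

def make_challenge():
    return os.urandom(16)

def solve(challenge, bits):
    # Client side: expected work grows as 2^bits.
    nonce = 0
    while not _valid(challenge, nonce, bits):
        nonce += 1
    return nonce

def verify(challenge, nonce, bits):
    # Server side: a single hash, independent of the difficulty.
    return _valid(challenge, nonce, bits)

def _valid(challenge, nonce, bits):
    digest = hashlib.sha256(challenge + nonce.to_bytes(8, "big")).digest()
    return int.from_bytes(digest, "big") >> (256 - bits) == 0
\end{verbatim}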
Attackers can also attack the Tor network by targeting its hosts and
network links. Disrupting a single circuit or
  834. link breaks all currently open streams passing along that part of the
  835. circuit. Indeed, this same loss of service occurs when a router crashes
  836. or its operator restarts it. The current Tor design treats such attacks
  837. as intermittent network failures, and depends on users and applications
  838. to respond or recover as appropriate. A future design could use an
  839. end-to-end TCP-like acknowledgment protocol, so that no streams are
  840. lost unless the entry or exit point itself is disrupted. This solution
  841. would require more buffering at the network edges, however, and the
  842. performance and anonymity implications from this extra complexity still
  843. require investigation.
  844. \SubSection{Exit policies and abuse}
  845. \label{subsec:exitpolicies}
  846. %XXX originally, we planned to put the "users only know the hostname,
  847. % not the IP, but exit policies are by IP" problem here too. Worth
  848. % while still? -RD
  849. Exit abuse is a serious barrier to wide-scale Tor deployment. Anonymity
  850. presents would-be vandals and abusers with an opportunity to hide
  851. the origins of their activities. Attackers can harm the Tor network by
  852. implicating exit servers for their abuse. Also, applications that commonly
  853. use IP-based authentication (such as institutional mail or web servers)
  854. can be fooled by the fact that anonymous connections appear to originate
  855. at the exit OR.
  856. We stress that Tor does not enable any new class of abuse. Spammers
  857. and other attackers already have access to thousands of misconfigured
  858. systems worldwide, and the Tor network is far from the easiest way
  859. to launch these antisocial or illegal attacks.
  860. %Indeed, because of its limited
  861. %anonymity, Tor is probably not a good way to commit crimes.
  862. But because the
  863. onion routers can easily be mistaken for the originators of the abuse,
  864. and the volunteers who run them may not want to deal with the hassle of
  865. repeatedly explaining anonymity networks, we must block or limit attacks
  866. and other abuse that travel through the Tor network.
  867. To mitigate abuse issues, in Tor, each onion router's \emph{exit policy}
  868. describes to which external addresses and ports the router will permit
  869. stream connections. On one end of the spectrum are \emph{open exit}
  870. nodes that will connect anywhere. On the other end are \emph{middleman}
  871. nodes that only relay traffic to other Tor nodes, and \emph{private exit}
  872. nodes that only connect to a local host or network. Using a private
  873. exit (if one exists) is a more secure way for a client to connect to a
  874. given host or network---an external adversary cannot eavesdrop traffic
  875. between the private exit and the final destination, and so is less sure of
  876. Alice's destination and activities. Most onion routers will function as
  877. \emph{restricted exits} that permit connections to the world at large,
  878. but prevent access to certain abuse-prone addresses and services. In
  879. general, nodes can require a variety of forms of traffic authentication
  880. \cite{or-discex00}.
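Concretely, an exit policy can be represented as an ordered list of
accept and reject rules matched against a stream's destination address
and port, where the first matching rule wins. The sketch below (Python;
the rule syntax is illustrative and is not Tor's configuration format)
shows a restricted exit that blocks SMTP everywhere but allows a few
well-known ports:
\begin{verbatim}
import ipaddress

def exit_allowed(policy, addr, port):
    # policy: ordered list of (action, network, (low_port, high_port)).
    ip = ipaddress.ip_address(addr)
    for action, network, (low, high) in policy:
        if ip in ipaddress.ip_network(network) and low <= port <= high:
            return action == "accept"
    return False  # default: reject anything not explicitly matched

restricted_exit = [
    ("reject", "0.0.0.0/0", (25, 25)),      # no SMTP
    ("accept", "0.0.0.0/0", (22, 22)),      # SSH
    ("accept", "0.0.0.0/0", (80, 80)),      # HTTP
    ("accept", "0.0.0.0/0", (5190, 5190)),  # AIM
]
\end{verbatim}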
  881. %The abuse issues on closed (e.g. military) networks are different
  882. %from the abuse on open networks like the Internet. While these IP-based
  883. %access controls are still commonplace on the Internet, on closed networks,
  884. %nearly all participants will be honest, and end-to-end authentication
  885. %can be assumed for important traffic.
  886. Many administrators will use port restrictions to support only a
  887. limited set of well-known services, such as HTTP, SSH, or AIM.
  888. This is not a complete solution, since abuse opportunities for these
  889. protocols are still well known. Nonetheless, the benefits are real,
  890. since administrators seem used to the concept of port 80 abuse not
  891. coming from the machine's owner.
  892. A further solution may be to use proxies to clean traffic for certain
  893. protocols as it leaves the network. For example, much abusive HTTP
  894. behavior (such as exploiting buffer overflows or well-known script
  895. vulnerabilities) can be detected in a straightforward manner.
  896. Similarly, one could run automatic spam filtering software (such as
  897. SpamAssassin) on email exiting the OR network.
  898. ORs may also choose to rewrite exiting traffic in order to append
  899. headers or other information to indicate that the traffic has passed
  900. through an anonymity service. This approach is commonly used
  901. by email-only anonymity systems. When possible, ORs can also
  902. run on servers with hostnames such as {\it anonymous}, to further
  903. alert abuse targets to the nature of the anonymous traffic.
  904. A mixture of open and restricted exit nodes will allow the most
  905. flexibility for volunteers running servers. But while many
  906. middleman nodes help provide a large and robust network,
  907. having only a few exit nodes reduces the number of points
  908. an adversary needs to monitor for traffic analysis, and places a
  909. greater burden on the exit nodes. This tension can be seen in the
  910. Java Anon Proxy
  911. cascade model, wherein only one node in each cascade needs to handle
  912. abuse complaints---but an adversary only needs to observe the entry
  913. and exit of a cascade to perform traffic analysis on all that
  914. cascade's users. The Hydra model (many entries, few exits) presents a
  915. different compromise: only a few exit nodes are needed, but an
  916. adversary needs to work harder to watch all the clients; see
  917. Section~\ref{sec:conclusion}.
  918. Finally, we note that exit abuse must not be dismissed as a peripheral
  919. issue: when a system's public image suffers, it can reduce the number
  920. and diversity of that system's users, and thereby reduce the anonymity
of the system itself. Like usability, public perception is a
security parameter. Sadly, preventing abuse of open exit nodes is an
unsolved problem, and will probably remain an arms race for the
foreseeable future. The abuse problems faced by Princeton's CoDeeN
  925. project \cite{darkside} give us a glimpse of likely issues.
\SubSection{Directory servers}
  927. \label{subsec:dirservers}
  928. First-generation Onion Routing designs \cite{freedom2-arch,or-jsac98} used
  929. in-band network status updates: each router flooded a signed statement
  930. to its neighbors, which propagated it onward. But anonymizing networks
  931. have different security goals than typical link-state routing protocols.
  932. For example, delays (accidental or intentional)
  933. that can cause different parts of the network to have different pictures
  934. of link-state and topology are not only inconvenient---they give
  935. attackers an opportunity to exploit differences in client knowledge.
  936. We also worry about attacks to deceive a
  937. client about the router membership list, topology, or current network
  938. state. Such \emph{partitioning attacks} on client knowledge help an
  939. adversary to efficiently deploy resources
  940. when attacking a target.
  941. Tor uses a small group of redundant, well-known onion routers to
  942. track changes in network topology and node state, including keys and
  943. exit policies. Each such \emph{directory server} also acts as an HTTP
  944. server, so participants can fetch current network state and router
  945. lists (a \emph{directory}), and so other onion routers can upload
  946. their router descriptors. Onion routers periodically publish signed
  947. statements of their state to each directory server, which combines this
  948. state information with its own view of network liveness, and generates
  949. a signed description of the entire network state. Client software is
pre-loaded with a list of the directory servers and their keys; it uses
this information to bootstrap its view of the network.
  952. When a directory server receives a signed statement from an onion
  953. router, it recognizes the onion router by its identity key. Directory
  954. servers do not automatically advertise unrecognized ORs. (If they did,
  955. an adversary could take over the network by creating many servers
  956. \cite{sybil}.) Instead, new nodes must be approved by the directory
  957. server administrator before they are included. Mechanisms for automated
  958. node approval are an area of active research, and are discussed more
  959. in Section~\ref{sec:maintaining-anonymity}.
  960. Of course, a variety of attacks remain. An adversary who controls
  961. a directory server can track certain clients by providing different
  962. information---perhaps by listing only nodes under its control, or by
  963. informing only certain clients about a given node. Even an external
  964. adversary can exploit differences in client knowledge: clients who use
  965. a node listed on one directory server but not the others are vulnerable.
  966. Thus these directory servers must be synchronized and redundant.
  967. Directories are valid if they are signed by a threshold of the directory
  968. servers.
  969. The directory servers in Tor are modeled after those in Mixminion
  970. \cite{minion-design}, but our situation is easier. First, we make the
  971. simplifying assumption that all participants agree on the set of
  972. directory servers. Second, while Mixminion needs to predict node
  973. behavior, Tor only needs a threshold consensus of the current
  974. state of the network.
  975. Tor directory servers build a consensus directory through a simple
  976. four-round broadcast protocol. In round one, each server dates and
  977. signs its current opinion, and broadcasts it to the other directory
  978. servers; then in round two, each server rebroadcasts all the signed
  979. opinions it has received. At this point all directory servers check
  980. to see whether any server has signed multiple opinions in the same
  981. period. Such a server is either broken or cheating, so the protocol
  982. stops and notifies the administrators, who either remove the cheater
  983. or wait for the broken server to be fixed. If there are no
discrepancies, each directory server then locally applies an algorithm
(described below)
to the set of opinions, resulting in a uniform shared directory. In
round three, the servers sign this directory and broadcast it; finally,
in round four, the servers rebroadcast the directory and all the
  989. signatures. If any directory server drops out of the network, its
  990. signature is not included on the final directory.
  991. The rebroadcast steps ensure that a directory server is heard by
  992. either all of the other servers or none of them, even when some links
  993. are down (assuming that any two directory servers can talk directly or
  994. via a third). Broadcasts are feasible because there are relatively few
  995. directory servers (currently 3, but we expect as many as 9 as the network
  996. scales). Computing the shared directory locally is a straightforward
  997. threshold voting process: we include an OR if a majority of directory
  998. servers believe it to be good.
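The local computation is thus a simple majority vote over the routers
listed in the exchanged opinions. A minimal sketch (Python; the
representation of an opinion as a set of router identities is an
assumption made for illustration):
\begin{verbatim}
from collections import Counter

def compute_shared_directory(opinions):
    # opinions: one set of router identity keys per directory server.
    counts = Counter(r for opinion in opinions for r in opinion)
    quorum = len(opinions) // 2 + 1
    return {router for router, votes in counts.items() if votes >= quorum}

# With three directory servers:
#   compute_shared_directory([{"A", "B"}, {"A", "C"}, {"A", "B"}])
#   yields {"A", "B"}.
\end{verbatim}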
  999. To avoid attacks where a router connects to all the directory servers
  1000. but refuses to relay traffic from other routers, the directory servers
  1001. must build circuits and use them to anonymously test router reliability
  1002. \cite{mix-acc}.
  1003. Using directory servers is simpler and more flexible than flooding.
  1004. For example, flooding complicates the analysis when we
  1005. start experimenting with non-clique network topologies. And because
  1006. the directories are signed, they can be cached by other onion routers.
  1007. Thus directory servers are not a performance
  1008. bottleneck when we have many users, and do not aid traffic analysis by
  1009. forcing clients to periodically announce their existence to any
  1010. central point.
  1011. \Section{Rendezvous points and location privacy}
  1012. \label{sec:rendezvous}
  1013. Rendezvous points are a building block for \emph{location-hidden
  1014. services} (also known as \emph{responder anonymity}) in the Tor
  1015. network. Location-hidden services allow Bob to offer a TCP
service, such as a webserver, without revealing its IP address.
This type of anonymity protects against distributed DoS attacks:
attackers are forced to attack the onion routing network as a whole
rather than just Bob's IP address.
  1020. Our design for location-hidden servers has the following goals.
  1021. \textbf{Flood-proof:} Bob needs a way to filter incoming requests,
  1022. so an attacker cannot flood Bob simply by sending many requests.
  1023. \textbf{Robust:} Bob should be able to maintain a long-term pseudonymous
  1024. identity even in the presence of router failure. Bob's service must
  1025. not be tied to a single OR, and Bob must be able to tie his service
  1026. to new ORs. \textbf{Smear-resistant:} if a social attacker offers a
  1027. location-hidden service that is illegal or disreputable, it should not
  1028. appear---even to a casual observer---that a rendezvous router is hosting
  1029. that service. \textbf{Application-transparent:} Although we require users
  1030. to run special software to access location-hidden servers, we must not
  1031. require them to modify their applications.
  1032. We provide location-hiding for Bob by allowing him to advertise
  1033. several onion routers (his \emph{introduction points}) as contact
points. He may do this on any robust, efficient
key-value lookup system with authenticated updates, such as a
distributed hash table (DHT) like CFS \cite{cfs:sosp01}.\footnote{
  1037. Rather than rely on an external infrastructure, the Onion Routing network
  1038. can run the DHT; to begin, we can run a simple lookup system on the
  1039. directory servers.} Alice, the client, chooses an OR as her
  1040. \emph{rendezvous point}. She connects to one of Bob's introduction
  1041. points, informs him about her rendezvous point, and then waits for him
  1042. to connect to the rendezvous point. This extra level of indirection
  1043. helps Bob's introduction points avoid problems associated with serving
  1044. unpopular files directly (for example, if Bob chooses
  1045. an introduction point in Texas to serve anti-ranching propaganda,
  1046. or if Bob's service tends to get attacked by network vandals).
  1047. The extra level of indirection also allows Bob to respond to some requests
  1048. and ignore others.
  1049. We give an overview of the steps of a rendezvous. These steps are
  1050. performed on behalf of Alice and Bob by their local onion proxies;
  1051. application integration is described more fully below.
  1052. \begin{tightlist}
  1053. \item Bob chooses some introduction points, and advertises them on
  1054. the DHT.
  1055. \item Bob establishes a Tor circuit to each of his introduction points,
  1056. and waits.
  1057. \item Alice learns about Bob's service out of band (perhaps Bob told her,
  1058. or she found it on a website). She retrieves the details of Bob's
  1059. service from the DHT.
  1060. \item Alice chooses an OR to serve as the rendezvous point (RP) for this
  1061. transaction. She establishes a circuit to RP, and gives it a
  1062. rendezvous cookie, which it will use to recognize Bob.
  1063. \item Alice opens an anonymous stream to one of Bob's introduction
  1064. points, and gives it a message (encrypted for Bob) which tells him
  1065. about herself, her chosen RP and the rendezvous cookie, and the
  1066. first half of an ephemeral
  1067. key handshake. The introduction point sends the message to Bob.
  1068. \item If Bob wants to talk to Alice, he builds a new circuit to Alice's
  1069. RP and provides the rendezvous cookie and the second half of the DH
  1070. handshake (along with a hash of the session key they now share).
\item The RP connects Alice's circuit to Bob's, as sketched below. Note
that the RP cannot recognize Alice, Bob, or the data they transmit.
  1073. \item Alice now sends a \emph{relay begin} cell along the circuit. It
  1074. arrives at Bob's onion proxy. Bob's onion proxy connects to Bob's
  1075. webserver.
  1076. \item An anonymous stream has been established, and Alice and Bob
  1077. communicate as normal.
  1078. \end{tightlist}
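The rendezvous point's own role in these steps is deliberately minimal:
it matches Alice's and Bob's circuits by the rendezvous cookie and
splices them without learning anything else. A sketch of that matching
(Python; names and interfaces are hypothetical):
\begin{verbatim}
class RendezvousPoint:
    def __init__(self):
        self.waiting = {}  # rendezvous cookie -> Alice's circuit

    def establish(self, cookie, alice_circuit):
        # Alice's circuit registers a cookie and waits for Bob.
        self.waiting[cookie] = alice_circuit

    def rendezvous(self, cookie, bob_circuit):
        # Bob presents the same cookie; on a match, the two circuits
        # are joined.  The RP never sees Alice, Bob, or their plaintext.
        alice_circuit = self.waiting.pop(cookie, None)
        if alice_circuit is None:
            return False
        alice_circuit.join(bob_circuit)
        return True
\end{verbatim}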
  1079. When establishing an introduction point, Bob provides the onion router
  1080. with a public ``introduction'' key. The hash of this public key
  1081. identifies a unique service, and (since Bob is required to sign his
  1082. messages) prevents anybody else from usurping Bob's introduction point
  1083. in the future. Bob uses the same public key when establishing the other
  1084. introduction points for that service. Bob periodically refreshes his
  1085. entry in the DHT.
  1086. The message that Alice gives
  1087. the introduction point includes a hash of Bob's public key to identify
  1088. the service, along with an optional initial authentication token (the
  1089. introduction point can do prescreening, for example to block replays). Her
  1090. message to Bob may include an end-to-end authentication token so Bob
  1091. can choose whether to respond.
  1092. The authentication tokens can be used to provide selective access:
  1093. important users get tokens to ensure uninterrupted access to the
  1094. service. During normal situations, Bob's service might simply be offered
  1095. directly from mirrors, and Bob gives out tokens to high-priority users. If
  1096. the mirrors are knocked down by distributed DoS attacks, those users
  1097. can switch to accessing Bob's service via the Tor rendezvous system.
  1098. \SubSection{Integration with user applications}
  1099. Bob configures his onion proxy to know the local IP and port of his
  1100. service, a strategy for authorizing clients, and a public key. Bob
  1101. publishes the public key, an expiration time (``not valid after''), and
  1102. the current introduction points for his service into the DHT, all indexed
  1103. by the hash of the public key. Note that Bob's webserver is unmodified,
  1104. and doesn't even know that it's hidden behind the Tor network.
  1105. Alice's applications also work unchanged---her client interface
  1106. remains a SOCKS proxy. We encode all of the necessary information
  1107. into the fully qualified domain name Alice uses when establishing her
connection. Location-hidden services use a virtual top-level domain
called `.onion': thus hostnames take the form x.y.onion, where x is the
authentication cookie and y encodes the hash of PK. Alice's onion proxy
examines addresses; if they are destined for a hidden server, it decodes
the PK and starts the rendezvous as described above.
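For example, the onion proxy's address handling reduces to a small
amount of hostname parsing; the sketch below (Python) assumes the
x.y.onion form described above and leaves the encodings of the cookie
and key hash unspecified:
\begin{verbatim}
def parse_onion_address(hostname):
    # Returns (auth_cookie, pk_hash) for an x.y.onion address, or None
    # if the stream should instead exit to the network normally.
    labels = hostname.lower().split(".")
    if labels[-1] != "onion":
        return None
    if len(labels) != 3:
        raise ValueError("expected an address of the form x.y.onion")
    auth_cookie, pk_hash = labels[0], labels[1]
    return auth_cookie, pk_hash
\end{verbatim}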
\SubSection{Previous rendezvous work}
  1114. Ian Goldberg developed a similar notion of rendezvous points for
  1115. low-latency anonymity systems \cite{ian-thesis}. His design differs from
  1116. ours in three ways. First, Goldberg suggests that Alice should manually
hunt down a current location of the service via Gnutella, whereas our
use of CFS makes lookup faster, more robust, and transparent to the
  1119. user. Second, in Tor the client and server negotiate ephemeral keys
  1120. via Diffie-Hellman, so plaintext is not exposed at any point. Third,
  1121. our design tries to minimize the exposure associated with running the
  1122. service, to encourage volunteers to offer introduction and rendezvous
  1123. point services. Tor's introduction points do not output any bytes to the
clients, and the rendezvous points do not know the client or the server,
and cannot read the data being transmitted. The indirection scheme is
  1126. also designed to include authentication/authorization---if Alice doesn't
  1127. include the right cookie with her request for service, Bob need not even
  1128. acknowledge his existence.
  1129. \Section{Attacks and Defenses}
  1130. \label{sec:attacks}
  1131. % XXX In sec4 we should talk about bandwidth classes, which will
  1132. % enable us to accept a lot more ORs than if we continue to
  1133. % require 10mbit connections for all ORs. -RD
  1134. % XXX In sec9, we should note that we are currently
  1135. % working with the designers of MorphMix to render our two systems
  1136. % interoperable. So far, this seems to be relatively straightforward.
  1137. % Interoperability will allow testing and direct comparison of the two
  1138. % rather different designs.
  1139. Below we summarize a variety of attacks, and discuss how well our
  1140. design withstands them.
  1141. \subsubsection*{Passive attacks}
  1142. \begin{tightlist}
\item \emph{Observing user traffic patterns.} Observing the connection
between a user and her first onion router will not reveal to whom
the user is connecting or what information is being sent. It will
  1146. reveal patterns of user traffic (both sent and received). Simple
  1147. profiling of user connection patterns is not generally possible,
  1148. however, because multiple application streams may be operating
  1149. simultaneously or in series over a single circuit. Thus, further
  1150. processing is necessary to discern even these usage patterns.
  1151. \item \emph{Observing user content.} At the user end, content is
  1152. encrypted; however, connections from the network to arbitrary
  1153. websites may not be. Further, a responding website may itself be
  1154. hostile. Filtering content is not a primary goal of
  1155. Onion Routing; nonetheless, Tor can directly make use of Privoxy and
  1156. related filtering services to anonymize application data streams.
  1157. \item \emph{Option distinguishability.} Configuration options can be a
  1158. source of distinguishable patterns. In general there is economic
  1159. incentive to allow preferential services \cite{econymics}, and some
degree of configuration choice can attract users, whose presence
in turn improves anonymity. So far, however, we have
  1162. not found a compelling use case in Tor for any client-configurable
  1163. options. Thus, clients are currently distinguishable only by their
  1164. behavior.
  1165. %XXX Actually, circuitrebuildperiod is such an option. -RD
\item \emph{End-to-end timing correlation.} Tor only minimally hides
  1167. end-to-end timing correlations. An attacker watching patterns of
  1168. traffic at the initiator and the responder will be
  1169. able to confirm the correspondence with high probability. The
  1170. greatest protection currently against such confirmation is to hide
  1171. the connection between the onion proxy and the first Tor node,
  1172. either because it is local or behind a firewall. This approach
  1173. requires an observer to separate traffic originating at the onion
router from traffic that passes through it; but because we do not mix
  1175. or pad, this does not provide much defense.
\item \emph{End-to-end size correlation.} Simple packet counting
  1177. without timing consideration will also be effective in confirming
  1178. endpoints of a stream. However, even without padding, we have some
  1179. limited protection: the leaky pipe topology means different numbers
  1180. of packets may enter one end of a circuit than exit at the other.
  1181. \item \emph{Website fingerprinting.} All the above passive
  1182. attacks that are at all effective are traffic confirmation attacks.
  1183. This puts them outside our general design goals. There is also
  1184. a passive traffic analysis attack that is potentially effective.
  1185. Rather than searching exit connections for timing and volume
  1186. correlations, the adversary may build up a database of
  1187. ``fingerprints'' containing file sizes and access patterns for many
  1188. interesting websites. He can confirm a user's connection to a given
  1189. site simply by consulting the database. This attack has
  1190. been shown to be effective against SafeWeb \cite{hintz-pet02}. But
  1191. Tor is not as vulnerable as SafeWeb to this attack: there is the
  1192. possibility that multiple streams are exiting the circuit at
  1193. different places concurrently. Also, fingerprinting will be limited to
  1194. the granularity of cells, currently 256 bytes. Other defenses include
  1195. larger cell sizes and/or minimal padding schemes that group websites
  1196. into large sets. But this remains an open problem. Link
  1197. padding or long-range dummies may also make fingerprints harder to
  1198. detect.\footnote{Note that
  1199. such fingerprinting should not be confused with the latency attacks
  1200. of \cite{back01}. Those require a fingerprint of the latencies of
  1201. all circuits through the network, combined with those from the
  1202. network edges to the targeted user and the responder website. While
these are in principle feasible and surprises are always possible,
  1204. these constitute a much more complicated attack, and there is no
  1205. current evidence of their practicality.}
  1206. %\item \emph{Content analysis.} Tor explicitly provides no content
  1207. % rewriting for any protocol at a higher level than TCP. When
  1208. % protocol cleaners are available, however (as Privoxy is for HTTP),
  1209. % Tor can integrate them to address these attacks.
  1210. \end{tightlist}
  1211. \subsubsection*{Active attacks}
  1212. \begin{tightlist}
  1213. \item \emph{Compromise keys.}
  1214. If a TLS session key is compromised, an attacker
can view all the cells on that TLS connection until the key is
  1216. renegotiated. (These cells are themselves encrypted.) If a TLS
  1217. private key is compromised, the attacker can fool others into
  1218. thinking that he is the affected OR, but still cannot accept any
  1219. connections. \\
  1220. If a circuit session key is compromised, the
  1221. attacker can unwrap a single layer of encryption from the relay
  1222. cells traveling along that circuit. (Only nodes on the circuit can
  1223. see these cells.) If an onion private key is compromised, the attacker
  1224. can impersonate the OR in circuits, but only if the attacker has
  1225. also compromised the OR's TLS private key, or is running the
  1226. previous OR in the circuit. (This compromise affects newly created
  1227. circuits, but because of perfect forward secrecy, the attacker
  1228. cannot hijack old circuits without compromising their session keys.)
  1229. In any case, periodic key rotation limits the window of opportunity
  1230. for compromising these keys. \\
  1231. Only by
  1232. compromising a node's identity key can an attacker replace that
  1233. node indefinitely, by sending new forged descriptors to the
  1234. directory servers. Finally, an attacker who can compromise a
  1235. directory server's identity key can influence every client's view
  1236. of the network---but only to the degree made possible by gaining a
vote with the rest of the directory servers.
  1238. \item \emph{Iterated compromise.} A roving adversary who can
compromise ORs (by system intrusion, legal coercion, or extralegal
coercion) could march down the circuit compromising the
  1241. nodes until he reaches the end. Unless the adversary can complete
  1242. this attack within the lifetime of the circuit, however, the ORs
  1243. will have discarded the necessary information before the attack can
  1244. be completed. (Thanks to the perfect forward secrecy of session
  1245. keys, the attacker cannot force nodes to decrypt recorded
  1246. traffic once the circuits have been closed.) Additionally, building
  1247. circuits that cross jurisdictions can make legal coercion
  1248. harder---this phenomenon is commonly called ``jurisdictional
  1249. arbitrage.'' The Java Anon Proxy project recently experienced the
  1250. need for this approach, when
  1251. the German government successfully ordered them to add a backdoor to
  1252. all of their nodes \cite{jap-backdoor}.
  1253. \item \emph{Run a recipient.} By running a Web server, an adversary
  1254. trivially learns the timing patterns of users connecting to it, and
  1255. can introduce arbitrary patterns in its responses. This can greatly
facilitate end-to-end attacks: if the adversary can induce certain
users to connect to his webserver (perhaps by advertising
content targeted at those users), he now holds one end of their
  1259. connection. Additionally, there is a danger that the application
  1260. protocols and associated programs can be induced to reveal
  1261. information about the initiator. Tor does not aim to solve this problem;
  1262. we depend on Privoxy and similar protocol cleaners.
  1263. \item \emph{Run an onion proxy.} It is expected that end users will
  1264. nearly always run their own local onion proxy. However, in some
  1265. settings, it may be necessary for the proxy to run
  1266. remotely---typically, in an institutional setting which wants
  1267. to monitor the activity of those connecting to the proxy.
  1268. Compromising an onion proxy means compromising all future connections
  1269. through it.
\item \emph{DoS non-observed nodes.} An observer who can watch only part
of the Tor network can increase the value of this traffic analysis
  1272. by attacking non-observed nodes to shut them down, reduce
  1273. their reliability, or persuade users that they are not trustworthy.
  1274. The best defense here is robustness.
  1275. \item \emph{Run a hostile node.} In addition to the abilities of a
  1276. local observer, an isolated hostile node can create circuits through
  1277. itself, or alter traffic patterns, to affect traffic at
  1278. other nodes. Its ability to directly DoS a neighbor is now limited
  1279. by bandwidth throttling. Nonetheless, in order to compromise the
anonymity of an endpoint of a circuit by its observations, a
  1281. hostile node is only significant if it is immediately adjacent to
  1282. that endpoint.
  1283. \item \emph{Run multiple hostile nodes.} If an adversary is able to
  1284. run multiple ORs, and is able to persuade the directory servers
that those ORs are trustworthy and independent, then occasionally
  1286. some user will choose one of those ORs for the start and another
  1287. as the end of a circuit. When this happens, the user's
  1288. anonymity is compromised for those streams. If an adversary can
  1289. control $m$ out of $N$ nodes, he should be able to correlate at most
  1290. $\frac{m}{N}$ of the traffic in this way---although an adversary
  1291. % XXX Isn't this (m/N)^2 ? -RD
  1292. could possibly attract a disproportionately large amount of traffic
  1293. by running an exit node with an unusually permissive exit policy.
  1294. \item \emph{Compromise entire path.} Anyone compromising both
  1295. endpoints of a circuit can confirm this with high probability. If
  1296. the entire path is compromised, this becomes a certainty; however,
  1297. the added benefit to the adversary of such an attack is small in
  1298. relation to the difficulty.
  1299. \item \emph{Run a hostile directory server.} Directory servers control
  1300. admission to the network. However, because the network directory
  1301. must be signed by a majority of servers, the threat of a single
  1302. hostile server is minimized.
  1303. \item \emph{Selectively DoS a Tor node.} As noted, neighbors are
  1304. bandwidth limited; however, it is possible to open up sufficient
  1305. circuits that converge at a single onion router to
  1306. overwhelm its network connection, its ability to process new
  1307. circuits, or both.
  1308. % We aim to address something like this attack with our congestion
  1309. % control algorithm.
  1310. \item \emph{Introduce timing into messages.} This is simply a stronger
  1311. version of passive timing attacks already discussed above.
  1312. \item \emph{Tagging attacks.} A hostile node could ``tag'' a
  1313. cell by altering it. This would render it unreadable, but if the
  1314. stream is, for example, an unencrypted request to a Web site,
  1315. the garbled content coming out at the appropriate time could confirm
  1316. the association. However, integrity checks on cells prevent
  1317. this attack.
  1318. \item \emph{Replace contents of unauthenticated protocols.} When
  1319. relaying an unauthenticated protocol like HTTP, a hostile exit node
  1320. can impersonate the target server. Thus, whenever possible, clients
  1321. should prefer protocols with end-to-end authentication.
  1322. \item \emph{Replay attacks.} Some anonymity protocols are vulnerable
  1323. to replay attacks. Tor is not; replaying one side of a handshake
  1324. will result in a different negotiated session key, and so the rest
of the recorded session cannot be used.
  1326. % ``NonSSL Anonymizer''?
  1327. \item \emph{Smear attacks.} An attacker could use the Tor network to
engage in socially disapproved acts, so as to try to bring the
  1329. entire network into disrepute and get its operators to shut it down.
  1330. Exit policies can help reduce the possibilities for abuse, but
  1331. ultimately, the network will require volunteers who can tolerate
  1332. some political heat.
  1333. \item \emph{Distribute hostile code.} An attacker could trick users
  1334. into running subverted Tor software that did not, in fact, anonymize
  1335. their connections---or worse, trick ORs into running weakened
  1336. software that provided users with less anonymity. We address this
  1337. problem (but do not solve it completely) by signing all Tor releases
  1338. with an official public key, and including an entry in the directory
  1339. describing which versions are currently believed to be secure. To
  1340. prevent an attacker from subverting the official release itself
  1341. (through threats, bribery, or insider attacks), we provide all
  1342. releases in source code form, encourage source audits, and
  1343. frequently warn our users never to trust any software (even from
  1344. us!) that comes without source.
  1345. \end{tightlist}
  1346. \subsubsection*{Directory attacks}
  1347. \begin{tightlist}
  1348. \item \emph{Destroy directory servers.} If a few directory
  1349. servers drop out of operation, the others still arrive at a final
  1350. directory. So long as any directory servers remain in operation,
  1351. they will still broadcast their views of the network and generate a
  1352. consensus directory. (If more than half are destroyed, this
  1353. directory will not, however, have enough signatures for clients to
  1354. use it automatically; human intervention will be necessary for
  1355. clients to decide whether to trust the resulting directory, or continue
  1356. to use the old valid one.)
  1357. \item \emph{Subvert a directory server.} By taking over a directory
  1358. server, an attacker can influence (but not control) the final
  1359. directory. Since ORs are included or excluded by majority vote,
  1360. the corrupt directory can at worst cast a tie-breaking vote to
  1361. decide whether to include marginal ORs. How often such marginal
  1362. cases will occur in practice, however, remains to be seen.
  1363. \item \emph{Subvert a majority of directory servers.} If the
  1364. adversary controls more than half of the directory servers, he can
  1365. decide on a final directory, and thus can include as many
  1366. compromised ORs in the final directory as he wishes. Other than
  1367. trying to ensure that directory server operators are truly
  1368. independent and resistant to attack, Tor does not address this
  1369. possibility.
  1370. \item \emph{Encourage directory server dissent.} The directory
  1371. agreement protocol requires that directory server operators agree on
  1372. the list of directory servers. An adversary who can persuade some
  1373. of the directory server operators to distrust one another could
  1374. split the quorum into mutually hostile camps, thus partitioning
  1375. users based on which directory they used. Tor does not address
  1376. this attack.
  1377. \item \emph{Trick the directory servers into listing a hostile OR.}
  1378. Our threat model explicitly assumes directory server operators will
  1379. be able to filter out most hostile ORs. If this is not true, an
  1380. attacker can flood the directory with compromised servers.
  1381. \item \emph{Convince the directories that a malfunctioning OR is
  1382. working.} In the current Tor implementation, directory servers
assume that if they can start a TLS connection to an OR, that OR
  1384. must be running correctly. It would be easy for a hostile OR to
  1385. subvert this test by only accepting TLS connections from ORs, and
  1386. ignoring all cells. Thus, directory servers must actively test ORs
  1387. by building circuits and streams as appropriate. The benefits and
  1388. hazards of a similar approach are discussed in \cite{mix-acc}.
  1389. \end{tightlist}
  1390. \subsubsection*{Attacks against rendezvous points}
  1391. \begin{tightlist}
  1392. \item \emph{Make many introduction requests.} An attacker could
  1393. attempt to deny Bob service by flooding his Introduction Point with
  1394. requests. Because the introduction point can block requests that
  1395. lack authentication tokens, however, Bob can restrict the volume of
  1396. requests he receives, or require a certain amount of computation for
  1397. every request he receives.
  1398. \item \emph{Attack an introduction point.} An attacker could try to
  1399. disrupt a location-hidden service by disabling its introduction
  1400. point. But because a service's identity is attached to its public
  1401. key, not its introduction point, the service can simply re-advertise
  1402. itself at a different introduction point.
  1403. \item \emph{Compromise an introduction point.} If an attacker controls
  1404. an introduction point for a service, it can flood the service with
  1405. introduction requests, or prevent valid introduction requests from
  1406. reaching the hidden server. The server will notice a flooding
  1407. attempt if it receives many introduction requests. To notice
  1408. blocking of valid requests, however, the hidden server should
periodically test the introduction point by sending it introduction
requests, and making sure it receives them.
  1411. \item \emph{Compromise a rendezvous point.} Controlling a rendezvous
  1412. point gains an attacker no more than controlling any other OR along
a circuit, since all data passing through the rendezvous point is protected
by the session key shared by the client and server.
  1415. \end{tightlist}
  1416. \Section{Open Questions in Low-latency Anonymity}
  1417. \label{sec:maintaining-anonymity}
  1418. % There must be a better intro than this! -NM
  1419. In addition to the open problems discussed in
  1420. Section~\ref{subsec:non-goals}, many other questions remain to be
  1421. solved by future research before we can be confident that we
  1422. have built a secure low-latency anonymity service.
  1423. Many of these open issues are questions of balance. For example,
  1424. how often should users rotate to fresh circuits? Too-frequent
  1425. rotation is inefficient and expensive, but too-infrequent rotation
makes the user's traffic linkable. Instead of opening a fresh
circuit, clients can also limit linkability by exiting from a middle point
of the circuit, or by truncating and re-extending the circuit, but
more analysis is needed to determine the proper trade-off.
  1430. %[XXX mention predecessor attacks?]
  1431. A similar question surrounds timing of directory operations:
  1432. how often should directories be updated? With too-infrequent
  1433. updates clients receive an inaccurate picture of the network; with
  1434. too-frequent updates the directory servers are overloaded.
  1435. %do different exit policies at different exit nodes trash anonymity sets,
  1436. %or not mess with them much?
  1437. %
  1438. %% Why would they? By routing traffic to certain nodes preferentially?
  1439. %[XXX Choosing paths and path lengths: I'm not writing this bit till
  1440. % Arma's pathselection stuff is in. -NM]
  1441. %%%% Roger said that he'd put a path selection paragraph into section
  1442. %%%% 4 that would replace this.
  1443. %
  1444. %I probably should have noted that this means loops will be on at least
  1445. %five hop routes, which should be rare given the distribution. I'm
  1446. %realizing that this is reproducing some of the thought that led to a
  1447. %default of five hops in the original onion routing design. There were
  1448. %some different assumptions, which I won't spell out now. Note that
  1449. %enclave level protections really change these assumptions. If most
  1450. %circuits are just two hops, then just a single link observer will be
  1451. %able to tell that two enclaves are communicating with high probability.
  1452. %So, it would seem that enclaves should have a four node minimum circuit
  1453. %to prevent trivial circuit insider identification of the whole circuit,
  1454. %and three hop minimum for circuits from an enclave to some nonclave
  1455. %responder. But then... we would have to make everyone obey these rules
  1456. %or a node that through timing inferred it was on a four hop circuit
  1457. %would know that it was probably carrying enclave to enclave traffic.
  1458. %Which... if there were even a moderate number of bad nodes in the
  1459. %network would make it advantageous to break the connection to conduct
  1460. %a reformation intersection attack. Ahhh! I gotta stop thinking
  1461. %about this and work on the paper some before the family wakes up.
  1462. %On Sat, Oct 25, 2003 at 06:57:12AM -0400, Paul Syverson wrote:
  1463. %> Which... if there were even a moderate number of bad nodes in the
  1464. %> network would make it advantageous to break the connection to conduct
  1465. %> a reformation intersection attack. Ahhh! I gotta stop thinking
  1466. %> about this and work on the paper some before the family wakes up.
  1467. %This is the sort of issue that should go in the 'maintaining anonymity
  1468. %with tor' section towards the end. :)
  1469. %Email from between roger and me to beginning of section above. Fix and move.
  1470. Throughout this paper, we have assumed that end-to-end traffic
  1471. analysis will immediately and automatically defeat a low-latency
  1472. anonymity system. Even high-latency anonymity
  1473. systems can be vulnerable to end-to-end traffic analysis, if the
  1474. traffic volumes are high enough, and if users' habits are sufficiently
  1475. distinct \cite{limits-open,statistical-disclosure}. \emph{Can
  1476. anything be done to make low-latency systems resist these attacks as
  1477. well as high-latency systems?}
  1478. Tor already makes some effort to conceal the starts and
  1479. ends of streams by wrapping all long-range control commands in
  1480. identical-looking relay cells, but more analysis is needed. Link
  1481. padding could frustrate passive observers who count packets; long-range
  1482. padding could work against observers who own the first hop in a
  1483. circuit. But more research needs to be done in order to find an
  1484. efficient and practical approach. Volunteers prefer not to run
  1485. constant-bandwidth padding; but more sophisticated traffic shaping
  1486. approaches remain somewhat unanalyzed.
  1487. %[XXX is this so?]
  1488. Recent work
  1489. on long-range padding \cite{defensive-dropping} shows promise. One
  1490. could also try to reduce correlation in packet timing by batching and
  1491. re-ordering packets, but it is unclear whether this could improve
  1492. anonymity without introducing so much latency as to render the
  1493. network unusable.
  1494. Even if passive timing attacks were wholly solved, active timing
  1495. attacks would remain. \emph{What can
  1496. be done to address attackers who can introduce timing patterns into
  1497. a user's traffic?} % [XXX mention likely approaches]
  1498. %%% I think we cover this by framing the problem as ``Can we make
  1499. %%% end-to-end characteristics of low-latency systems as good as
  1500. %%% those of high-latency systems?'' Eliminating long-term
  1501. %%% intersection is a hard problem.
  1502. %
  1503. %Even regardless of link padding from Alice to the cloud, there will be
  1504. %times when Alice is simply not online. Link padding, at the edges or
  1505. %inside the cloud, does not help for this.
  1506. In order to scale to large numbers of users, and to prevent an
  1507. attacker from observing the whole network at once, it may be necessary
  1508. for low-latency anonymity systems to support far more servers than Tor
  1509. currently anticipates. This introduces several issues. First, if
  1510. approval by a centralized set of directory servers is no longer
  1511. feasible, what mechanism should be used to prevent adversaries from
  1512. signing up many spurious servers?
  1513. Second, if clients can no longer have a complete
picture of the network at all times, how should they perform
  1515. discovery while preventing attackers from manipulating or exploiting
  1516. gaps in client knowledge? Third, if there are too many servers
  1517. for every server to constantly communicate with every other, what kind
  1518. of non-clique topology should the network use? Restricted-route
  1519. topologies promise comparable anonymity with better scalability
  1520. \cite{danezis-pets03}, but whatever topology we choose, we need some
  1521. way to keep attackers from manipulating their position within it.
  1522. Fourth, since no centralized authority is tracking server reliability,
how do we prevent unreliable servers from rendering the network
  1524. unusable? Fifth, do clients receive so much anonymity benefit from
  1525. running their own servers that we should expect them all to do so, or
  1526. do we need to find another incentive structure to motivate them?
  1527. (Tarzan and MorphMix present possible solutions.)
  1528. % [[ XXX how to approve new nodes (advogato, sybil, captcha (RTT));]
  1529. Alternatively, it may be the case that one of these problems proves
  1530. intractable, or that the drawbacks to many-server systems prove
  1531. greater than the benefits. Nevertheless, we may still do well to
  1532. consider non-clique topologies. A cascade topology may provide more
  1533. defense against traffic confirmation.
  1534. % XXX Why would it? Cite. -NM
  1535. Does the hydra (many inputs, few outputs) topology work
  1536. better? Are we going to get a hydra anyway because most nodes will be
  1537. middleman nodes?
  1538. As mentioned in Section~\ref{subsec:dos}, Tor could improve its
  1539. robustness against node failure by buffering transmitted stream data
  1540. at the network's edges until the data has been acknowledged by the
  1541. other end of the stream. The efficacy of this approach remains to be
  1542. tested, however, and there may be more effective means for ensuring
  1543. reliable connections in the presence of unreliable nodes.
  1544. %%% Keeping this original paragraph for a little while, since it
  1545. %%% is not the same as what's written there now.
  1546. %
  1547. %Because Tor depends on TLS and TCP to provide a reliable transport,
  1548. %when one of the servers goes down, all the circuits (and thus streams)
  1549. %traveling over that server must break. This reduces anonymity because
  1550. %everybody needs to reconnect right then (does it? how much?) and
  1551. %because exit connections all break at the same time, and it also harms
  1552. %usability. It seems the problem is even worse in a peer-to-peer
  1553. %environment, because so far such systems don't really provide an
  1554. %incentive for nodes to stay connected when they're done browsing, so
  1555. %we would expect a much higher churn rate than for onion routing.
  1556. %there ways of allowing streams to survive the loss of a node in the
  1557. %path?
  1558. % Roger or Paul suggested that we say something about incentives,
  1559. % too, but I think that's a better candidate for our future work
  1560. % section. After all, we will doubtlessly learn very much about why
  1561. % people do or don't run and use Tor in the near future. -NM
  1562. %We should run a squid at each exit node, to provide comparable anonymity
  1563. %to private exit nodes for cache hits, to speed everything up, and to
  1564. %have a buffer for funny stuff coming out of port 80.
  1565. % on the other hand, it hampers PFS, because ORs have pages in the cache.
  1566. %I previously elsewhere suggested bulk transfer proxies to carve
  1567. %up big things so that they could be downloaded in less noticeable
  1568. %pieces over several normal looking connections. We could suggest
  1569. %similarly one or a handful of squid nodes that might serve up
  1570. %some of the more sensitive but common material, especially if
  1571. %the relevant sites didn't want to or couldn't run their own OR.
  1572. %This would be better than having everyone run a squid which would
  1573. %just help identify after the fact the different history of that
  1574. %node's activity. All this kind of speculation needs to move to
  1575. %future work section I guess. -PS]
  1576. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
  1577. \Section{Future Directions}
  1578. \label{sec:conclusion}
  1579. Tor brings together many innovations into
  1580. a unified deployable system. But there are still several attacks that
  1581. work quite well, as well as a number of sustainability and run-time
  1582. issues remaining to be ironed out. In particular:
  1583. % Many of these (Scalability, cover traffic, morphmix)
  1584. % are duplicates from open problems.
  1585. %
  1586. \begin{tightlist}
  1587. \item \emph{Scalability:} Tor's emphasis on design simplicity and
  1588. deployability has led us to adopt a clique topology, a
semi-centralized model for directories and trust, and a
  1590. full-network-visibility model for client knowledge. None of these
  1591. properties will scale to more than a few hundred servers, at most.
  1592. Promising approaches to better scalability exist (see
  1593. Section~\ref{sec:maintaining-anonymity}), but more deployment
  1594. experience would be helpful in learning the relative importance of
  1595. these bottlenecks.
  1596. \item \emph{Cover traffic:} Currently we avoid cover traffic because
  1597. of its clear costs in performance and bandwidth, and because its
  1598. security benefits are not well understood. With more research
  1599. \cite{SS03,defensive-dropping}, the price/value ratio may change,
  1600. both for link-level cover traffic and also long-range cover traffic.
\item \emph{Better directory distribution:} Even with the threshold
directory agreement algorithm described in Section~\ref{subsec:dirservers},
the directory servers are still trust bottlenecks. We must find more
decentralized yet practical ways to distribute up-to-date snapshots of
network status without introducing new attacks. Directory
retrieval also presents a scaling problem, since clients currently
download a description of the entire network state every 15
minutes. As the state grows larger and clients become more numerous, we
may need to move to a solution in which clients receive only
incremental updates to directory state (see the illustrative sketch
after this list), or in which directories are
cached at the ORs to avoid high loads on the directory servers.
% XXX this is a design paper, not an implementation paper. the design
% says that they're already cached at the ORs. Agree/disagree?
\item \emph{Implementing location-hidden servers:} Although
Section~\ref{sec:rendezvous} describes a design for rendezvous
points and location-hidden servers, this feature has not yet been
implemented. As we implement it, we will likely encounter additional
issues, both in terms of usability and anonymity, that must be
resolved.
\item \emph{Further specification review:} Although we have a public,
byte-level specification for the Tor protocols, this specification
has not received extensive external review. We hope that as Tor
becomes more widely deployed, more people will become interested in
examining our specification.
\item \emph{Wider-scale deployment:} The original goal of Tor was to
gain experience in deploying an anonymizing overlay network, and to
learn from having actual users. We are now at the point in design
and development where we can start deploying a wider network. Once
we are ready for actual users, we will doubtlessly be better
able to evaluate some of our design decisions, including our
robustness/latency trade-offs, our performance trade-offs (including
cell size), our abuse-prevention mechanisms, and
our overall usability.
% XXX large and small cells on same network.
% XXX work with morphmix spec
\end{tightlist}
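To make the incremental-update idea in the directory-distribution item
above concrete, the following sketch is illustrative only: it is not part
of the current Tor design or specification, and every name in it is
hypothetical. It shows how a client holding an older snapshot of the
network state could apply a small diff of changed and removed router
descriptors instead of re-downloading every descriptor:
\begin{verbatim}
# Illustrative sketch only -- not the Tor directory protocol.
# A network-state snapshot is modeled as a dict: router name -> descriptor.
import hashlib
import json

def state_digest(state):
    """Digest identifying a snapshot, so a client can name what it holds."""
    return hashlib.sha1(json.dumps(state, sort_keys=True).encode()).hexdigest()

def make_diff(old_state, new_state):
    """Descriptors added or changed, plus the names of removed routers."""
    changed = {n: d for n, d in new_state.items() if old_state.get(n) != d}
    removed = [n for n in old_state if n not in new_state]
    return {"changed": changed, "removed": removed}

def apply_diff(old_state, diff):
    """Reconstruct the new snapshot from the old one and a diff."""
    state = dict(old_state)
    state.update(diff["changed"])
    for name in diff["removed"]:
        state.pop(name, None)
    return state

# Example: B's descriptor changes, C leaves, and D joins; the client fetches
# a three-entry diff instead of re-downloading all descriptors.
old = {"A": "desc-a-v1", "B": "desc-b-v1", "C": "desc-c-v1"}
new = {"A": "desc-a-v1", "B": "desc-b-v2", "D": "desc-d-v1"}
diff = make_diff(old, new)
assert apply_diff(old, diff) == new
assert state_digest(apply_diff(old, diff)) == state_digest(new)
\end{verbatim}
In such a scheme, a client could report the digest of the snapshot it
already holds to a directory cache; if the cache recognizes that digest it
returns only the diff, and otherwise the client falls back to a full
download.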
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% commented out for anonymous submission
%\Section{Acknowledgments}
% Peter Palfrader, Geoff Goodell, Adam Shostack, Joseph Sokol-Margolis,
% John Bashinski
% for editing and comments
% Matej Pfajfar, Andrei Serjantov, Marc Rennhard for design discussions
% Bram Cohen for congestion control discussions
% Adam Back for suggesting telescoping circuits
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\bibliographystyle{latex8}
\bibliography{tor-design}
\end{document}
% Style guide:
% U.S. spelling
% avoid contractions (it's, can't, etc.)
% prefer ``for example'' or ``such as'' to e.g.
% prefer ``that is'' to i.e.
% 'mix', 'mixes' (as noun)
% 'mix-net'
% 'mix', 'mixing' (as verb)
% 'middleman' [Not with a hyphen; the hyphen has been optional
% since Middle English.]
% 'nymserver'
% 'Cypherpunk', 'Cypherpunks', 'Cypherpunk remailer'
% 'Onion Routing design', 'onion router' [note capitalization]
% 'SOCKS'
% Try not to use \cite as a noun.
% 'Authorizating' sounds great, but it isn't a word.
% 'First, second, third', not 'Firstly, secondly, thirdly'.
% 'circuit', not 'channel'
% Typography: no space on either side of an em dash---ever.
% Hyphens are for multi-part words; en dashes imply movement or
% opposition (The Alice--Bob connection); and em dashes are
% for punctuation---like that.
% A relay cell; a control cell; a \emph{create} cell; a
% \emph{relay truncated} cell. Never ``a \emph{relay truncated}.''
%
% 'Substitute ``Damn'' every time you're inclined to write ``very;'' your
% editor will delete it and the writing will be just as it should be.'
% -- Mark Twain