| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112 | <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"           "DTD/xhtml1-transitional.dtd"><html><meta name="GENERATOR" content="TtH 3.77"><meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1"> <style type="text/css"> div.p { margin-top: 7pt;}</style> <style type="text/css"><!-- td div.comp { margin-top: -0.6ex; margin-bottom: -1ex;} td div.comb { margin-top: -0.6ex; margin-bottom: -.6ex;} td div.hrcomp { line-height: 0.9; margin-top: -0.8ex; margin-bottom: -1ex;} td div.norm {line-height:normal;} span.roman {font-family: serif; font-style: normal; font-weight: normal;} span.overacc2 {position: relative;  left: .8em; top: -1.2ex;} span.overacc1 {position: relative;  left: .6em; top: -1.2ex;} --></style><title> Design of a blocking-resistant anonymity system\DRAFT</title><h1 align="center">Design of a blocking-resistant anonymity system<br />DRAFT </h1><div class="p"><!----></div><h3 align="center">Roger Dingledine, Nick Mathewson </h3><div class="p"><!----></div><h2> Abstract</h2>Internet censorship is on the rise as websites around the world areincreasingly blocked by government-level firewalls.  Although popularanonymizing networks like Tor were originally designed to keep attackers fromtracing people's activities, many people are also using them to evade localcensorship.  But if the censor simply denies access to the Tor networkitself, blocked users can no longer benefit from the security Tor offers.<div class="p"><!----></div>Here we describe a design that builds upon the current Tor networkto provide an anonymizing network that resists blockingby government-level attackers.<div class="p"><!----></div> <h2><a name="tth_sEc1">1</a>  Introduction and Goals</h2><div class="p"><!----></div>Anonymizing networks like Tor [<a href="#tor-design" name="CITEtor-design">11</a>] bounce traffic around anetwork of encrypting relays.  Unlike encryption, which hides only <i>what</i>is said, these networks also aim to hide who is communicating with whom, whichusers are using which websites, and similar relations.  These systems have abroad range of users, including ordinary citizens who want to avoid beingprofiled for targeted advertisements, corporations who don't want to revealinformation to their competitors, and law enforcement and governmentintelligence agencies who need to do operations on the Internet without beingnoticed.<div class="p"><!----></div>Historical anonymity research has focused on anattacker who monitors the user (call her Alice) and tries to discover heractivities, yet lets her reach any piece of the network. In more modernthreat models such as Tor's, the adversary is allowed to perform activeattacks such as modifying communications to trick Aliceinto revealing her destination, or intercepting some connectionsto run a man-in-the-middle attack. But these systems still assume thatAlice can eventually reach the anonymizing network.<div class="p"><!----></div>An increasing number of users are using the Tor softwareless for its anonymity properties than for its censorshipresistance properties — if they use Tor to access Internet sites likeWikipediaand Blogspot, they are no longer affected by local censorshipand firewall rules. In fact, an informal user studyshowed China as the third largest user basefor Tor clients, with perhaps ten thousand people accessing the Tornetwork from China each day.<div class="p"><!----></div>The current Tor design is easy to block if the attacker controls Alice'sconnection to the Tor network — by blocking the directory authorities,by blocking all the server IP addresses in the directory, or by filteringbased on the fingerprint of the Tor TLS handshake. Here we describe anextended design that builds upon the current Tor network to provide ananonymizingnetwork that resists censorship as well as anonymity-breaking attacks.In section <a href="#sec:adversary">2</a> we discuss our threat model — that is,the assumptions we make about our adversary. Section <a href="#sec:current-tor">3</a>describes the components of the current Tor design and how they can beleveraged for a new blocking-resistant design. Section <a href="#sec:related">4</a>explains the features and drawbacks of the currently deployed solutions.In sections <a href="#sec:bridges">5</a> through <a href="#sec:discovery">7</a>, we explore thecomponents of our designs in detail.  Section <a href="#sec:security">8</a> considerssecurity implications and Section <a href="#sec:reachability">9</a> presents otherissues with maintaining connectivity and sustainability for the design.Section <a href="#sec:future">10</a> speculates about future more complex designs,and finally Section <a href="#sec:conclusion">11</a> summarizes our next steps andrecommendations.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div> <h2><a name="tth_sEc2"><a name="sec:adversary">2</a>  Adversary assumptions</h2></a><div class="p"><!----></div>To design an effective anti-censorship tool, we need a good model for thegoals and resources of the censors we are evading.  Otherwise, we riskspending our effort on keeping the adversaries from doing things they have nointerest in doing, and thwarting techniques they do not use.The history of blocking-resistance designs is littered with conflictingassumptions about what adversaries to expect and what problems arein the critical path to a solution. Here we describe our bestunderstanding of the current situation around the world.<div class="p"><!----></div>In the traditional security style, we aim to defeat a strongattacker — if we can defend against this attacker, we inherit protectionagainst weaker attackers as well.  After all, we want a general designthat will work for citizens of China, Thailand, and other censoredcountries; forwhistleblowers in firewalled corporate networks; and for people inunanticipated oppressive situations. In fact, by designing witha variety of adversaries in mind, we can take advantage of the fact thatadversaries will be in different stages of the arms race at each location,so a server blocked in one locale can still be useful in others.<div class="p"><!----></div>We assume that the attackers' goals are somewhat complex.<dl compact="compact"> <dt><b></b></dt>	<dd><li>The attacker would like to restrict the flow of certain kinds of  information, particularly when this information is seen as embarrassing to  those in power (such as information about rights violations or corruption),  or when it enables or encourages others to oppose them effectively (such as  information about opposition movements or sites that are used to organize  protests).</dd> <dt><b></b></dt>	<dd><li>As a second-order effect, censors aim to chill citizens' behavior by  creating an impression that their online activities are monitored.</dd> <dt><b></b></dt>	<dd><li>In some cases, censors make a token attempt to block a few sites for  obscenity, blasphemy, and so on, but their efforts here are mainly for  show. In other cases, they really do try hard to block such content.</dd> <dt><b></b></dt>	<dd><li>Complete blocking (where nobody at all can ever download censored  content) is not a  goal. Attackers typically recognize that perfect censorship is not only  impossible, but unnecessary: if "undesirable" information is known only  to a small few, further censoring efforts can be focused elsewhere.</dd> <dt><b></b></dt>	<dd><li>Similarly, the censors are not attempting to shut down or block <i>  every</i> anti-censorship tool — merely the tools that are popular and  effective (because these tools impede the censors' information restriction  goals) and those tools that are highly visible (thus making the censors  look ineffectual to their citizens and their bosses).</dd> <dt><b></b></dt>	<dd><li>Reprisal against <i>most</i> passive consumers of <i>most</i> kinds of  blocked information is also not a goal, given the broadness of most  censorship regimes. This seems borne out by fact.<a href="#tthFtNtAAB" name="tthFrefAAB"><sup>1</sup></a></dd> <dt><b></b></dt>	<dd><li>Producers and distributors of targeted information are in much  greater danger than consumers; the attacker would like to not only block  their work, but identify them for reprisal.</dd> <dt><b></b></dt>	<dd><li>The censors (or their governments) would like to have a working, useful  Internet. There are economic, political, and social factors that prevent  them from "censoring" the Internet by outlawing it entirely, or by  blocking access to all but a tiny list of sites.  Nevertheless, the censors <i>are</i> willing to block innocuous content  (like the bulk of a newspaper's reporting) in order to censor other content  distributed through the same channels (like that newspaper's coverage of  the censored country).</dd></dl><div class="p"><!----></div>We assume there are three main technical network attacks in use by censorscurrently [<a href="#clayton:pet2006" name="CITEclayton:pet2006">7</a>]:<div class="p"><!----></div><dl compact="compact"> <dt><b></b></dt>	<dd><li>Block a destination or type of traffic by automatically searching for  certain strings or patterns in TCP packets.  Offending packets can be  dropped, or can trigger a response like closing the  connection.</dd> <dt><b></b></dt>	<dd><li>Block a destination by listing its IP address at a  firewall or other routing control point.</dd> <dt><b></b></dt>	<dd><li>Intercept DNS requests and give bogus responses for certain  destination hostnames.</dd></dl><div class="p"><!----></div>We assume the network firewall has limited CPU and memory perconnection [<a href="#clayton:pet2006" name="CITEclayton:pet2006">7</a>].  Against an adversary who could carefullyexamine the contents of every packet and correlate the packets in everystream on the network, we would need some stronger mechanism such assteganography, which introduces its ownproblems [<a href="#active-wardens" name="CITEactive-wardens">15</a>,<a href="#tcpstego" name="CITEtcpstego">26</a>].  But we make a "weaksteganography" assumption here: to remain unblocked, it is necessary toremain unobservable only by computational resources on par with a modernrouter, firewall, proxy, or IDS.<div class="p"><!----></div>We assume that while various different regimes can coordinate and sharenotes, there will be a time lag between one attacker learning how to overcomea facet of our design and other attackers picking it up.  (The most commonvector of transmission seems to be commercial providers of censorship tools:once a provider adds a feature to meet one country's needs or requests, thefeature is available to all of the provider's customers.)  Conversely, weassume that insider attacks become a higher risk only after the early stagesof network development, once the system has reached a certain level ofsuccess and visibility.<div class="p"><!----></div>We do not assume that government-level attackers are always uniformacross the country. For example, users of different ISPs in Chinaexperience different censorship policies and mechanisms.<div class="p"><!----></div>We assume that the attacker may be able to use political and economicresources to secure the cooperation of extraterritorial or multinationalcorporations and entities in investigating information sources.For example, the censors can threaten the service providers oftroublesome blogs with economic reprisals if they do not reveal theauthors' identities.<div class="p"><!----></div>We assume that our users have control over their hardware andsoftware — they don't have any spyware installed, there are nocameras watching their screens, etc. Unfortunately, in many situationsthese threats are real [<a href="#zuckerman-threatmodels" name="CITEzuckerman-threatmodels">28</a>]; yetsoftware-based security systems like ours are poorly equipped to handlea user who is entirely observed and controlled by the adversary. SeeSection <a href="#subsec:cafes-and-livecds">8.4</a> for more discussion of what littlewe can do about this issue.<div class="p"><!----></div>Similarly, we assume that the user will be able to fetch a genuineversion of Tor, rather than one supplied by the adversary; seeSection <a href="#subsec:trust-chain">8.5</a> for discussion on helping the userconfirm that he has a genuine version and that he can connect to thereal Tor network.<div class="p"><!----></div> <h2><a name="tth_sEc3"><a name="sec:current-tor">3</a>  Adapting the current Tor design to anti-censorship</h2></a><div class="p"><!----></div>Tor is popular and sees a lot of use — it's the largest anonymitynetwork of its kind, and hasattracted more than 800 volunteer-operated routers from around theworld.  Tor protects each user by routing their traffic through a multiplyencrypted "circuit" built of a few randomly selected servers, each of whichcan remove only a single layer of encryption.  Each server sees only the stepbefore it and the step after it in the circuit, and so no single server canlearn the connection between a user and her chosen communication partners.In this section, we examine some of the reasons why Tor has become popular,with particular emphasis to how we can take advantage of these propertiesfor a blocking-resistance design.<div class="p"><!----></div>Tor aims to provide three security properties:<dl compact="compact"> <dt><b></b></dt>	<dd>1. A local network attacker can't learn, or influence, yourdestination.</dd> <dt><b></b></dt>	<dd>2. No single router in the Tor network can link you to yourdestination.</dd> <dt><b></b></dt>	<dd>3. The destination, or somebody watching the destination,can't learn your location.</dd></dl><div class="p"><!----></div>For blocking-resistance, we care most clearly about the firstproperty. But as the arms race progresses, the second propertywill become important — for example, to discourage an adversaryfrom volunteering a relay in order to learn that Alice is readingor posting to certain websites. The third property helps keep users safe fromcollaborating websites: consider websites and other Internet servicesthat have been pressuredrecently into revealing the identity of bloggersor treating clients differently depending on their networklocation [<a href="#goodell-syverson06" name="CITEgoodell-syverson06">17</a>].<div class="p"><!----></div>The Tor design provides other features as well that are not typicallypresent in manual or ad hoc circumvention techniques.<div class="p"><!----></div>First, Tor has a well-analyzed and well-understood way to distributeinformation about servers.Tor directory authorities automatically aggregate, test,and publish signed summaries of the available Tor routers. Tor clientscan fetch these summaries to learn which routers are available andwhich routers are suitable for their needs. Directory information is cachedthroughout the Tor network, so once clients have bootstrapped they neverneed to interact with the authorities directly. (To tolerate a minorityof compromised directory authorities, we use a threshold trust scheme —see Section <a href="#subsec:trust-chain">8.5</a> for details.)<div class="p"><!----></div>Second, the list of directory authorities is not hard-wired.Clients use the default authorities if no others are specified,but it's easy to start a separate (or even overlapping) Tor network justby running a different set of authorities and convincing users to prefera modified client. For example, we could launch a distinct Tor networkinside China; some users could even use an aggregate network made up ofboth the main network and the China network. (But we should not be tooquick to create other Tor networks — part of Tor's anonymity comes fromusers behaving like other users, and there are many unsolved anonymityquestions if different users know about different pieces of the network.)<div class="p"><!----></div>Third, in addition to automatically learning from the chosen directorieswhich Tor routers are available and working, Tor takes care of buildingpaths through the network and rebuilding them as needed. So the usernever has to know how paths are chosen, never has to manually pickworking proxies, and so on. More generally, at its core the Tor protocolis simply a tool that can build paths given a set of routers. Tor isquite flexible about how it learns about the routers and how it choosesthe paths. Harvard's Blossom project [<a href="#blossom-thesis" name="CITEblossom-thesis">16</a>] makes thisflexibility more concrete: Blossom makes use of Tor not for its securityproperties but for its reachability properties. It runs a separate setof directory authorities, its own set of Tor routers (called the Blossomnetwork), and uses Tor's flexible path-building to let users view Internetresources from any point in the Blossom network.<div class="p"><!----></div>Fourth, Tor separates the role of <em>internal relay</em> from therole of <em>exit relay</em>. That is, some volunteers choose just to relaytraffic between Tor users and Tor routers, and others choose to also allowconnections to external Internet resources. Because we don't force allvolunteers to play both roles, we end up with more relays. This increaseddiversity in turn is what gives Tor its security: the more options theuser has for her first hop, and the more options she has for her last hop,the less likely it is that a given attacker will be watching both endsof her circuit [<a href="#tor-design" name="CITEtor-design">11</a>]. As a bonus, because our design attractsmore internal relays that want to help out but don't want to deal withbeing an exit relay, we end up providing more options for the firsthop — the one most critical to being able to reach the Tor network.<div class="p"><!----></div>Fifth, Tor is sustainable. Zero-Knowledge Systems offered the commercialbut now defunct Freedom Network [<a href="#freedom21-security" name="CITEfreedom21-security">2</a>], a design withsecurity comparable to Tor's, but its funding model relied on collectingmoney from users to pay relay operators. Modern commercial proxy systemssimilarlyneed to keep collecting money to support their infrastructure. On theother hand, Tor has built a self-sustaining community of volunteers whodonate their time and resources. This community trust is rooted in Tor'sopen design: we tell the world exactly how Tor works, and we provide allthe source code. Users can decide for themselves, or pay any securityexpert to decide, whether it is safe to use. Further, Tor's modularityas described above, along with its open license, mean that its impactwill continue to grow.<div class="p"><!----></div>Sixth, Tor has an established user base of hundreds ofthousands of people from around the world. This diversity ofusers contributes to sustainability as above: Tor is used byordinary citizens, activists, corporations, law enforcement, andeven government and military users,and they canonly achieve their security goals by blending together in the samenetwork [<a href="#econymics" name="CITEeconymics">1</a>,<a href="#usability:weis2006" name="CITEusability:weis2006">9</a>]. This user base also providessomething else: hundreds of thousands of different and often-changingaddresses that we can leverage for our blocking-resistance design.<div class="p"><!----></div>Finally and perhaps most importantly, Tor provides anonymity and prevents anysingle server from linking users to their communication partners.  Despiteinitial appearances, <i>distributed-trust anonymity is critical foranti-censorship efforts</i>.  If any single server can expose dissident bloggersor compile a list of users' behavior, the censors can profitably compromisethat server's operator, perhaps by  applying economic pressure to theiremployers,breaking into their computer, pressuring their family (if they have relativesin the censored area), or so on.  Furthermore, in designs where any relay canexpose its users, the censors can spread suspicion that they are running someof the relays and use this belief to chill use of the network.<div class="p"><!----></div>We discuss and adapt these components further inSection <a href="#sec:bridges">5</a>. But first we examine the strengths andweaknesses of other blocking-resistance approaches, so we can expandour repertoire of building blocks and ideas.<div class="p"><!----></div> <h2><a name="tth_sEc4"><a name="sec:related">4</a>  Current proxy solutions</h2></a><div class="p"><!----></div>Relay-based blocking-resistance schemes generally have two maincomponents: a relay component and a discovery component. The relay partencompasses the process of establishing a connection, sending trafficback and forth, and so on — everything that's done once the user knowswhere she's going to connect. Discovery is the step before that: theprocess of finding one or more usable relays.<div class="p"><!----></div>For example, we can divide the pieces of Tor in the previous sectioninto the process of building paths and sendingtraffic over them (relay) and the process of learning from the directoryservers about what routers are available (discovery).  With this distinctionin mind, we now examine several categories of relay-based schemes.<div class="p"><!----></div>     <h3><a name="tth_sEc4.1">4.1</a>  Centrally-controlled shared proxies</h3><div class="p"><!----></div>Existing commercial anonymity solutions (like Anonymizer.com) are basedon a set of single-hop proxies. In these systems, each user connects toa single proxy, which then relays traffic between the user and herdestination. These public proxysystems are typically characterized by two features: they control andoperate the proxies centrally, and many different users get assignedto each proxy.<div class="p"><!----></div>In terms of the relay component, single proxies provide weak securitycompared to systems that distribute trust over multiple relays, since acompromised proxy can trivially observe all of its users' actions, andan eavesdropper only needs to watch a single proxy to perform timingcorrelation attacks against all its users' traffic and thus learn whereeveryone is connecting. Worse, all usersneed to trust the proxy company to have good security itself as well asto not reveal user activities.<div class="p"><!----></div>On the other hand, single-hop proxies are easier to deploy, and theycan provide better performance than distributed-trust designs like Tor,since traffic only goes through one relay. They're also more convenientfrom the user's perspective — since users entirely trust the proxy,they can just use their web browser directly.<div class="p"><!----></div>Whether public proxy schemes are more or less scalable than Tor isstill up for debate: commercial anonymity systems can use some of theirrevenue to provision more bandwidth as they grow, whereas volunteer-basedanonymity systems can attract thousands of fast relays to spread the load.<div class="p"><!----></div>The discovery piece can take several forms. Most commercial anonymousproxies have one or a handful of commonly known websites, and their userslog in to those websites and relay their traffic through them. Whenthese websites get blocked (generally soon after the company becomespopular), if the company cares about users in the blocked areas, theystart renting lots of disparate IP addresses and rotating through themas they get blocked. They notify their users of new addresses (by email,for example). It's an arms race, since attackers can sign up to receive theemail too, but operators have one nice trick available to them: because theyhave a list of paying subscribers, they can notify certain subscribersabout updates earlier than others.<div class="p"><!----></div>Access control systems on the proxy let them provide service only tousers with certain characteristics, such as paying customers or peoplefrom certain IP address ranges.<div class="p"><!----></div>Discovery in the face of a government-level firewall is a complex andunsolvedtopic, and we're stuck in this same arms race ourselves; we explore itin more detail in Section <a href="#sec:discovery">7</a>. But first we examine theother end of the spectrum — getting volunteers to run the proxies,and telling only a few people about each proxy.<div class="p"><!----></div>     <h3><a name="tth_sEc4.2">4.2</a>  Independent personal proxies</h3><div class="p"><!----></div>Personal proxies such as Circumventor [<a href="#circumventor" name="CITEcircumventor">18</a>] andCGIProxy [<a href="#cgiproxy" name="CITEcgiproxy">23</a>] use the same technology as the public ones asfar as the relay component goes, but they use a different strategy fordiscovery. Rather than managing a few centralized proxies and constantlygetting new addresses for them as the old addresses are blocked, theyaim to have a large number of entirely independent proxies, each managingits own (much smaller) set of users.<div class="p"><!----></div>As the Circumventor site explains, "You don'tactually install the Circumventor <em>on</em> the computer that is blockedfrom accessing Web sites. You, or a friend of yours, has to install theCircumventor on some <em>other</em> machine which is not censored."<div class="p"><!----></div>This tactic has great advantages in terms of blocking-resistance — recallour assumption in Section <a href="#sec:adversary">2</a> that the attentiona system attracts from the attacker is proportional to its number ofusers and level of publicity. If each proxy only has a few users, andthere is no central list of proxies, most of them will never get noticed bythe censors.<div class="p"><!----></div>On the other hand, there's a huge scalability question that so far hasprevented these schemes from being widely useful: how does the fellowin China find a person in Ohio who will run a Circumventor for him? Insome cases he may know and trust some people on the outside, but in manycases he's just out of luck. Just as hard, how does a new volunteer inOhio find a person in China who needs it?<div class="p"><!----></div><div class="p"><!----></div>This challenge leads to a hybrid design-centrally — distributedpersonal proxies — which we will investigate in more detail inSection <a href="#sec:discovery">7</a>.<div class="p"><!----></div>     <h3><a name="tth_sEc4.3">4.3</a>  Open proxies</h3><div class="p"><!----></div>Yet another currently used approach to bypassing firewalls is to locateopen and misconfigured proxies on the Internet. A quick Google searchfor "open proxy list" yields a wide variety of freely available listsof HTTP, HTTPS, and SOCKS proxies. Many small companies have sprung upproviding more refined lists to paying customers.<div class="p"><!----></div>There are some downsides to using these open proxies though. First,the proxies are of widely varying quality in terms of bandwidth andstability, and many of them are entirely unreachable. Second, unlikenetworks of volunteers like Tor, the legality of routing traffic throughthese proxies is questionable: it's widely believed that most of themdon't realize what they're offering, and probably wouldn't allow it ifthey realized. Third, in many cases the connection to the proxy isunencrypted, so firewalls that filter based on keywords in IP packetswill not be hindered. Fourth, in many countries (including China), thefirewall authorities hunt for open proxies as well, to preemptivelyblock them. And last, many users are suspicious that someopen proxies are a little <em>too</em> convenient: are they run by theadversary, in which case they get to monitor all the user's requestsjust as single-hop proxies can?<div class="p"><!----></div>A distributed-trust design like Tor resolves each of these issues forthe relay component, but a constantly changing set of thousands of openrelays is clearly a useful idea for a discovery component. For example,users might be able to make use of these proxies to bootstrap theirfirst introduction into the Tor network.<div class="p"><!----></div>     <h3><a name="tth_sEc4.4">4.4</a>  Blocking resistance and JAP</h3><div class="p"><!----></div>Köpsell and Hilling's Blocking Resistancedesign [<a href="#koepsell:wpes2004" name="CITEkoepsell:wpes2004">20</a>] is probablythe closest related work, and is the starting point for the design in thispaper.  In this design, the JAP anonymity system [<a href="#web-mix" name="CITEweb-mix">3</a>] is usedas a base instead of Tor.  Volunteers operate a large number of accesspoints that relay traffic to the core JAPnetwork, which in turn anonymizes users' traffic.  The software to run theserelays is, as in our design, included in the JAP client software and enabledonly when the user decides to enable it.  Discovery is handled with aCAPTCHA-based mechanism; users prove that they aren't an automated process,and are given the address of an access point.  (The problem of a determinedattacker with enough manpower to launch many requests and enumerate all theaccess points is not considered in depth.)  There is also some suggestionthat information about access points could spread through existing socialnetworks.<div class="p"><!----></div>     <h3><a name="tth_sEc4.5">4.5</a>  Infranet</h3><div class="p"><!----></div>The Infranet design [<a href="#infranet" name="CITEinfranet">14</a>] uses one-hop relays to deliver webcontent, but disguises its communications as ordinary HTTP traffic.  Requestsare split into multiple requests for URLs on the relay, which then encodesits responses in the content it returns.  The relay needs to be an actualwebsite with plausible content and a number of URLs which the user might wantto access — if the Infranet software produced its own cover content, it wouldbe far easier for censors to identify.  To keep the censors from noticingthat cover content changes depending on what data is embedded, Infranet needsthe cover content to have an innocuous reason for changing frequently: thepaper recommends watermarked images and webcams.<div class="p"><!----></div>The attacker and relay operators in Infranet's threat model are significantlydifferent than in ours.  Unlike our attacker, Infranet's censor can't bebypassed with encrypted traffic (presumably because the censor blocksencrypted traffic, or at least considers it suspicious), and has morecomputational resources to devote to each connection than ours (so it cannotice subtle patterns over time).  Unlike our bridge operators, Infranet'soperators (and users) have more bandwidth to spare; the overhead in typicalsteganography schemes is far higher than Tor's.<div class="p"><!----></div>The Infranet design does not include a discovery element.  Discovery,however, is a critical point: if whatever mechanism allows users to learnabout relays also allows the censor to do so, he can trivially discover andblock their addresses, even if the steganography would prevent mere trafficobservation from revealing the relays' addresses.<div class="p"><!----></div>     <h3><a name="tth_sEc4.6">4.6</a>  RST-evasion and other packet-level tricks</h3><div class="p"><!----></div>In their analysis of China's firewall's content-based blocking, Clayton,Murdoch and Watson discovered that rather than blocking all packets in a TCPstreams once a forbidden word was noticed, the firewall was simply forgingRST packets to make the communicating parties believe that the connection wasclosed [<a href="#clayton:pet2006" name="CITEclayton:pet2006">7</a>]. They proposed altering operating systemsto ignore forged RST packets. This approach might work in some cases, butin practice it appears that many firewalls start filtering by IP addressonce a sufficient number of RST packets have been sent.<div class="p"><!----></div>Other packet-level responses to filtering include splittingsensitive words across multiple TCP packets, so that the censors'firewalls can't notice them without performing expensive streamreconstruction [<a href="#ptacek98insertion" name="CITEptacek98insertion">27</a>]. This technique relies on thesame insight as our weak steganography assumption.<div class="p"><!----></div>     <h3><a name="tth_sEc4.7">4.7</a>  Internal caching networks</h3><div class="p"><!----></div>Freenet [<a href="#freenet-pets00" name="CITEfreenet-pets00">6</a>] is an anonymous peer-to-peer data store.Analyzing Freenet's security can be difficult, as its design is in flux asnew discovery and routing mechanisms are proposed, and no completespecification has (to our knowledge) been written.  Freenet servers relayrequests for specific content (indexed by a digest of the content)"toward" the server that hosts it, and then cache the content as itfollows the same path back tothe requesting user.  If Freenet's routing mechanism is successful inallowing nodes to learn about each other and route correctly even as somenode-to-node links are blocked by firewalls, then users inside censored areascan ask a local Freenet server for a piece of content, and get an answerwithout having to connect out of the country at all.  Of course, operators ofservers inside the censored area can still be targeted, and the addresses ofexternal servers can still be blocked.<div class="p"><!----></div>     <h3><a name="tth_sEc4.8">4.8</a>  Skype</h3><div class="p"><!----></div>The popular Skype voice-over-IP software uses multiple techniques to toleraterestrictive networks, some of which allow it to continue operating in thepresence of censorship.  By switching ports and using encryption, Skypeattempts to resist trivial blocking and content filtering.  Even if noencryption were used, it would still be expensive to scan all voicetraffic for sensitive words.  Also, most current keyloggers are unable tostore voice traffic.  Nevertheless, Skype can still be blocked, especially atits central login server.<div class="p"><!----></div>     <h3><a name="tth_sEc4.9">4.9</a>  Tor itself</h3><div class="p"><!----></div>And last, we include Tor itself in the list of current solutionsto firewalls. Tens of thousands of people use Tor from countries thatroutinely filter their Internet. Tor's website has been blocked in mostof them. But why hasn't the Tor network been blocked yet?<div class="p"><!----></div>We have several theories. The first is the most straightforward: tens ofthousands of people are simply too few to matter. It may help that Tor isperceived to be for experts only, and thus not worth attention yet. Themore subtle variant on this theory is that we've positioned Tor in thepublic eye as a tool for retaining civil liberties in more free countries,so perhaps blocking authorities don't view it as a threat. (We revisitthis idea when we consider whether and how to publicize a Tor variantthat improves blocking-resistance — see Section <a href="#subsec:publicity">9.5</a>for more discussion.)<div class="p"><!----></div>The broader explanation is that the maintenance of most government-levelfilters is aimed at stopping widespread information flow and appearing to bein control, not by the impossible goal of blocking all possible ways to bypasscensorship. Censors realize that there will alwaysbe ways for a few people to get around the firewall, and as long as Torhas not publically threatened their control, they see no urgent need toblock it yet.<div class="p"><!----></div>We should recognize that we're <em>already</em> in the arms race. Theseconstraints can give us insight into the priorities and capabilities ofour various attackers.<div class="p"><!----></div> <h2><a name="tth_sEc5"><a name="sec:bridges">5</a>  The relay component of our blocking-resistant design</h2></a><div class="p"><!----></div>Section <a href="#sec:current-tor">3</a> describes many reasons why Tor iswell-suited as a building block in our context, but several changes willallow the design to resist blocking better. The most critical changes areto get more relay addresses, and to distribute them to users differently.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc5.1">5.1</a>  Bridge relays</h3><div class="p"><!----></div>Today, Tor servers operate on less than a thousand distinct IP addresses;an adversarycould enumerate and block them all with little trouble.  To provide ameans of ingress to the network, we need a larger set of entry points, mostof which an adversary won't be able to enumerate easily.  Fortunately, wehave such a set: the Tor users.<div class="p"><!----></div>Hundreds of thousands of people around the world use Tor. We can leverageour already self-selected user base to produce a list of thousands offrequently-changing IP addresses. Specifically, we can give them a littlebutton in the GUI that says "Tor for Freedom", and users who clickthe button will turn into <em>bridge relays</em> (or just <em>bridges</em>for short). They can rate limit relayed connections to 10 KB/s (almostnothing for a broadband user in a free country, but plenty for a userwho otherwise has no access at all), and since they are just relayingbytes back and forth between blocked users and the main Tor network, theywon't need to make any external connections to Internet sites. Becauseof this separation of roles, and because we're making use of softwarethat the volunteers have already installed for their own use, we expectour scheme to attract and maintain more volunteers than previous schemes.<div class="p"><!----></div>As usual, there are new anonymity and security implications from running abridge relay, particularly from letting people relay traffic through yourTor client; but we leave this discussion for Section <a href="#sec:security">8</a>.<div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc5.2">5.2</a>  The bridge directory authority</h3><div class="p"><!----></div>How do the bridge relays advertise their existence to the world? Weintroduce a second new component of the design: a specialized directoryauthority that aggregates and tracks bridges. Bridge relays periodicallypublish server descriptors (summaries of their keys, locations, etc,signed by their long-term identity key), just like the relays in the"main" Tor network, but in this case they publish them only to thebridge directory authorities.<div class="p"><!----></div>The main difference between bridge authorities and the directoryauthorities for the main Tor network is that the main authorities providea list of every known relay, but the bridge authorities only giveout a server descriptor if you already know its identity key. That is,you can keep up-to-date on a bridge's location and other informationonce you know about it, but you can't just grab a list of all the bridges.<div class="p"><!----></div>The identity key, IP address, and directory port for each bridgeauthority ship by default with the Tor software, so the bridge relayscan be confident they're publishing to the right location, and theblocked users can establish an encrypted authenticated channel. SeeSection <a href="#subsec:trust-chain">8.5</a> for more discussion of the public keyinfrastructure and trust chain.<div class="p"><!----></div>Bridges use Tor to publish their descriptors privately and securely,so even an attacker monitoring the bridge directory authority's networkcan't make a list of all the addresses contacting the authority.Bridges may publish to only a subset of theauthorities, to limit the potential impact of an authority compromise.<div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc5.3"><a name="subsec:relay-together">5.3</a>  Putting them together</h3></a><div class="p"><!----></div>If a blocked user knows the identity keys of a set of bridge relays, andhe has correct address information for at least one of them, he can usethat one to make a secure connection to the bridge authority and updatehis knowledge about the other bridge relays. He can also use it to makesecure connections to the main Tor network and directory servers, so hecan build circuits and connect to the rest of the Internet. All of theseupdates happen in the background: from the blocked user's perspective,he just accesses the Internet via his Tor client like always.<div class="p"><!----></div>So now we've reduced the problem from how to circumvent the firewallfor all transactions (and how to know that the pages you get have notbeen modified by the local attacker) to how to learn about a workingbridge relay.<div class="p"><!----></div>There's another catch though. We need to make sure that the networktraffic we generate by simply connecting to a bridge relay doesn't standout too much.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div> <h2><a name="tth_sEc6"><a name="sec:network-fingerprint"><a name="subsec:enclave-dirs">6</a>  Hiding Tor's network fingerprint</h2></a></a><div class="p"><!----></div>Currently, Tor uses two protocols for its network communications. Themain protocol uses TLS for encrypted and authenticated communicationbetween Tor instances. The second protocol is standard HTTP, used forfetching directory information. All Tor servers listen on their "ORPort"for TLS connections, and some of them opt to listen on their "DirPort"as well, to serve directory information. Tor servers choose whatever portnumbers they like; the server descriptor they publish to the directorytells users where to connect.<div class="p"><!----></div>One format for communicating address information about a bridge relay isits IP address and DirPort. From there, the user can ask the bridge'sdirectory cache for an up-to-date copy of its server descriptor, andlearn its current circuit keys, its ORPort, and so on.<div class="p"><!----></div>However, connecting directly to the directory cache involves a plaintextHTTP request. A censor could create a network fingerprint (known as a<em>signature</em> in the intrusion detection field) for the requestand/or its response, thus preventing these connections. To resolve thisvulnerability, we've modified the Tor protocol so that users can connectto the directory cache via the main Tor port — they establish a TLSconnection with the bridge as normal, and then send a special "begindir"relay command to establish an internal connection to its directory cache.<div class="p"><!----></div>Therefore a better way to summarize a bridge's address is by its IPaddress and ORPort, so all communications between the client and thebridge will use ordinary TLS. But there are other details that needmore investigation.<div class="p"><!----></div>What port should bridges pick for their ORPort? We currently recommendthat they listen on port 443 (the default HTTPS port) if they want tobe most useful, because clients behind standard firewalls will havethe best chance to reach them. Is this the best choice in all cases,or should we encourage some fraction of them pick random ports, or otherports commonly permitted through firewalls like 53 (DNS) or 110(POP)?  Or perhaps we should use other ports where TLS traffic isexpected, like 993 (IMAPS) or 995 (POP3S).  We need more research on ourpotential users, and their current and anticipated firewall restrictions.<div class="p"><!----></div>Furthermore, we need to look at the specifics of Tor's TLS handshake.Right now Tor uses some predictable strings in its TLS handshakes. Forexample, it sets the X.509 organizationName field to "Tor", and it putsthe Tor server's nickname in the certificate's commonName field. Weshould tweak the handshake protocol so it doesn't rely on any unusual detailsin the certificate, yet it remains secure; the certificate itselfshould be made to resemble an ordinary HTTPS certificate.  We should also tryto make our advertised cipher-suites closer to what an ordinary web serverwould support.<div class="p"><!----></div>Tor's TLS handshake uses two-certificate chains: one certificatecontains the self-signed identity key forthe router, and the second contains a current TLS key, signed by theidentity key. We use these to authenticate that we're talking to the rightrouter, and to limit the impact of TLS-key exposure.  Most (though far fromall) consumer-oriented HTTPS services provide only a single certificate.These extra certificates may help identify Tor's TLS handshake; instead,bridges should consider using only a single TLS key certificate signed bytheir identity key, and providing the full value of the identity key in anearly handshake cell.  More significantly, Tor currently has all clientspresent certificates, so that clients are harder to distinguish from servers.But in a blocking-resistance environment, clients should not presentcertificates at all.<div class="p"><!----></div>Last, what if the adversary starts observing the network traffic evenmore closely? Even if our TLS handshake looks innocent, our traffic timingand volume still look different than a user making a secure web connectionto his bank. The same techniques used in the growing trend to build toolsto recognize encrypted Bittorrent trafficcould be used to identify Tor communication and recognize bridgerelays. Rather than trying to look like encrypted web traffic, we may bebetter off trying to blend with some other encrypted network protocol. Thefirst step is to compare typical network behavior for a Tor client totypical network behavior for various other protocols. This statisticalcat-and-mouse game is made more complex by the fact that Tor transports avariety of protocols, and we'll want to automatically handle web browsingdifferently from, say, instant messaging.<div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc6.1"><a name="subsec:id-address">6.1</a>  Identity keys as part of addressing information</h3></a><div class="p"><!----></div>We have described a way for the blocked user to bootstrap into thenetwork once he knows the IP address and ORPort of a bridge. What aboutlocal spoofing attacks? That is, since we never learned an identitykey fingerprint for the bridge, a local attacker could intercept ourconnection and pretend to be the bridge we had in mind. It turns outthat giving false information isn't that bad — since the Tor clientships with trusted keys for the bridge directory authority and the Tornetwork directory authorities, the user can learn whether he's beinggiven a real connection to the bridge authorities or not. (After all,if the adversary intercepts every connection the user makes and giveshim a bad connection each time, there's nothing we can do.)<div class="p"><!----></div>What about anonymity-breaking attacks from observing traffic, if theblocked user doesn't start out knowing the identity key of his intendedbridge? The vulnerabilities aren't so bad in this case either — theadversary could do similar attacks just by monitoring the networktraffic.<div class="p"><!----></div>Once the Tor client has fetched the bridge's server descriptor, it shouldremember the identity key fingerprint for that bridge relay. Thus ifthe bridge relay moves to a new IP address, the client can query thebridge directory authority to look up a fresh server descriptor usingthis fingerprint.<div class="p"><!----></div>So we've shown that it's <em>possible</em> to bootstrap into the networkjust by learning the IP address and ORPort of a bridge, but are theresituations where it's more convenient or more secure to learn the bridge'sidentity fingerprint as well as instead, while bootstrapping? We keepthat question in mind as we next investigate bootstrapping and discovery.<div class="p"><!----></div> <h2><a name="tth_sEc7"><a name="sec:discovery">7</a>  Discovering working bridge relays</h2></a><div class="p"><!----></div>Tor's modular design means that we can develop a better relay componentindependently of developing the discovery component. This modularity'sgreat promise is that we can pick any discovery approach we like; but theunfortunate fact is that we have no magic bullet for discovery. We'rein the same arms race as all the other designs we described inSection <a href="#sec:related">4</a>.<div class="p"><!----></div>In this section we describe a variety of approaches to adding discoverycomponents for our design.<div class="p"><!----></div>     <h3><a name="tth_sEc7.1"><a name="subsec:first-bridge">7.1</a>  Bootstrapping: finding your first bridge.</h3></a><div class="p"><!----></div>In Section <a href="#subsec:relay-together">5.3</a>, we showed that a user who knowsa working bridge address can use it to reach the bridge authority andto stay connected to the Tor network. But how do new users reach thebridge authority in the first place? After all, the bridge authoritywill be one of the first addresses that a censor blocks.<div class="p"><!----></div>First, we should recognize that most government firewalls are notperfect. That is, they may allow connections to Google cache or someopen proxy servers, or they let file-sharing traffic, Skype, instantmessaging, or World-of-Warcraft connections through. Different users willhave different mechanisms for bypassing the firewall initially. Second,we should remember that most people don't operate in a vacuum; users willhopefully know other people who are in other situations or have otherresources available. In the rest of this section we develop a toolkitof different options and mechanisms, so that we can enable users in adiverse set of contexts to bootstrap into the system.<div class="p"><!----></div>(For users who can't use any of these techniques, hopefully they knowa friend who can — for example, perhaps the friend already knows somebridge relay addresses. If they can't get around it at all, then wecan't help them — they should go meet more people or learn more aboutthe technology running the firewall in their area.)<div class="p"><!----></div>By deploying all the schemes in the toolkit at once, we let bridges andblocked users employ the discovery approach that is most appropriatefor their situation.<div class="p"><!----></div>     <h3><a name="tth_sEc7.2">7.2</a>  Independent bridges, no central discovery</h3><div class="p"><!----></div>The first design is simply to have no centralized discovery component atall. Volunteers run bridges, and we assume they have some blocked usersin mind and communicate their address information to them out-of-band(for example, through Gmail). This design allows for small personalbridges that have only one or a handful of users in mind, but it canalso support an entire community of users. For example, Citizen Lab'supcoming Psiphon single-hop proxy tool [<a href="#psiphon" name="CITEpsiphon">13</a>] plans to use this<em>social network</em> approach as its discovery component.<div class="p"><!----></div>There are several ways to do bootstrapping in this design. In the simplecase, the operator of the bridge informs each chosen user about hisbridge's address information and/or keys. A different approach involvesblocked users introducing new blocked users to the bridges they know.That is, somebody in the blocked area can pass along a bridge's address tosomebody else they trust. This scheme brings in appealing but complex gametheoretic properties: the blocked user making the decision has an incentiveonly to delegate to trustworthy people, since an adversary who learnsthe bridge's address and filters it makes it unavailable for both of them.Also, delegating known bridges to members of your social network can bedangerous: an the adversary who can learn who knows which bridges maybe able to reconstruct the social network.<div class="p"><!----></div>Note that a central set of bridge directory authorities can still becompatible with a decentralized discovery process. That is, how usersfirst learn about bridges is entirely up to the bridges, but the processof fetching up-to-date descriptors for them can still proceed as describedin Section <a href="#sec:bridges">5</a>. Of course, creating a central place thatknows about all the bridges may not be smart, especially if every otherpiece of the system is decentralized. Further, if a user only knowsabout one bridge and he loses track of it, it may be quite a hassle toreach the bridge authority. We address these concerns next.<div class="p"><!----></div>     <h3><a name="tth_sEc7.3">7.3</a>  Families of bridges, no central discovery</h3><div class="p"><!----></div>Because the blocked users are running our software too, we have manyopportunities to improve usability or robustness. Our second design buildson the first by encouraging volunteers to run several bridges at once(or coordinate with other bridge volunteers), such that someof the bridges are likely to be available at any given time.<div class="p"><!----></div>The blocked user's Tor client would periodically fetch an updated set ofrecommended bridges from any of the working bridges. Now the client canlearn new additions to the bridge pool, and can expire abandoned bridgesor bridges that the adversary has blocked, without the user ever needingto care. To simplify maintenance of the community's bridge pool, eachcommunity could run its own bridge directory authority — reachable viathe available bridges, and also mirrored at each bridge.<div class="p"><!----></div>     <h3><a name="tth_sEc7.4">7.4</a>  Public bridges with central discovery</h3><div class="p"><!----></div>What about people who want to volunteer as bridges but don't know anysuitable blocked users? What about people who are blocked but don'tknow anybody on the outside? Here we describe how to make use of these<em>public bridges</em> in a way that still makes it hard for the attackerto learn all of them.<div class="p"><!----></div>The basic idea is to divide public bridges into a set of pools based onidentity key. Each pool corresponds to a <em>distribution strategy</em>:an approach to distributing its bridge addresses to users. Each strategyis designed to exercise a different scarce resource or property ofthe user.<div class="p"><!----></div>How do we divide bridges between these strategy pools such that they'reevenly distributed and the allocation is hard to influence or predict,but also in a way that's amenable to creating more strategies lateron without reshuffling all the pools? We assign a given bridgeto a strategy pool by hashing the bridge's identity key along with asecret that only the bridge authority knows: the first n bits of thishash dictate the strategy pool number, where n is a parameter thatdescribes how many strategy pools we want at this point. We choose n=3to start, so we divide bridges between 8 pools; but as we later inventnew distribution strategies, we can increment n to split the 8 into16. Since a bridge can't predict the next bit in its hash, it can'tanticipate which identity key will correspond to a certain new poolwhen the pools are split. Further, since the bridge authority doesn'tprovide any feedback to the bridge about which strategy pool it's in,an adversary who signs up bridges with the goal of filling a certainpool [<a href="#casc-rep" name="CITEcasc-rep">12</a>] will be hindered.<div class="p"><!----></div><div class="p"><!----></div>The first distribution strategy (used for the first pool) publishes bridgeaddresses in a time-release fashion. The bridge authority divides theavailable bridges into partitions, and each partition is deterministicallyavailable only in certain time windows. That is, over the course of agiven time slot (say, an hour), each requester is given a random bridgefrom within that partition. When the next time slot arrives, a new setof bridges from the pool are available for discovery. Thus some bridgeaddress is always available when a newuser arrives, but to learn about all bridges the attacker needs to fetchall new addresses at every new time slot. By varying the length of thetime slots, we can make it harder for the attacker to guess when to checkback. We expect these bridges will be the first to be blocked, but they'llhelp the system bootstrap until they <em>do</em> get blocked. Further,remember that we're dealing with different blocking regimes around theworld that will progress at different rates — so this pool will stillbe useful to some users even as the arms races progress.<div class="p"><!----></div>The second distribution strategy publishes bridge addresses based on the IPaddress of the requesting user. Specifically, the bridge authority willdivide the available bridges in the pool into a bunch of partitions(as in the first distribution scheme), hash the requester's IP addresswith a secret of its own (as in the above allocation scheme for creatingpools), and give the requester a random bridge from the appropriatepartition. To raise the bar, we should discard the last octet of theIP address before inputting it to the hash function, so an attackerwho only controls a single "/24" network only counts as one user. Alarge attacker like China will still be able to control many addresses,but the hassle of establishing connections from each network (or spoofingTCP connections) may still slow them down. Similarly, as a special case,we should treat IP addresses that are Tor exit nodes as all being onthe same network.<div class="p"><!----></div>The third strategy combines the time-based and location-basedstrategies to further constrain and rate-limit the available bridgeaddresses. Specifically, the bridge address provided in a given timeslot to a given network location is deterministic within the partition,rather than chosen randomly each time from the partition. Thus, repeatedrequests during that time slot from a given network are given the samebridge address as the first request.<div class="p"><!----></div>The fourth strategy is based on Circumventor's discovery strategy.The Circumventor project, realizing that its adoption will remain limitedif it has no central coordination mechanism, has started a mailing list todistribute new proxy addresses every few days. From experimentation itseems they have concluded that sending updates every three or four daysis sufficient to stay ahead of the current attackers.<div class="p"><!----></div>The fifth strategy provides an alternative approach to a mailing list:users provide an email address and receive an automated responselisting an available bridge address. We could limit one response peremail address. To further rate limit queries, we could require a CAPTCHAsolutionin each case too. In fact, we wouldn't need toimplement the CAPTCHA on our side: if we only deliver bridge addressesto Yahoo or GMail addresses, we can leverage the rate-limiting schemesthat other parties already impose for account creation.<div class="p"><!----></div>The sixth strategy ties in the social network design with publicbridges and a reputation system. We pick some seeds — trusted people inblocked areas — and give them each a few dozen bridge addresses and a few<em>delegation tokens</em>. We run a website next to the bridge authority,where users can log in (they connect via Tor, and they don't need toprovide actual identities, just persistent pseudonyms). Users can delegatetrust to other people they know by giving them a token, which can beexchanged for a new account on the website. Accounts in "good standing"then accrue new bridge addresses and new tokens. As usual, reputationschemes bring in a host of new complexities [<a href="#rep-anon" name="CITErep-anon">10</a>]: how do wedecide that an account is in good standing? We could tie reputationto whether the bridges they're told about have been blocked — seeSection <a href="#subsec:geoip">7.7</a> below for initial thoughts on how to discoverwhether bridges have been blocked. We could track reputation betweenaccounts (if you delegate to somebody who screws up, it impacts you too),or we could use blinded delegation tokens [<a href="#chaum-blind" name="CITEchaum-blind">5</a>] to preventthe website from mapping the seeds' social network. We put off deeperdiscussion of the social network reputation strategy for future work.<div class="p"><!----></div>Pools seven and eight are held in reserve, in case our currently deployedtricks all fail at once and the adversary blocks all those bridges — sowe can adapt and move to new approaches quickly, and have some bridgesimmediately available for the new schemes. New strategies might be basedon some other scarce resource, such as relaying traffic for others orother proof of energy spent. (We might also worry about the incentivesfor bridges that sign up and get allocated to the reserve pools: will theybe unhappy that they're not being used? But this is a transient problem:if Tor users are bridges by default, nobody will mind not being used yet.See also Section <a href="#subsec:incentives">9.4</a>.)<div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc7.5">7.5</a>  Public bridges with coordinated discovery</h3><div class="p"><!----></div>We presented the above discovery strategies in the context of a singlebridge directory authority, but in practice we will want to distribute theoperations over several bridge authorities — a single point of failureor attack is a bad move. The first answer is to run several independentbridge directory authorities, and bridges gravitate to one based ontheir identity key. The better answer would be some federation of bridgeauthorities that work together to provide redundancy but don't introducenew security issues. We could even imagine designs where the bridgeauthorities have encrypted versions of the bridge's server descriptors,and the users learn a decryption key that they keep private when theyfirst hear about the bridge — this way the bridge authorities would notbe able to learn the IP address of the bridges.<div class="p"><!----></div>We leave this design question for future work.<div class="p"><!----></div>     <h3><a name="tth_sEc7.6">7.6</a>  Assessing whether bridges are useful</h3><div class="p"><!----></div>Learning whether a bridge is useful is important in the bridge authority'sdecision to include it in responses to blocked users. For example, ifwe end up with a list of thousands of bridges and only a few dozen ofthem are reachable right now, most blocked users will not end up knowingabout working bridges.<div class="p"><!----></div>There are three components for assessing how useful a bridge is. First,is it reachable from the public Internet? Second, what proportion ofthe time is it available? Third, is it blocked in certain jurisdictions?<div class="p"><!----></div>The first component can be tested just as we test reachability ofordinary Tor servers. Specifically, the bridges do a self-test — connectto themselves via the Tor network — before they are willing topublish their descriptor, to make sure they're not obviously broken ormisconfigured. Once the bridges publish, the bridge authority also testsreachability to make sure they're not confused or outright lying.<div class="p"><!----></div>The second component can be measured and tracked by the bridge authority.By doing periodic reachability tests, we can get a sense of how often thebridge is available. More complex tests will involve bandwidth-intensivechecks to force the bridge to commit resources in order to be counted asavailable. We need to evaluate how the relationship of uptime percentageshould weigh into our choice of which bridges to advertise. We leavethis to future work.<div class="p"><!----></div>The third component is perhaps the trickiest: with many differentadversaries out there, how do we keep track of which adversaries haveblocked which bridges, and how do we learn about new blocks as theyoccur? We examine this problem next.<div class="p"><!----></div>     <h3><a name="tth_sEc7.7"><a name="subsec:geoip">7.7</a>  How do we know if a bridge relay has been blocked?</h3></a><div class="p"><!----></div>There are two main mechanisms for testing whether bridges are reachablefrom inside each blocked area: active testing via users, and passivetesting via bridges.<div class="p"><!----></div>In the case of active testing, certain users inside each areasign up as testing relays. The bridge authorities can then use aBlossom-like [<a href="#blossom-thesis" name="CITEblossom-thesis">16</a>] system to build circuits through themto each bridge and see if it can establish the connection. But how dowe pick the users? If we ask random users to do the testing (or if wesolicit volunteers from the users), the adversary should sign up so hecan enumerate the bridges we test. Indeed, even if we hand-select ourtesters, the adversary might still discover their location and monitortheir network activity to learn bridge addresses.<div class="p"><!----></div>Another answer is not to measure directly, but rather let the bridgesreport whether they're being used.Specifically, bridges should install a GeoIP database such as the publicIP-To-Country list [<a href="#ip-to-country" name="CITEip-to-country">19</a>], and then periodically report to thebridge authorities which countries they're seeing use from. This datawould help us track which countries are making use of the bridge design,and can also let us learn about new steps the adversary has taken inthe arms race. (The compressed GeoIP database is only several hundredkilobytes, and we could even automate the update process by serving itfrom the bridge authorities.)More analysis of this passive reachabilitytesting design is needed to resolve its many edge cases: for example,if a bridge stops seeing use from a certain area, does that mean thebridge is blocked or does that mean those users are asleep?<div class="p"><!----></div>There are many more problems with the general concept of detecting whetherbridges are blocked. First, different zones of the Internet are blockedin different ways, and the actual firewall jurisdictions do not matchcountry borders. Our bridge scheme could help us map out the topologyof the censored Internet, but this is a huge task. More generally,if a bridge relay isn't reachable, is that because of a network blocksomewhere, because of a problem at the bridge relay, or just a temporaryoutage somewhere in between? And last, an attacker could poison ourbridge database by signing up already-blocked bridges. In this case,if we're stingy giving out bridge addresses, users in that country won'tlearn working bridges.<div class="p"><!----></div>All of these issues are made more complex when we try to integrate thistesting into our social network reputation system above.Since in that case we punish or reward users based on whether bridgesget blocked, the adversary has new attacks to trick or bog down thereputation tracking. Indeed, the bridge authority doesn't even knowwhat zone the blocked user is in, so do we blame him for any possiblecensored zone, or what?<div class="p"><!----></div>Clearly more analysis is required. The eventual solution will probablyinvolve a combination of passive measurement via GeoIP and activemeasurement from trusted testers.  More generally, we can use the passivefeedback mechanism to track usage of the bridge network as a whole — whichwould let us respond to attacks and adapt the design, and it would alsolet the general public track the progress of the project.<div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc7.8">7.8</a>  Advantages of deploying all solutions at once</h3><div class="p"><!----></div>For once, we're not in the position of the defender: we don't have todefend against every possible filtering scheme; we just have to defendagainst at least one. On the flip side, the attacker is forced to guesshow to allocate his resources to defend against each of these discoverystrategies. So by deploying all of our strategies at once, we not onlyincrease our chances of finding one that the adversary has difficultyblocking, but we actually make <em>all</em> of the strategies more robustin the face of an adversary with limited resources.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div> <h2><a name="tth_sEc8"><a name="sec:security">8</a>  Security considerations</h2></a><div class="p"><!----></div>     <h3><a name="tth_sEc8.1">8.1</a>  Possession of Tor in oppressed areas</h3><div class="p"><!----></div>Many people speculate that installing and using a Tor client in areas withparticularly extreme firewalls is a high risk — and the risk increasesas the firewall gets more restrictive. This notion certain has merit, butthere'sa counter pressure as well: as the firewall gets more restrictive, moreordinary people behind it end up using Tor for more mainstream activities,such as learningabout Wall Street prices or looking at pictures of women's ankles. Soas the restrictive firewall pushes up the number of Tor users, the"typical" Tor user becomes more mainstream, and therefore mereuse or possession of the Tor software is not so surprising.<div class="p"><!----></div>It's hard to say which of these pressures will ultimately win out,but we should keep both sides of the issue in mind.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc8.2"><a name="subsec:upload-padding">8.2</a>  Observers can tell who is publishing and who is reading</h3></a><div class="p"><!----></div>Tor encrypts traffic on the local network, and it obscures the eventualdestination of the communication, but it doesn't do much to obscure thetraffic volume. In particular, a user publishing a home video will have adifferent network fingerprint than a user reading an online news article.Based on our assumption in Section <a href="#sec:adversary">2</a> that users whopublish material are in more danger, should we work to improve Tor'ssecurity in this situation?<div class="p"><!----></div>In the general case this is an extremely challenging task:effective <em>end-to-end traffic confirmation attacks</em>are known where the adversary observes the origin and thedestination of traffic and confirms that they are part of thesame communication [<a href="#danezis:pet2004" name="CITEdanezis:pet2004">8</a>,<a href="#e2e-traffic" name="CITEe2e-traffic">24</a>]. Related are<em>website fingerprinting attacks</em>, where the adversary downloadsa few hundred popular websites, makes a set of "fingerprints" for eachsite, and then observes the target Tor client's traffic to look fora match [<a href="#pet05-bissias" name="CITEpet05-bissias">4</a>,<a href="#defensive-dropping" name="CITEdefensive-dropping">21</a>]. But can we do betteragainst a limited adversary who just does coarse-grained sweeps lookingfor unusually prolific publishers?<div class="p"><!----></div>One answer is for bridge users to automatically send bursts of paddingtraffic periodically. (This traffic can be implemented in terms oflong-range drop cells, which are already part of the Tor specification.)Of course, convincingly simulating an actual human publishing interestingcontent is a difficult arms race, but it may be worthwhile to at leaststart the race. More research remains.<div class="p"><!----></div>     <h3><a name="tth_sEc8.3">8.3</a>  Anonymity effects from acting as a bridge relay</h3><div class="p"><!----></div>Against some attacks, relaying traffic for others can improveanonymity. The simplest example is an attacker who owns a small numberof Tor servers. He will see a connection from the bridge, but he won'tbe able to know whether the connection originated there or was relayedfrom somebody else. More generally, the mere uncertainty of whether thetraffic originated from that user may be helpful.<div class="p"><!----></div>There are some cases where it doesn't seem to help: if an attacker canwatch all of the bridge's incoming and outgoing traffic, then it's easyto learn which connections were relayed and which started there. (In thiscase he still doesn't know the final destinations unless he is watchingthem too, but in this case bridges are no better off than if they werean ordinary client.)<div class="p"><!----></div>There are also some potential downsides to running a bridge. First, whilewe try to make it hard to enumerate all bridges, it's still possible tolearn about some of them, and for some people just the fact that they'rerunning one might signal to an attacker that they place a higher valueon their anonymity. Second, there are some more esoteric attacks on Torrelays that are not as well-understood or well-tested — for example, anattacker may be able to "observe" whether the bridge is sending trafficeven if he can't actually watch its network, by relaying traffic throughit and noticing changes in traffic timing [<a href="#attack-tor-oak05" name="CITEattack-tor-oak05">25</a>]. Onthe other hand, it may be that limiting the bandwidth the bridge iswilling to relay will allow this sort of attacker to determine if it'sbeing used as a bridge but not easily learn whether it is adding trafficof its own.<div class="p"><!----></div>We also need to examine how entry guards fit in. Entry guards(a small set of nodes that are always used for the firststep in a circuit) help protect against certain attackswhere the attacker runs a few Tor servers and waits forthe user to choose these servers as the beginning and end of hercircuit<a href="#tthFtNtAAC" name="tthFrefAAC"><sup>2</sup></a>.If the blocked user doesn't use the bridge's entry guards, then the bridgedoesn't gain as much cover benefit. On the other hand, what design changesare needed for the blocked user to use the bridge's entry guards withoutlearning what they are (this seems hard), and even if we solve that,do they then need to use the guards' guards and so on down the line?<div class="p"><!----></div>It is an open research question whether the benefits of running a bridgeoutweigh the risks. A lot of the decision rests on which attacks theusers are most worried about. For most users, we don't think running abridge relay will be that damaging, and it could help quite a bit.<div class="p"><!----></div>     <h3><a name="tth_sEc8.4"><a name="subsec:cafes-and-livecds">8.4</a>  Trusting local hardware: Internet cafes and LiveCDs</h3></a><div class="p"><!----></div>Assuming that users have their own trusted hardware is notalways reasonable.<div class="p"><!----></div>For Internet cafe Windows computers that let you attach your own USB key,a USB-based Tor image would be smart. There's Torpark, and hopefullythere will be more thoroughly analyzed and trustworthy options down theroad. Worries remain about hardware or software keyloggers and otherspyware, as well as physical surveillance.<div class="p"><!----></div>If the system lets you boot from a CD or from a USB key, you can gaina bit more security by bringing a privacy LiveCD with you. (Thisapproach isn't foolproof either of course, since hardwarekeyloggers and physical surveillance are still a worry).<div class="p"><!----></div>In fact, LiveCDs are also useful if it's your own hardware, since it'seasier to avoid leaving private data and logs scattered around thesystem.<div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc8.5"><a name="subsec:trust-chain">8.5</a>  The trust chain</h3></a><div class="p"><!----></div>Tor's "public key infrastructure" provides a chain of trust tolet users verify that they're actually talking to the right servers.There are four pieces to this trust chain.<div class="p"><!----></div>First, when Tor clients are establishing circuits, at each stepthey demand that the next Tor server in the path prove knowledge ofits private key [<a href="#tor-design" name="CITEtor-design">11</a>]. This step prevents the first nodein the path from just spoofing the rest of the path. Second, theTor directory authorities provide a signed list of servers along withtheir public keys — so unless the adversary can control a thresholdof directory authorities, he can't trick the Tor client into using otherTor servers. Third, the location and keys of the directory authorities,in turn, is hard-coded in the Tor source code — so as long as the usergot a genuine version of Tor, he can know that he is using the genuineTor network. And last, the source code and other packages are signedwith the GPG keys of the Tor developers, so users can confirm that theydid in fact download a genuine version of Tor.<div class="p"><!----></div>In the case of blocked users contacting bridges and bridge directoryauthorities, the same logic applies in parallel: the blocked users fetchinformation from both the bridge authorities and the directory authoritiesfor the `main' Tor network, and they combine this information locally.<div class="p"><!----></div>How can a user in an oppressed country know that he has the correctkey fingerprints for the developers? As with other security systems, itultimately comes down to human interaction. The keys are signed by dozensof people around the world, and we have to hope that our users have metenough people in the PGP web of trustthat they can learnthe correct keys. For users that aren't connected to the global securitycommunity, though, this question remains a critical weakness.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div> <h2><a name="tth_sEc9"><a name="sec:reachability">9</a>  Maintaining reachability</h2></a><div class="p"><!----></div>     <h3><a name="tth_sEc9.1">9.1</a>  How many bridge relays should you know about?</h3><div class="p"><!----></div>The strategies described in Section <a href="#sec:discovery">7</a> talked aboutlearning one bridge address at a time. But if most bridges are ordinaryTor users on cable modem or DSL connection, many of them will disappearand/or move periodically. How many bridge relays should a blocked userknow about so that she is likely to have at least one reachable at anygiven point? This is already a challenging problem if we only considernatural churn: the best approach is to see what bridges we attract inreality and measure their churn. We may also need to factor in a parameterfor how quickly bridges get discovered and blocked by the attacker;we leave this for future work after we have more deployment experience.<div class="p"><!----></div>A related question is: if the bridge relays change IP addressesperiodically, how often does the blocked user need to fetch updates inorder to keep from being cut out of the loop?<div class="p"><!----></div>Once we have more experience and intuition, we should explore technicalsolutions to this problem too. For example, if the discovery strategiesgive out k bridge addresses rather than a single bridge address, perhapswe can improve robustness from the user perspective without significantlyaiding the adversary. Rather than giving out a new random subset of kaddresses at each point, we could bind them together into <em>bridgefamilies</em>, so all users that learn about one member of the bridge familyare told about the rest as well.<div class="p"><!----></div>This scheme may also help defend against attacks to map the set ofbridges. That is, if all blocked users learn a random subset of bridges,the attacker should learn about a few bridges, monitor the country-levelfirewall for connections to them, then watch those users to see whatother bridges they use, and repeat. By segmenting the bridge addressspace, we can limit the exposure of other users.<div class="p"><!----></div>     <h3><a name="tth_sEc9.2"><a name="subsec:block-cable">9.2</a>  Cablemodem users don't usually provide important websites</h3></a><div class="p"><!----></div>Another attacker we might be concerned about is that the attacker couldjust block all DSL and cablemodem network addresses, on the theory thatthey don't run any important services anyway. If most of our bridgesare on these networks, this attack could really hurt.<div class="p"><!----></div>The first answer is to aim to get volunteers both from traditionally"consumer" networks and also from traditionally "producer" networks.Since bridges don't need to be Tor exit nodes, as we improve our usabilityit seems quite feasible to get a lot of websites helping out.<div class="p"><!----></div>The second answer (not as practical) would be to encourage more use ofconsumer networks for popular and useful Internet services.<div class="p"><!----></div>A related attack we might worry about is based on large countries puttingeconomic pressure on companies that want to expand their business. Forexample, what happens if Verizon wants to sell services in China, andChina pressures Verizon to discourage its users in the free world fromrunning bridges?<div class="p"><!----></div>     <h3><a name="tth_sEc9.3">9.3</a>  Scanning resistance: making bridges more subtle</h3><div class="p"><!----></div>If it's trivial to verify that a given address is operating as a bridge,and most bridges run on a predictable port, then it's conceivable ourattacker could scan the whole Internet looking for bridges. (In fact,he can just concentrate on scanning likely networks like cablemodemand DSL services — see Section <a href="#subsec:block-cable">9.2</a>above forrelated attacks.) It would be nice to slow down this attack. It wouldbe even nicer to make it hard to learn whether we're a bridge withoutfirst knowing some secret. We call this general property <em>scanningresistance</em>, and it goes along with normalizing Tor's TLS handshake andnetwork fingerprint.<div class="p"><!----></div>We could provide a password to the blocked user, and she (or her Torclient) provides a nonced hash of this password when she connects. We'dneed to give her an ID key for the bridge too (in addition to the IPaddress and port — see Section <a href="#subsec:id-address">6.1</a>), and wait topresent the password until we've finished the TLS handshake, else itwould look unusual. If Alice can authenticate the bridge before shetries to send her password, we can resist an adversary who pretendsto be the bridge and launches a man-in-the-middle attack to learn thepassword. But even if she can't, we still resist against widespreadscanning.<div class="p"><!----></div>How should the bridge behave if accessed without the correctauthorization? Perhaps it should act like an unconfigured HTTPS server("welcome to the default Apache page"), or maybe it should mirrorand act like common websites, or websites randomly chosen from Google.<div class="p"><!----></div>We might assume that the attacker can recognize HTTPS connections thatuse self-signed certificates. (This process would be resource-intensivebut not out of the realm of possibility.) But even in this case, manypopular websites around the Internet use self-signed or just plain brokenSSL certificates.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc9.4"><a name="subsec:incentives">9.4</a>  How to motivate people to run bridge relays</h3></a><div class="p"><!----></div>One of the traditional ways to get people to run software that benefitsothers is to give them motivation to install it themselves.  An oftensuggested approach is to install it as a stunning screensaver so everybodywill be pleased to run it. We take a similar approach here, by leveragingthe fact that these users are already interested in protecting theirown Internet traffic, so they will install and run the software.<div class="p"><!----></div>Eventually, we may be able to make all Tor users become bridges if theypass their self-reachability tests — the software and installers needmore work on usability first, but we're making progress.<div class="p"><!----></div>In the mean time, we can make a snazzy network graph withVidalia<a href="#tthFtNtAAD" name="tthFrefAAD"><sup>3</sup></a> thatemphasizes the connections the bridge user is currently relaying.<div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div>     <h3><a name="tth_sEc9.5"><a name="subsec:publicity">9.5</a>  Publicity attracts attention</h3></a><div class="p"><!----></div>Many people working on this field want to publicize the existenceand extent of censorship concurrently with the deployment of theircircumvention software. The easy reason for this two-pronged push isto attract volunteers for running proxies in their systems; but in manycases their main goal is not to focus on actually allowing individualsto circumvent the firewall, but rather to educate the world about thecensorship. The media also tries to do its part by broadcasting theexistence of each new circumvention system.<div class="p"><!----></div>But at the same time, this publicity attracts the attention of thecensors. We can slow down the arms race by not attracting as muchattention, and just spreading by word of mouth. If our goal is toestablish a solid social network of bridges and bridge users beforethe adversary gets involved, does this extra attention work to ourdisadvantage?<div class="p"><!----></div>     <h3><a name="tth_sEc9.6">9.6</a>  The Tor website: how to get the software</h3><div class="p"><!----></div>One of the first censoring attacks against a system like ours is toblock the website and make the software itself hard to find. Our systemshould work well once the user is running an authenticcopy of Tor and has found a working bridge, but to get to that pointwe rely on their individual skills and ingenuity.<div class="p"><!----></div>Right now, most countries that block access to Tor block only the mainwebsite and leave mirrors and the network itself untouched.Falling back on word-of-mouth is always a good last resort, but we shouldalso take steps to make sure it's relatively easy for users to get a copy,such as publicizing the mirrors more and making copies available throughother media. We might also mirror the latest version of the software oneach bridge, so users who hear about an honest bridge can get a goodcopy.See Section <a href="#subsec:first-bridge">7.1</a> for more discussion.<div class="p"><!----></div><div class="p"><!----></div> <h2><a name="tth_sEc10"><a name="sec:future">10</a>  Future designs</h2></a><div class="p"><!----></div>     <h3><a name="tth_sEc10.1">10.1</a>  Bridges inside the blocked network too</h3><div class="p"><!----></div>Assuming actually crossing the firewall is the risky part of theoperation, can we have some bridge relays inside the blocked area too,and more established users can use them as relays so they don't need tocommunicate over the firewall directly at all? A simple example here isto make new blocked users into internal bridges also — so they sign upon the bridge authority as part of doing their query, and we give outtheir addressesrather than (or along with) the external bridge addresses. This designis a lot trickier because it brings in the complexity of whether theinternal bridges will remain available, can maintain reachability withthe outside world, etc.<div class="p"><!----></div>More complex future designs involve operating a separate Tor networkinside the blocked area, and using <em>hidden service bridges</em> — bridgesthat can be accessed by users of the internal Tor network but whoseaddresses are not published or findable, even by these users — to getfrom inside the firewall to the rest of the Internet. But this designrequires directory authorities to run inside the blocked area too,and they would be a fine target to take down the network.<div class="p"><!----></div><div class="p"><!----></div> <h2><a name="tth_sEc11"><a name="sec:conclusion">11</a>  Next Steps</h2></a><div class="p"><!----></div>Technical solutions won't solve the whole censorship problem. After all,the firewalls in places like China are <em>socially</em> verysuccessful, even if technologies and tricks exist to get around them.However, having a strong technical solution is still necessary as oneimportant piece of the puzzle.<div class="p"><!----></div>In this paper, we have shown that Tor provides a great set of buildingblocks to start from. The next steps are to deploy prototype bridges andbridge authorities, implement some of the proposed discovery strategies,and then observe the system in operation and get more intuition aboutthe actual requirements and adversaries we're up against.<div class="p"><!----></div><h2>References</h2><dl compact="compact"> <dt><a href="#CITEeconymics" name="econymics">[1]</a></dt><dd>Alessandro Acquisti, Roger Dingledine, and Paul Syverson. On the economics of anonymity. In Rebecca N. Wright, editor, <em>Financial Cryptography</em>.  Springer-Verlag, LNCS 2742, 2003.<div class="p"><!----></div></dd> <dt><a href="#CITEfreedom21-security" name="freedom21-security">[2]</a></dt><dd>Adam Back, Ian Goldberg, and Adam Shostack. Freedom systems 2.1 security issues and analysis. White paper, Zero Knowledge Systems, Inc., May 2001.<div class="p"><!----></div></dd> <dt><a href="#CITEweb-mix" name="web-mix">[3]</a></dt><dd>Oliver Berthold, Hannes Federrath, and Stefan Köpsell. Web MIXes: A system for anonymous and unobservable Internet  access. In H. Federrath, editor, <em>Designing Privacy Enhancing  Technologies: Workshop on Design Issue in Anonymity and Unobservability</em>.  Springer-Verlag, LNCS 2009, 2000.<div class="p"><!----></div></dd> <dt><a href="#CITEpet05-bissias" name="pet05-bissias">[4]</a></dt><dd>George Dean Bissias, Marc Liberatore, and Brian Neil Levine. Privacy vulnerabilities in encrypted http streams. In <em>Proceedings of Privacy Enhancing Technologies workshop (PET  2005)</em>, May 2005.  <a href="http://prisms.cs.umass.edu/brian/pubs/bissias.liberatore.pet.2005.pdf"><tt>http://prisms.cs.umass.edu/brian/pubs/bissias.liberatore.pet.2005.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEchaum-blind" name="chaum-blind">[5]</a></dt><dd>David Chaum. Blind signatures for untraceable payments. In D. Chaum, R.L. Rivest, and A.T. Sherman, editors, <em>Advances in  Cryptology: Proceedings of Crypto 82</em>, pages 199-203. Plenum Press, 1983.<div class="p"><!----></div></dd> <dt><a href="#CITEfreenet-pets00" name="freenet-pets00">[6]</a></dt><dd>Ian Clarke, Oskar Sandberg, Brandon Wiley, and Theodore W. Hong. Freenet: A distributed anonymous information storage and retrieval  system. In H. Federrath, editor, <em>Designing Privacy Enhancing  Technologies: Workshop on Design Issue in Anonymity and Unobservability</em>,  pages 46-66. Springer-Verlag, LNCS 2009, July 2000.<div class="p"><!----></div></dd> <dt><a href="#CITEclayton:pet2006" name="clayton:pet2006">[7]</a></dt><dd>Richard Clayton, Steven J. Murdoch, and Robert N. M. Watson. Ignoring the great firewall of china. In <em>Proceedings of the Sixth Workshop on Privacy Enhancing  Technologies (PET 2006)</em>, Cambridge, UK, June 2006. Springer. <a href="http://www.cl.cam.ac.uk/~rnc1/ignoring.pdf"><tt>http://www.cl.cam.ac.uk/~rnc1/ignoring.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEdanezis:pet2004" name="danezis:pet2004">[8]</a></dt><dd>George Danezis. The traffic analysis of continuous-time mixes. In David Martin and Andrei Serjantov, editors, <em>Privacy Enhancing  Technologies (PET 2004)</em>, LNCS, May 2004. <a href="http://www.cl.cam.ac.uk/users/gd216/cmm2.pdf"><tt>http://www.cl.cam.ac.uk/users/gd216/cmm2.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEusability:weis2006" name="usability:weis2006">[9]</a></dt><dd>Roger Dingledine and Nick Mathewson. Anonymity loves company: Usability and the network effect. In <em>Proceedings of the Fifth Workshop on the Economics of  Information Security (WEIS 2006)</em>, Cambridge, UK, June 2006. <a href="http://freehaven.net/doc/wupss04/usability.pdf"><tt>http://freehaven.net/doc/wupss04/usability.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITErep-anon" name="rep-anon">[10]</a></dt><dd>Roger Dingledine, Nick Mathewson, and Paul Syverson. Reputation in P2P Anonymity Systems. In <em>Proceedings of Workshop on Economics of Peer-to-Peer  Systems</em>, June 2003. <a href="http://freehaven.net/doc/econp2p03/econp2p03.pdf"><tt>http://freehaven.net/doc/econp2p03/econp2p03.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEtor-design" name="tor-design">[11]</a></dt><dd>Roger Dingledine, Nick Mathewson, and Paul Syverson. Tor: The second-generation onion router. In <em>Proceedings of the 13th USENIX Security Symposium</em>, August  2004. <a href="http://tor.eff.org/tor-design.pdf"><tt>http://tor.eff.org/tor-design.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEcasc-rep" name="casc-rep">[12]</a></dt><dd>Roger Dingledine and Paul Syverson. Reliable MIX Cascade Networks through Reputation. In Matt Blaze, editor, <em>Financial Cryptography</em>. Springer-Verlag,  LNCS 2357, 2002.<div class="p"><!----></div></dd> <dt><a href="#CITEpsiphon" name="psiphon">[13]</a></dt><dd>Ronald Deibert et al. Psiphon. <a href="http://psiphon.civisec.org/"><tt>http://psiphon.civisec.org/</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEinfranet" name="infranet">[14]</a></dt><dd>Nick Feamster, Magdalena Balazinska, Greg Harfst, Hari Balakrishnan, and David  Karger. Infranet: Circumventing web censorship and surveillance. In <em>Proceedings of the 11th USENIX Security Symposium</em>, August  2002. <a href="http://nms.lcs.mit.edu/~feamster/papers/usenixsec2002.pdf"><tt>http://nms.lcs.mit.edu/~feamster/papers/usenixsec2002.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEactive-wardens" name="active-wardens">[15]</a></dt><dd>Gina Fisk, Mike Fisk, Christos Papadopoulos, and Joshua Neil. Eliminating steganography in internet traffic with active wardens. In Fabien Petitcolas, editor, <em>Information Hiding Workshop (IH  2002)</em>. Springer-Verlag, LNCS 2578, October 2002.<div class="p"><!----></div></dd> <dt><a href="#CITEblossom-thesis" name="blossom-thesis">[16]</a></dt><dd>Geoffrey Goodell. <em>Perspective Access Networks</em>. PhD thesis, Harvard University, July 2006. <a href="http://afs.eecs.harvard.edu/~goodell/thesis.pdf"><tt>http://afs.eecs.harvard.edu/~goodell/thesis.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEgoodell-syverson06" name="goodell-syverson06">[17]</a></dt><dd>Geoffrey Goodell and Paul Syverson. The right place at the right time: The use of network location in  authentication and abuse prevention, 2006. Submitted.<div class="p"><!----></div></dd> <dt><a href="#CITEcircumventor" name="circumventor">[18]</a></dt><dd>Bennett Haselton. How to install the Circumventor program.  <a href="http://www.peacefire.org/circumventor/simple-circumventor-instructions.html"><tt>http://www.peacefire.org/circumventor/simple-circumventor-instructions.html</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEip-to-country" name="ip-to-country">[19]</a></dt><dd>Ip-to-country database. <a href="http://ip-to-country.webhosting.info/"><tt>http://ip-to-country.webhosting.info/</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEkoepsell:wpes2004" name="koepsell:wpes2004">[20]</a></dt><dd>Stefan Köpsell and Ulf Hilling. How to achieve blocking resistance for existing systems enabling  anonymous web surfing. In <em>Proceedings of the Workshop on Privacy in the Electronic  Society (WPES 2004)</em>, Washington, DC, USA, October 2004. <a href="http://freehaven.net/anonbib/papers/p103-koepsell.pdf"><tt>http://freehaven.net/anonbib/papers/p103-koepsell.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEdefensive-dropping" name="defensive-dropping">[21]</a></dt><dd>Brian N. Levine, Michael K. Reiter, Chenxi Wang, and Matthew Wright. Timing analysis in low-latency mix-based systems. In Ari Juels, editor, <em>Financial Cryptography</em>. Springer-Verlag,  LNCS (forthcoming), 2004.<div class="p"><!----></div></dd> <dt><a href="#CITEmackinnon-personal" name="mackinnon-personal">[22]</a></dt><dd>Rebecca MacKinnon. Private communication, 2006.<div class="p"><!----></div></dd> <dt><a href="#CITEcgiproxy" name="cgiproxy">[23]</a></dt><dd>James Marshall. CGIProxy: HTTP/FTP Proxy in a CGI Script. <a href="http://www.jmarshall.com/tools/cgiproxy/"><tt>http://www.jmarshall.com/tools/cgiproxy/</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEe2e-traffic" name="e2e-traffic">[24]</a></dt><dd>Nick Mathewson and Roger Dingledine. Practical traffic analysis: Extending and resisting statistical  disclosure. In David Martin and Andrei Serjantov, editors, <em>Privacy Enhancing  Technologies (PET 2004)</em>, LNCS, May 2004. <a href="http://freehaven.net/doc/e2e-traffic/e2e-traffic.pdf"><tt>http://freehaven.net/doc/e2e-traffic/e2e-traffic.pdf</tt></a>.<div class="p"><!----></div></dd> <dt><a href="#CITEattack-tor-oak05" name="attack-tor-oak05">[25]</a></dt><dd>Steven J. Murdoch and George Danezis. Low-cost traffic analysis of tor. In <em>IEEE Symposium on Security and Privacy</em>. IEEE CS, May 2005.<div class="p"><!----></div></dd> <dt><a href="#CITEtcpstego" name="tcpstego">[26]</a></dt><dd>Steven J. Murdoch and Stephen Lewis. Embedding covert channels into TCP/IP. In Mauro Barni, Jordi Herrera-Joancomartí, Stefan Katzenbeisser,  and Fernando Pérez-González, editors, <em>Information Hiding: 7th  International Workshop</em>, volume 3727 of <em>LNCS</em>, pages 247-261,  Barcelona, Catalonia (Spain), June 2005. Springer-Verlag.<div class="p"><!----></div></dd> <dt><a href="#CITEptacek98insertion" name="ptacek98insertion">[27]</a></dt><dd>Thomas H. Ptacek and Timothy N. Newsham. Insertion, evasion, and denial of service: Eluding network intrusion  detection. Technical report, Secure Networks, Inc., Suite 330, 1201 5th Street  S.W, Calgary, Alberta, Canada, T2R-0Y6, 1998.<div class="p"><!----></div></dd> <dt><a href="#CITEzuckerman-threatmodels" name="zuckerman-threatmodels">[28]</a></dt><dd>Ethan Zuckerman. We've got to adjust some of our threat models. <a href="http://www.ethanzuckerman.com/blog/?p=1019"><tt>http://www.ethanzuckerman.com/blog/?p=1019</tt></a>.</dd></dl><div class="p"><!----></div><div class="p"><!----></div><div class="p"><!----></div><hr /><h3>Footnotes:</h3><div class="p"><!----></div><a name="tthFtNtAAB"></a><a href="#tthFrefAAB"><sup>1</sup></a>So far in places  like China, the authorities mainly go after people who publish materials  and coordinate organized movements [<a href="#mackinnon-personal" name="CITEmackinnon-personal">22</a>].  If they find that a  user happens to be reading a site that should be blocked, the typical  response is simply to block the site. Of course, even with an encrypted  connection, the adversary may be able to distinguish readers from  publishers by observing whether Alice is mostly downloading bytes or mostly  uploading them — we discuss this issue more in  Section <a href="#subsec:upload-padding">8.2</a>.<div class="p"><!----></div><a name="tthFtNtAAC"></a><a href="#tthFrefAAC"><sup>2</sup></a><a href="http://wiki.noreply.org/noreply/TheOnionRouter/TorFAQ\#EntryGuards"><tt>http://wiki.noreply.org/noreply/TheOnionRouter/TorFAQ#EntryGuards</tt></a><div class="p"><!----></div><a name="tthFtNtAAD"></a><a href="#tthFrefAAD"><sup>3</sup></a><a href="http://vidalia-project.net/"><tt>http://vidalia-project.net/</tt></a><br /><br /><hr /><small>File translated fromT<sub><font size="-1">E</font></sub>Xby <a href="http://hutchinson.belmont.ma.us/tth/">T<sub><font size="-1">T</font></sub>H</a>,version 3.77.<br />On 11 May 2007, 21:49.</small></html>
 |