relay.c 94 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
7227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688
  1. /* Copyright (c) 2001 Matej Pfajfar.
  2. * Copyright (c) 2001-2004, Roger Dingledine.
  3. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
  4. * Copyright (c) 2007-2012, The Tor Project, Inc. */
  5. /* See LICENSE for licensing information */
  6. /**
  7. * \file relay.c
  8. * \brief Handle relay cell encryption/decryption, plus packaging and
  9. * receiving from circuits, plus queuing on circuits.
  10. **/
  11. #include <math.h>
  12. #define RELAY_PRIVATE
  13. #include "or.h"
  14. #include "buffers.h"
  15. #include "circuitbuild.h"
  16. #include "circuitlist.h"
  17. #include "config.h"
  18. #include "connection.h"
  19. #include "connection_edge.h"
  20. #include "connection_or.h"
  21. #include "control.h"
  22. #include "geoip.h"
  23. #include "main.h"
  24. #include "mempool.h"
  25. #include "networkstatus.h"
  26. #include "nodelist.h"
  27. #include "policies.h"
  28. #include "reasons.h"
  29. #include "relay.h"
  30. #include "rendcommon.h"
  31. #include "router.h"
  32. #include "routerlist.h"
  33. #include "routerparse.h"
/* Forward declarations for the static helpers defined later in this file. */
static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell,
                                            cell_direction_t cell_direction,
                                            crypt_path_t *layer_hint);
static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
                                              edge_connection_t *conn,
                                              crypt_path_t *layer_hint);
static void circuit_consider_sending_sendme(circuit_t *circ,
                                            crypt_path_t *layer_hint);
static void circuit_resume_edge_reading(circuit_t *circ,
                                        crypt_path_t *layer_hint);
static int circuit_resume_edge_reading_helper(edge_connection_t *conn,
                                              circuit_t *circ,
                                              crypt_path_t *layer_hint);
static int circuit_consider_stop_edge_reading(circuit_t *circ,
                                              crypt_path_t *layer_hint);
static int circuit_queue_streams_are_blocked(circuit_t *circ);
/* XXXX023 move this all to compat_libevent */

/** Cache of the current hi-res time, as last fetched by
 * tor_gettimeofday_cached().  A tv_sec of 0 marks the cache as empty; the
 * cache gets reset when libevent calls us (via
 * tor_gettimeofday_cache_clear()). */
static struct timeval cached_time_hires = {0, 0};

/** Stop reading on edge connections when we have this many cells
 * waiting on the appropriate queue. */
#define CELL_QUEUE_HIGHWATER_SIZE 256
/** Start reading from edge connections again when we get down to this many
 * cells. */
#define CELL_QUEUE_LOWWATER_SIZE 64
/** Set *<b>tv</b> to the current hi-res time, using the cached value if one
 * is available.  Only calls tor_gettimeofday() when the cache is empty
 * (i.e., after tor_gettimeofday_cache_clear() has been called).
 *
 * NOTE(review): treats tv_sec == 0 as "cache empty", so a genuine time of
 * 0 would force a refetch on every call -- harmless in practice. */
static void
tor_gettimeofday_cached(struct timeval *tv)
{
  if (cached_time_hires.tv_sec == 0) {
    tor_gettimeofday(&cached_time_hires);
  }
  *tv = cached_time_hires;
}
/** Invalidate the cached hi-res time, so that the next call to
 * tor_gettimeofday_cached() fetches a fresh value. */
void
tor_gettimeofday_cache_clear(void)
{
  cached_time_hires.tv_sec = 0;
}
/** Stats: how many relay cells have originated at this hop, or have
 * been relayed onward (not recognized at this hop)?  Incremented in
 * circuit_receive_relay_cell() and circuit_package_relay_cell().
 */
uint64_t stats_n_relay_cells_relayed = 0;
/** Stats: how many relay cells have been delivered to streams at this
 * hop?  Incremented in circuit_receive_relay_cell().
 */
uint64_t stats_n_relay_cells_delivered = 0;
/** Update <b>digest</b> from the payload of <b>cell</b>.  Assign the first
 * 4 bytes of the resulting running digest to the integrity part of
 * <b>cell</b>'s relay header.
 *
 * Ordering matters here: the digest is advanced over the payload as it
 * currently stands, and only afterwards is the integrity field overwritten
 * and the header re-packed.  (Callers are expected to have left the
 * integrity field zeroed; see relay_send_command_from_edge().)
 */
static void
relay_set_digest(crypto_digest_t *digest, cell_t *cell)
{
  char integrity[4];
  relay_header_t rh;

  crypto_digest_add_bytes(digest, (char*)cell->payload, CELL_PAYLOAD_SIZE);
  crypto_digest_get_digest(digest, integrity, 4);
  //  log_fn(LOG_DEBUG,"Putting digest of %u %u %u %u into relay cell.",
  //    integrity[0], integrity[1], integrity[2], integrity[3]);
  relay_header_unpack(&rh, cell->payload);
  memcpy(rh.integrity, integrity, 4);
  relay_header_pack(cell->payload, &rh);
}
/** Does the digest for this circuit indicate that this cell is for us?
 *
 * Update <b>digest</b> from the payload of <b>cell</b> (with the integrity
 * part set to 0).  If the integrity part is valid, return 1, else restore
 * <b>digest</b> and <b>cell</b> to their original state and return 0.
 *
 * The rollback discipline is the tricky part: we duplicate the running
 * digest state before advancing it, so that a failed match leaves both the
 * digest and the cell bytes exactly as we found them -- necessary because
 * an unrecognized cell must be relayed onward unmodified.
 */
static int
relay_digest_matches(crypto_digest_t *digest, cell_t *cell)
{
  char received_integrity[4], calculated_integrity[4];
  relay_header_t rh;
  crypto_digest_t *backup_digest=NULL;

  /* Snapshot the digest state in case the check fails. */
  backup_digest = crypto_digest_dup(digest);

  /* Extract the sender's integrity value, then zero the field in the
   * header: the sender computed its digest with this field zeroed. */
  relay_header_unpack(&rh, cell->payload);
  memcpy(received_integrity, rh.integrity, 4);
  memset(rh.integrity, 0, 4);
  relay_header_pack(cell->payload, &rh);

  //  log_fn(LOG_DEBUG,"Reading digest of %u %u %u %u from relay cell.",
  //    received_integrity[0], received_integrity[1],
  //    received_integrity[2], received_integrity[3]);

  crypto_digest_add_bytes(digest, (char*) cell->payload, CELL_PAYLOAD_SIZE);
  crypto_digest_get_digest(digest, calculated_integrity, 4);

  /* tor_memneq is a constant-time comparison. */
  if (tor_memneq(received_integrity, calculated_integrity, 4)) {
    //    log_fn(LOG_INFO,"Recognized=0 but bad digest. Not recognizing.");
    //    (%d vs %d).", received_integrity, calculated_integrity);
    /* restore digest to its old form */
    crypto_digest_assign(digest, backup_digest);
    /* restore the relay header */
    memcpy(rh.integrity, received_integrity, 4);
    relay_header_pack(cell->payload, &rh);
    crypto_digest_free(backup_digest);
    return 0;
  }
  crypto_digest_free(backup_digest);
  return 1;
}
  135. /** Apply <b>cipher</b> to CELL_PAYLOAD_SIZE bytes of <b>in</b>
  136. * (in place).
  137. *
  138. * If <b>encrypt_mode</b> is 1 then encrypt, else decrypt.
  139. *
  140. * Return -1 if the crypto fails, else return 0.
  141. */
  142. static int
  143. relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in,
  144. int encrypt_mode)
  145. {
  146. int r;
  147. (void)encrypt_mode;
  148. r = crypto_cipher_crypt_inplace(cipher, (char*) in, CELL_PAYLOAD_SIZE);
  149. if (r) {
  150. log_warn(LD_BUG,"Error during relay encryption");
  151. return -1;
  152. }
  153. return 0;
  154. }
/** Receive a relay cell:
 *  - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the
 *    origin; decrypt if we're headed toward the exit).
 *  - Check if recognized (if exitward).
 *  - If recognized and the digest checks out, then find if there's a stream
 *    that the cell is intended for, and deliver it to the right
 *    connection_edge.
 *  - If not recognized, then we need to relay it: append it to the
 *    appropriate cell_queue on <b>circ</b>.
 *
 * Return -<b>reason</b> on failure.
 */
int
circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
                           cell_direction_t cell_direction)
{
  or_connection_t *or_conn=NULL;
  crypt_path_t *layer_hint=NULL;
  char recognized=0;
  int reason;

  tor_assert(cell);
  tor_assert(circ);
  tor_assert(cell_direction == CELL_DIRECTION_OUT ||
             cell_direction == CELL_DIRECTION_IN);
  if (circ->marked_for_close)
    return 0;

  /* Crypt one (or, at the origin, several) layers; sets recognized and
   * layer_hint if the cell turns out to be addressed to this hop. */
  if (relay_crypt(circ, cell, cell_direction, &layer_hint, &recognized) < 0) {
    log_warn(LD_BUG,"relay crypt failed. Dropping connection.");
    return -END_CIRC_REASON_INTERNAL;
  }

  if (recognized) {
    /* The cell is for us: deliver it to the matching edge stream, if any.
     * conn may be NULL; connection_edge_process_relay_cell handles that. */
    edge_connection_t *conn = relay_lookup_conn(circ, cell, cell_direction,
                                                layer_hint);
    if (cell_direction == CELL_DIRECTION_OUT) {
      ++stats_n_relay_cells_delivered;
      log_debug(LD_OR,"Sending away from origin.");
      if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL))
          < 0) {
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "connection_edge_process_relay_cell (away from origin) "
               "failed.");
        return reason;
      }
    }
    if (cell_direction == CELL_DIRECTION_IN) {
      ++stats_n_relay_cells_delivered;
      log_debug(LD_OR,"Sending to origin.");
      if ((reason = connection_edge_process_relay_cell(cell, circ, conn,
                                                       layer_hint)) < 0) {
        log_warn(LD_OR,
                 "connection_edge_process_relay_cell (at origin) failed.");
        return reason;
      }
    }
    return 0;
  }

  /* not recognized. pass it on. */
  if (cell_direction == CELL_DIRECTION_OUT) {
    cell->circ_id = circ->n_circ_id; /* switch it */
    or_conn = circ->n_conn;
  } else if (! CIRCUIT_IS_ORIGIN(circ)) {
    cell->circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; /* switch it */
    or_conn = TO_OR_CIRCUIT(circ)->p_conn;
  } else {
    /* An inbound cell that no hop on our own circuit recognized: someone
     * is misbehaving, but it's only worth a protocol warning. */
    log_fn(LOG_PROTOCOL_WARN, LD_OR,
           "Dropping unrecognized inbound cell on origin circuit.");
    return 0;
  }

  if (!or_conn) {
    // XXXX Can this splice stuff be done more cleanly?
    /* No next connection: the only legitimate case is a rendezvous splice,
     * where we hand the cell to the joined circuit as an inbound cell. */
    if (! CIRCUIT_IS_ORIGIN(circ) &&
        TO_OR_CIRCUIT(circ)->rend_splice &&
        cell_direction == CELL_DIRECTION_OUT) {
      or_circuit_t *splice = TO_OR_CIRCUIT(circ)->rend_splice;
      tor_assert(circ->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
      tor_assert(splice->_base.purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
      cell->circ_id = splice->p_circ_id;
      cell->command = CELL_RELAY; /* can't be relay_early anyway */
      if ((reason = circuit_receive_relay_cell(cell, TO_CIRCUIT(splice),
                                               CELL_DIRECTION_IN)) < 0) {
        log_warn(LD_REND, "Error relaying cell across rendezvous; closing "
                 "circuits");
        /* XXXX Do this here, or just return -1? */
        circuit_mark_for_close(circ, -reason);
        return reason;
      }
      return 0;
    }
    log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
           "Didn't recognize cell, but circ stops here! Closing circ.");
    return -END_CIRC_REASON_TORPROTOCOL;
  }

  log_debug(LD_OR,"Passing on unrecognized cell.");

  ++stats_n_relay_cells_relayed; /* XXXX no longer quite accurate {cells}
                                  * we might kill the circ before we relay
                                  * the cells. */

  append_cell_to_circuit_queue(circ, or_conn, cell, cell_direction, 0);
  return 0;
}
/** Do the appropriate en/decryptions for <b>cell</b> arriving on
 * <b>circ</b> in direction <b>cell_direction</b>.
 *
 * If cell_direction == CELL_DIRECTION_IN:
 *   - If we're at the origin (we're the OP), for hops 1..N,
 *     decrypt cell. If recognized, stop.
 *   - Else (we're not the OP), encrypt one hop. Cell is not recognized.
 *
 * If cell_direction == CELL_DIRECTION_OUT:
 *   - decrypt one hop. Check if recognized.
 *
 * If cell is recognized, set *<b>recognized</b> to 1, and set
 * *<b>layer_hint</b> to the hop that recognized it.
 *
 * Return -1 to indicate that we should mark the circuit for close,
 * else return 0.
 */
int
relay_crypt(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction,
            crypt_path_t **layer_hint, char *recognized)
{
  relay_header_t rh;

  tor_assert(circ);
  tor_assert(cell);
  tor_assert(recognized);
  tor_assert(cell_direction == CELL_DIRECTION_IN ||
             cell_direction == CELL_DIRECTION_OUT);

  if (cell_direction == CELL_DIRECTION_IN) {
    if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit.
                                    * We'll want to do layered decrypts. */
      crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath;
      thishop = cpath;
      if (thishop->state != CPATH_STATE_OPEN) {
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "Relay cell before first created cell? Closing.");
        return -1;
      }
      do { /* Remember: cpath is in forward order, that is, first hop first. */
        tor_assert(thishop);
        /* Peel one onion layer with this hop's backward cipher. */
        if (relay_crypt_one_payload(thishop->b_crypto, cell->payload, 0) < 0)
          return -1;

        relay_header_unpack(&rh, cell->payload);
        if (rh.recognized == 0) {
          /* it's possibly recognized. have to check digest to be sure. */
          if (relay_digest_matches(thishop->b_digest, cell)) {
            *recognized = 1;
            *layer_hint = thishop;
            return 0;
          }
        }
        thishop = thishop->next;
      } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN);
      /* Walked every open hop and nobody's digest matched. */
      log_fn(LOG_PROTOCOL_WARN, LD_OR,
             "Incoming cell at client not recognized. Closing.");
      return -1;
    } else { /* we're in the middle. Just one crypt. */
      if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->p_crypto,
                                  cell->payload, 1) < 0)
        return -1;
      //      log_fn(LOG_DEBUG,"Skipping recognized check, because we're not "
      //             "the client.");
    }
  } else /* cell_direction == CELL_DIRECTION_OUT */ {
    /* we're in the middle. Just one crypt. */
    if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->n_crypto,
                                cell->payload, 0) < 0)
      return -1;

    relay_header_unpack(&rh, cell->payload);
    if (rh.recognized == 0) {
      /* it's possibly recognized. have to check digest to be sure. */
      if (relay_digest_matches(TO_OR_CIRCUIT(circ)->n_digest, cell)) {
        *recognized = 1;
        return 0;
      }
    }
  }
  return 0;
}
/** Package a relay cell from an edge:
 *  - Encrypt it to the right layer
 *  - Append it to the appropriate cell_queue on <b>circ</b>.
 *
 * Returns 0 on success (including the "just drop it" cases), -1 if a
 * crypt operation failed.
 */
static int
circuit_package_relay_cell(cell_t *cell, circuit_t *circ,
                           cell_direction_t cell_direction,
                           crypt_path_t *layer_hint, streamid_t on_stream)
{
  or_connection_t *conn; /* where to send the cell */

  if (cell_direction == CELL_DIRECTION_OUT) {
    crypt_path_t *thishop; /* counter for repeated crypts */
    conn = circ->n_conn;
    if (!CIRCUIT_IS_ORIGIN(circ) || !conn) {
      log_warn(LD_BUG,"outgoing relay cell has n_conn==NULL. Dropping.");
      return 0; /* just drop it */
    }

    /* Set the digest at the destination layer BEFORE adding the onion
     * layers, so the exit hop can verify it after peeling them off. */
    relay_set_digest(layer_hint->f_digest, cell);

    thishop = layer_hint;
    /* moving from farthest to nearest hop */
    do {
      tor_assert(thishop);
      /* XXXX RD This is a bug, right? */
      log_debug(LD_OR,"crypting a layer of the relay cell.");
      if (relay_crypt_one_payload(thishop->f_crypto, cell->payload, 1) < 0) {
        return -1;
      }

      thishop = thishop->prev;
    } while (thishop != TO_ORIGIN_CIRCUIT(circ)->cpath->prev);

  } else { /* incoming cell */
    or_circuit_t *or_circ;
    if (CIRCUIT_IS_ORIGIN(circ)) {
      /* We should never package an _incoming_ cell from the circuit
       * origin; that means we messed up somewhere. */
      log_warn(LD_BUG,"incoming relay cell at origin circuit. Dropping.");
      assert_circuit_ok(circ);
      return 0; /* just drop it */
    }
    or_circ = TO_OR_CIRCUIT(circ);
    conn = or_circ->p_conn;
    /* Single layer: digest, then one backward encryption. */
    relay_set_digest(or_circ->p_digest, cell);
    if (relay_crypt_one_payload(or_circ->p_crypto, cell->payload, 1) < 0)
      return -1;
  }
  ++stats_n_relay_cells_relayed;

  append_cell_to_circuit_queue(circ, conn, cell, cell_direction, on_stream);
  return 0;
}
/** If <b>cell</b>'s stream_id matches the stream_id of any conn that's
 * attached to <b>circ</b>, return that conn, else return NULL.
 *
 * On an origin circuit the match additionally requires the stream's
 * cpath_layer to equal <b>layer_hint</b>, so a cell is only delivered to a
 * stream attached at the hop that recognized it.  On a non-origin circuit,
 * both active and still-resolving streams are searched.
 */
static edge_connection_t *
relay_lookup_conn(circuit_t *circ, cell_t *cell,
                  cell_direction_t cell_direction, crypt_path_t *layer_hint)
{
  edge_connection_t *tmpconn;
  relay_header_t rh;

  relay_header_unpack(&rh, cell->payload);
  if (!rh.stream_id)
    return NULL;

  /* IN or OUT cells could have come from either direction, now
   * that we allow rendezvous *to* an OP.
   */
  if (CIRCUIT_IS_ORIGIN(circ)) {
    for (tmpconn = TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn;
         tmpconn=tmpconn->next_stream) {
      if (rh.stream_id == tmpconn->stream_id &&
          !tmpconn->_base.marked_for_close &&
          tmpconn->cpath_layer == layer_hint) {
        log_debug(LD_APP,"found conn for stream %d.", rh.stream_id);
        return tmpconn;
      }
    }
  } else {
    for (tmpconn = TO_OR_CIRCUIT(circ)->n_streams; tmpconn;
         tmpconn=tmpconn->next_stream) {
      if (rh.stream_id == tmpconn->stream_id &&
          !tmpconn->_base.marked_for_close) {
        log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
        /* Inbound cells on exit streams are only delivered if this is a
         * rendezvous stream. */
        if (cell_direction == CELL_DIRECTION_OUT ||
            connection_edge_is_rendezvous_stream(tmpconn))
          return tmpconn;
      }
    }
    for (tmpconn = TO_OR_CIRCUIT(circ)->resolving_streams; tmpconn;
         tmpconn=tmpconn->next_stream) {
      if (rh.stream_id == tmpconn->stream_id &&
          !tmpconn->_base.marked_for_close) {
        log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
        return tmpconn;
      }
    }
  }
  return NULL; /* probably a begin relay cell */
}
  427. /** Pack the relay_header_t host-order structure <b>src</b> into
  428. * network-order in the buffer <b>dest</b>. See tor-spec.txt for details
  429. * about the wire format.
  430. */
  431. void
  432. relay_header_pack(uint8_t *dest, const relay_header_t *src)
  433. {
  434. set_uint8(dest, src->command);
  435. set_uint16(dest+1, htons(src->recognized));
  436. set_uint16(dest+3, htons(src->stream_id));
  437. memcpy(dest+5, src->integrity, 4);
  438. set_uint16(dest+9, htons(src->length));
  439. }
  440. /** Unpack the network-order buffer <b>src</b> into a host-order
  441. * relay_header_t structure <b>dest</b>.
  442. */
  443. void
  444. relay_header_unpack(relay_header_t *dest, const uint8_t *src)
  445. {
  446. dest->command = get_uint8(src);
  447. dest->recognized = ntohs(get_uint16(src+1));
  448. dest->stream_id = ntohs(get_uint16(src+3));
  449. memcpy(dest->integrity, src+5, 4);
  450. dest->length = ntohs(get_uint16(src+9));
  451. }
  452. /** Convert the relay <b>command</b> into a human-readable string. */
  453. static const char *
  454. relay_command_to_string(uint8_t command)
  455. {
  456. switch (command) {
  457. case RELAY_COMMAND_BEGIN: return "BEGIN";
  458. case RELAY_COMMAND_DATA: return "DATA";
  459. case RELAY_COMMAND_END: return "END";
  460. case RELAY_COMMAND_CONNECTED: return "CONNECTED";
  461. case RELAY_COMMAND_SENDME: return "SENDME";
  462. case RELAY_COMMAND_EXTEND: return "EXTEND";
  463. case RELAY_COMMAND_EXTENDED: return "EXTENDED";
  464. case RELAY_COMMAND_TRUNCATE: return "TRUNCATE";
  465. case RELAY_COMMAND_TRUNCATED: return "TRUNCATED";
  466. case RELAY_COMMAND_DROP: return "DROP";
  467. case RELAY_COMMAND_RESOLVE: return "RESOLVE";
  468. case RELAY_COMMAND_RESOLVED: return "RESOLVED";
  469. case RELAY_COMMAND_BEGIN_DIR: return "BEGIN_DIR";
  470. case RELAY_COMMAND_ESTABLISH_INTRO: return "ESTABLISH_INTRO";
  471. case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: return "ESTABLISH_RENDEZVOUS";
  472. case RELAY_COMMAND_INTRODUCE1: return "INTRODUCE1";
  473. case RELAY_COMMAND_INTRODUCE2: return "INTRODUCE2";
  474. case RELAY_COMMAND_RENDEZVOUS1: return "RENDEZVOUS1";
  475. case RELAY_COMMAND_RENDEZVOUS2: return "RENDEZVOUS2";
  476. case RELAY_COMMAND_INTRO_ESTABLISHED: return "INTRO_ESTABLISHED";
  477. case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
  478. return "RENDEZVOUS_ESTABLISHED";
  479. case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK";
  480. default: return "(unrecognized)";
  481. }
  482. }
/** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send
 * it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on
 * <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a
 * control cell.  <b>cpath_layer</b> is NULL for OR->OP cells, or the
 * destination hop for OP->OR cells.
 *
 * If you can't send the cell, mark the circuit for close and return -1. Else
 * return 0.
 */
int
relay_send_command_from_edge(streamid_t stream_id, circuit_t *circ,
                             uint8_t relay_command, const char *payload,
                             size_t payload_len, crypt_path_t *cpath_layer)
{
  cell_t cell;
  relay_header_t rh;
  cell_direction_t cell_direction;
  /* XXXX NM Split this function into a separate versions per circuit type? */

  tor_assert(circ);
  tor_assert(payload_len <= RELAY_PAYLOAD_SIZE);

  memset(&cell, 0, sizeof(cell_t));
  cell.command = CELL_RELAY;
  /* Pick the direction from cpath_layer: a destination hop means we're the
   * origin sending outward; otherwise we must be a relay sending inward. */
  if (cpath_layer) {
    cell.circ_id = circ->n_circ_id;
    cell_direction = CELL_DIRECTION_OUT;
  } else if (! CIRCUIT_IS_ORIGIN(circ)) {
    cell.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
    cell_direction = CELL_DIRECTION_IN;
  } else {
    /* No cpath_layer on an origin circuit: nowhere to send this. */
    return -1;
  }

  /* Build the relay header; integrity is left zeroed here and filled in
   * later by relay_set_digest() (via circuit_package_relay_cell). */
  memset(&rh, 0, sizeof(rh));
  rh.command = relay_command;
  rh.stream_id = stream_id;
  rh.length = payload_len;
  relay_header_pack(cell.payload, &rh);
  if (payload_len)
    memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len);

  log_debug(LD_OR,"delivering %d cell %s.", relay_command,
            cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward");

  /* If we are sending an END cell and this circuit is used for a tunneled
   * directory request, advance its state. */
  if (relay_command == RELAY_COMMAND_END && circ->dirreq_id)
    geoip_change_dirreq_state(circ->dirreq_id, DIRREQ_TUNNELED,
                              DIRREQ_END_CELL_SENT);

  if (cell_direction == CELL_DIRECTION_OUT && circ->n_conn) {
    /* if we're using relaybandwidthrate, this conn wants priority */
    circ->n_conn->client_used = approx_time();
  }

  if (cell_direction == CELL_DIRECTION_OUT) {
    origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
    if (origin_circ->remaining_relay_early_cells > 0 &&
        (relay_command == RELAY_COMMAND_EXTEND ||
         cpath_layer != origin_circ->cpath)) {
      /* If we've got any relay_early cells left and (we're sending
       * an extend cell or we're not talking to the first hop), use
       * one of them.  Don't worry about the conn protocol version:
       * append_cell_to_circuit_queue will fix it up. */
      cell.command = CELL_RELAY_EARLY;
      --origin_circ->remaining_relay_early_cells;
      log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.",
                (int)origin_circ->remaining_relay_early_cells);
      /* Memorize the command that is sent as RELAY_EARLY cell; helps debug
       * task 878. */
      origin_circ->relay_early_commands[
          origin_circ->relay_early_cells_sent++] = relay_command;
    } else if (relay_command == RELAY_COMMAND_EXTEND) {
      /* If no RELAY_EARLY cells can be sent over this circuit, log which
       * commands have been sent as RELAY_EARLY cells before; helps debug
       * task 878. */
      smartlist_t *commands_list = smartlist_new();
      int i = 0;
      char *commands = NULL;
      for (; i < origin_circ->relay_early_cells_sent; i++)
        smartlist_add(commands_list, (char *)
            relay_command_to_string(origin_circ->relay_early_commands[i]));
      commands = smartlist_join_strings(commands_list, ",", 0, NULL);
      log_warn(LD_BUG, "Uh-oh.  We're sending a RELAY_COMMAND_EXTEND cell, "
               "but we have run out of RELAY_EARLY cells on that circuit. "
               "Commands sent before: %s", commands);
      tor_free(commands);
      smartlist_free(commands_list);
    }
  }

  if (circuit_package_relay_cell(&cell, circ, cell_direction, cpath_layer,
                                 stream_id) < 0) {
    log_warn(LD_BUG,"circuit_package_relay_cell failed. Closing.");
    circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
    return -1;
  }
  return 0;
}
  575. /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and
  576. * send it onto the open circuit <b>circ</b>. <b>fromconn</b> is the stream
  577. * that's sending the relay cell, or NULL if it's a control cell.
  578. * <b>cpath_layer</b> is NULL for OR->OP cells, or the destination hop
  579. * for OP->OR cells.
  580. *
  581. * If you can't send the cell, mark the circuit for close and
  582. * return -1. Else return 0.
  583. */
  584. int
  585. connection_edge_send_command(edge_connection_t *fromconn,
  586. uint8_t relay_command, const char *payload,
  587. size_t payload_len)
  588. {
  589. /* XXXX NM Split this function into a separate versions per circuit type? */
  590. circuit_t *circ;
  591. crypt_path_t *cpath_layer = fromconn->cpath_layer;
  592. tor_assert(fromconn);
  593. circ = fromconn->on_circuit;
  594. if (fromconn->_base.marked_for_close) {
  595. log_warn(LD_BUG,
  596. "called on conn that's already marked for close at %s:%d.",
  597. fromconn->_base.marked_for_close_file,
  598. fromconn->_base.marked_for_close);
  599. return 0;
  600. }
  601. if (!circ) {
  602. if (fromconn->_base.type == CONN_TYPE_AP) {
  603. log_info(LD_APP,"no circ. Closing conn.");
  604. connection_mark_unattached_ap(EDGE_TO_ENTRY_CONN(fromconn),
  605. END_STREAM_REASON_INTERNAL);
  606. } else {
  607. log_info(LD_EXIT,"no circ. Closing conn.");
  608. fromconn->edge_has_sent_end = 1; /* no circ to send to */
  609. fromconn->end_reason = END_STREAM_REASON_INTERNAL;
  610. connection_mark_for_close(TO_CONN(fromconn));
  611. }
  612. return -1;
  613. }
  614. return relay_send_command_from_edge(fromconn->stream_id, circ,
  615. relay_command, payload,
  616. payload_len, cpath_layer);
  617. }
/** How many times will we retry a stream that fails due to DNS
 * resolve failure or misc error before giving up and clearing its
 * failure count?  (See client_dns_incr_failures() callers below.)
 */
#define MAX_RESOLVE_FAILURES 3
  622. /** Return 1 if reason is something that you should retry if you
  623. * get the end cell before you've connected; else return 0. */
  624. static int
  625. edge_reason_is_retriable(int reason)
  626. {
  627. return reason == END_STREAM_REASON_HIBERNATING ||
  628. reason == END_STREAM_REASON_RESOURCELIMIT ||
  629. reason == END_STREAM_REASON_EXITPOLICY ||
  630. reason == END_STREAM_REASON_RESOLVEFAILED ||
  631. reason == END_STREAM_REASON_MISC ||
  632. reason == END_STREAM_REASON_NOROUTE;
  633. }
/** Called when we receive an END cell on a stream that isn't open yet,
 * from the client side.
 * Arguments are as for connection_edge_process_relay_cell().
 *
 * Depending on the END reason, we may (a) retry the stream on another
 * circuit/exit, (b) penalize the chosen exit's local exit policy, or
 * (c) give up and mark the connection unattached.  Always returns 0:
 * failures here close the stream, never the whole circuit.
 */
static int
connection_ap_process_end_not_open(
  relay_header_t *rh, cell_t *cell, origin_circuit_t *circ,
  entry_connection_t *conn, crypt_path_t *layer_hint)
{
  struct in_addr in;
  node_t *exitrouter;
  /* First payload byte is the end reason; remote origin is flagged so
   * control-port consumers can distinguish it from local closes. */
  int reason = *(cell->payload+RELAY_HEADER_SIZE);
  int control_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
  edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
  (void) layer_hint; /* unused */

  if (rh->length > 0 && edge_reason_is_retriable(reason) &&
      /* avoid retry if rend */
      !connection_edge_is_rendezvous_stream(edge_conn)) {
    const char *chosen_exit_digest =
      circ->build_state->chosen_exit->identity_digest;
    log_info(LD_APP,"Address '%s' refused due to '%s'. Considering retrying.",
             safe_str(conn->socks_request->address),
             stream_end_reason_to_string(reason));
    exitrouter = node_get_mutable_by_id(chosen_exit_digest);
    switch (reason) {
      case END_STREAM_REASON_EXITPOLICY:
        /* Exit refused by policy.  Payload may carry the resolved IPv4
         * address (bytes 1..4) and optionally a TTL (bytes 5..8). */
        if (rh->length >= 5) {
          uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+1));
          int ttl;
          if (!addr) {
            log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. Closing,",
                     safe_str(conn->socks_request->address));
            connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
            return 0;
          }
          if (rh->length >= 9)
            ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5));
          else
            ttl = -1;
          /* Refuse answers in internal address space if configured to:
           * a hostile exit could use them to mislead us. */
          if (get_options()->ClientDNSRejectInternalAddresses &&
              is_internal_IP(addr, 0)) {
            log_info(LD_APP,"Address '%s' resolved to internal. Closing,",
                     safe_str(conn->socks_request->address));
            connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
            return 0;
          }
          client_dns_set_addressmap(conn->socks_request->address, addr,
                                    conn->chosen_exit_name, ttl);
        }
        /* check if he *ought* to have allowed it */
        if (exitrouter &&
            (rh->length < 5 ||
             (tor_inet_aton(conn->socks_request->address, &in) &&
              !conn->chosen_exit_name))) {
          log_info(LD_APP,
                   "Exitrouter %s seems to be more restrictive than its exit "
                   "policy. Not using this router as exit for now.",
                   node_describe(exitrouter));
          policies_set_node_exitpolicy_to_reject_all(exitrouter);
        }
        /* rewrite it to an IP if we learned one. */
        if (addressmap_rewrite(conn->socks_request->address,
                               sizeof(conn->socks_request->address),
                               NULL)) {
          control_event_stream_status(conn, STREAM_EVENT_REMAP, 0);
        }
        if (conn->chosen_exit_optional ||
            conn->chosen_exit_retries) {
          /* stop wanting a specific exit */
          conn->chosen_exit_optional = 0;
          /* A non-zero chosen_exit_retries can happen if we set a
           * TrackHostExits for this address under a port that the exit
           * relay allows, but then try the same address with a different
           * port that it doesn't allow to exit. We shouldn't unregister
           * the mapping, since it is probably still wanted on the
           * original port. But now we give away to the exit relay that
           * we probably have a TrackHostExits on it. So be it. */
          conn->chosen_exit_retries = 0;
          tor_free(conn->chosen_exit_name); /* clears it */
        }
        if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
          return 0;
        /* else, conn will get closed below */
        break;
      case END_STREAM_REASON_CONNECTREFUSED:
        if (!conn->chosen_exit_optional)
          break; /* break means it'll close, below */
        /* Else fall through: expire this circuit, clear the
         * chosen_exit_name field, and try again. */
      case END_STREAM_REASON_RESOLVEFAILED:
      case END_STREAM_REASON_TIMEOUT:
      case END_STREAM_REASON_MISC:
      case END_STREAM_REASON_NOROUTE:
        if (client_dns_incr_failures(conn->socks_request->address)
            < MAX_RESOLVE_FAILURES) {
          /* We haven't retried too many times; reattach the connection. */
          circuit_log_path(LOG_INFO,LD_APP,circ);
          /* Mark this circuit "unusable for new streams". */
          /* XXXX023 this is a kludgy way to do this. */
          tor_assert(circ->_base.timestamp_dirty);
          circ->_base.timestamp_dirty -= get_options()->MaxCircuitDirtiness;
          if (conn->chosen_exit_optional) {
            /* stop wanting a specific exit */
            conn->chosen_exit_optional = 0;
            tor_free(conn->chosen_exit_name); /* clears it */
          }
          if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
            return 0;
          /* else, conn will get closed below */
        } else {
          log_notice(LD_APP,
                     "Have tried resolving or connecting to address '%s' "
                     "at %d different places. Giving up.",
                     safe_str(conn->socks_request->address),
                     MAX_RESOLVE_FAILURES);
          /* clear the failures, so it will have a full try next time */
          client_dns_clear_failures(conn->socks_request->address);
        }
        break;
      case END_STREAM_REASON_HIBERNATING:
      case END_STREAM_REASON_RESOURCELIMIT:
        /* The exit is (temporarily) unusable; stop exiting through it. */
        if (exitrouter) {
          policies_set_node_exitpolicy_to_reject_all(exitrouter);
        }
        if (conn->chosen_exit_optional) {
          /* stop wanting a specific exit */
          conn->chosen_exit_optional = 0;
          tor_free(conn->chosen_exit_name); /* clears it */
        }
        if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
          return 0;
        /* else, will close below */
        break;
    } /* end switch */
    log_info(LD_APP,"Giving up on retrying; conn can't be handled.");
  }

  log_info(LD_APP,
           "Edge got end (%s) before we're connected. Marking for close.",
           stream_end_reason_to_string(rh->length > 0 ? reason : -1));
  circuit_log_path(LOG_INFO,LD_APP,circ);
  /* need to test because of detach_retriable */
  if (!ENTRY_TO_CONN(conn)->marked_for_close)
    connection_mark_unattached_ap(conn, control_reason);
  return 0;
}
  779. /** Helper: change the socks_request-&gt;address field on conn to the
  780. * dotted-quad representation of <b>new_addr</b> (given in host order),
  781. * and send an appropriate REMAP event. */
  782. static void
  783. remap_event_helper(entry_connection_t *conn, uint32_t new_addr)
  784. {
  785. struct in_addr in;
  786. in.s_addr = htonl(new_addr);
  787. tor_inet_ntoa(&in, conn->socks_request->address,
  788. sizeof(conn->socks_request->address));
  789. control_event_stream_status(conn, STREAM_EVENT_REMAP,
  790. REMAP_STREAM_SOURCE_EXIT);
  791. }
/** An incoming relay cell has arrived from circuit <b>circ</b> to
 * stream <b>conn</b>.
 *
 * The arguments here are the same as in
 * connection_edge_process_relay_cell() below; this function is called
 * from there when <b>conn</b> is defined and not in an open state.
 *
 * Handles END, CONNECTED (AP only), and RESOLVED (AP only) cells; any
 * other command is logged and dropped.  Always returns 0 (never tears
 * down the circuit) except via connection_ap_process_end_not_open().
 */
static int
connection_edge_process_relay_cell_not_open(
  relay_header_t *rh, cell_t *cell, circuit_t *circ,
  edge_connection_t *conn, crypt_path_t *layer_hint)
{
  if (rh->command == RELAY_COMMAND_END) {
    if (CIRCUIT_IS_ORIGIN(circ) && conn->_base.type == CONN_TYPE_AP) {
      /* Client side: may retry the stream elsewhere. */
      return connection_ap_process_end_not_open(rh, cell,
                                                TO_ORIGIN_CIRCUIT(circ),
                                                EDGE_TO_ENTRY_CONN(conn),
                                                layer_hint);
    } else {
      /* we just got an 'end', don't need to send one */
      conn->edge_has_sent_end = 1;
      conn->end_reason = *(cell->payload+RELAY_HEADER_SIZE) |
                         END_STREAM_REASON_FLAG_REMOTE;
      connection_mark_for_close(TO_CONN(conn));
      return 0;
    }
  }

  if (conn->_base.type == CONN_TYPE_AP &&
      rh->command == RELAY_COMMAND_CONNECTED) {
    entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
    tor_assert(CIRCUIT_IS_ORIGIN(circ));
    if (conn->_base.state != AP_CONN_STATE_CONNECT_WAIT) {
      log_fn(LOG_PROTOCOL_WARN, LD_APP,
             "Got 'connected' while not in state connect_wait. Dropping.");
      return 0;
    }
    conn->_base.state = AP_CONN_STATE_OPEN;
    log_info(LD_APP,"'connected' received after %d seconds.",
             (int)(time(NULL) - conn->_base.timestamp_lastread));
    /* Optional payload: 4-byte IPv4 address, then optional 4-byte TTL. */
    if (rh->length >= 4) {
      uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE));
      int ttl;
      /* Reject 0.0.0.0 and (if configured) internal addresses, which a
       * malicious exit could use to confuse local DNS caching. */
      if (!addr || (get_options()->ClientDNSRejectInternalAddresses &&
                    is_internal_IP(addr, 0))) {
        log_info(LD_APP, "...but it claims the IP address was %s. Closing.",
                 fmt_addr32(addr));
        connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
        connection_mark_unattached_ap(entry_conn,
                                      END_STREAM_REASON_TORPROTOCOL);
        return 0;
      }
      if (rh->length >= 8)
        ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+4));
      else
        ttl = -1;
      client_dns_set_addressmap(entry_conn->socks_request->address, addr,
                                entry_conn->chosen_exit_name, ttl);
      remap_event_helper(entry_conn, addr);
    }
    circuit_log_path(LOG_INFO,LD_APP,TO_ORIGIN_CIRCUIT(circ));
    /* don't send a socks reply to transparent conns */
    tor_assert(entry_conn->socks_request != NULL);
    if (!entry_conn->socks_request->has_finished)
      connection_ap_handshake_socks_reply(entry_conn, NULL, 0, 0);

    /* Was it a linked dir conn? If so, a dir request just started to
     * fetch something; this could be a bootstrap status milestone. */
    log_debug(LD_APP, "considering");
    if (TO_CONN(conn)->linked_conn &&
        TO_CONN(conn)->linked_conn->type == CONN_TYPE_DIR) {
      connection_t *dirconn = TO_CONN(conn)->linked_conn;
      log_debug(LD_APP, "it is! %d", dirconn->purpose);
      switch (dirconn->purpose) {
        case DIR_PURPOSE_FETCH_CERTIFICATE:
          if (consensus_is_waiting_for_certs())
            control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_KEYS, 0);
          break;
        case DIR_PURPOSE_FETCH_CONSENSUS:
          control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_STATUS, 0);
          break;
        case DIR_PURPOSE_FETCH_SERVERDESC:
          control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
                                  count_loading_descriptors_progress());
          break;
      }
    }
    /* This is definitely a success, so forget about any pending data we
     * had sent. */
    if (entry_conn->pending_optimistic_data) {
      generic_buffer_free(entry_conn->pending_optimistic_data);
      entry_conn->pending_optimistic_data = NULL;
    }

    /* handle anything that might have queued */
    if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
      /* (We already sent an end cell if possible) */
      connection_mark_for_close(TO_CONN(conn));
      return 0;
    }
    return 0;
  }
  if (conn->_base.type == CONN_TYPE_AP &&
      rh->command == RELAY_COMMAND_RESOLVED) {
    int ttl;
    int answer_len;
    uint8_t answer_type;
    entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
    if (conn->_base.state != AP_CONN_STATE_RESOLVE_WAIT) {
      log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a 'resolved' cell while "
             "not in state resolve_wait. Dropping.");
      return 0;
    }
    tor_assert(SOCKS_COMMAND_IS_RESOLVE(entry_conn->socks_request->command));
    /* Payload layout: [0]=answer type, [1]=answer length, [2..]=answer,
     * optionally followed by a 4-byte TTL.  Validate the length field
     * against rh->length before trusting it. */
    answer_len = cell->payload[RELAY_HEADER_SIZE+1];
    if (rh->length < 2 || answer_len+2>rh->length) {
      log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
             "Dropping malformed 'resolved' cell");
      connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL);
      return 0;
    }
    answer_type = cell->payload[RELAY_HEADER_SIZE];
    if (rh->length >= answer_len+6)
      ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+
                                  2+answer_len));
    else
      ttl = -1;
    if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
      uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+2));
      if (get_options()->ClientDNSRejectInternalAddresses &&
          is_internal_IP(addr, 0)) {
        log_info(LD_APP,"Got a resolve with answer %s. Rejecting.",
                 fmt_addr32(addr));
        connection_ap_handshake_socks_resolved(entry_conn,
                                               RESOLVED_TYPE_ERROR_TRANSIENT,
                                               0, NULL, 0, TIME_MAX);
        connection_mark_unattached_ap(entry_conn,
                                      END_STREAM_REASON_TORPROTOCOL);
        return 0;
      }
    }
    connection_ap_handshake_socks_resolved(entry_conn,
                                           answer_type,
                                           cell->payload[RELAY_HEADER_SIZE+1], /*answer_len*/
                                           cell->payload+RELAY_HEADER_SIZE+2, /*answer*/
                                           ttl,
                                           -1);
    if (answer_type == RESOLVED_TYPE_IPV4 && answer_len == 4) {
      uint32_t addr = ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+2));
      remap_event_helper(entry_conn, addr);
    }
    connection_mark_unattached_ap(entry_conn,
                                  END_STREAM_REASON_DONE |
                                  END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
    return 0;
  }
  log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
         "Got an unexpected relay command %d, in state %d (%s). Dropping.",
         rh->command, conn->_base.state,
         conn_state_to_string(conn->_base.type, conn->_base.state));
  return 0; /* for forward compatibility, don't kill the circuit */
//  connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
//  connection_mark_for_close(conn);
//  return -1;
}
/** An incoming relay cell has arrived on circuit <b>circ</b>. If
 * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is
 * destined for <b>conn</b>.
 *
 * If <b>layer_hint</b> is defined, then we're the origin of the
 * circuit, and it specifies the hop that packaged <b>cell</b>.
 *
 * Return -reason if you want to warn and tear down the circuit, else 0.
 *
 * This is the central dispatch for all relay cell commands; unknown
 * commands are dropped (not fatal) for forward compatibility.
 */
static int
connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
                                   edge_connection_t *conn,
                                   crypt_path_t *layer_hint)
{
  static int num_seen=0;
  relay_header_t rh;
  /* layer_hint set means we're the client (origin) side. */
  unsigned domain = layer_hint?LD_APP:LD_EXIT;
  int reason;
  int optimistic_data = 0; /* Set to 1 if we receive data on a stream
                            * that's in the EXIT_CONN_STATE_RESOLVING
                            * or EXIT_CONN_STATE_CONNECTING states. */

  tor_assert(cell);
  tor_assert(circ);

  relay_header_unpack(&rh, cell->payload);
//  log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
  num_seen++;
  log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
            num_seen, rh.command, rh.stream_id);

  if (rh.length > RELAY_PAYLOAD_SIZE) {
    log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
           "Relay cell length field too long. Closing circuit.");
    return - END_CIRC_REASON_TORPROTOCOL;
  }

  /* either conn is NULL, in which case we've got a control cell, or else
   * conn points to the recognized stream. */

  if (conn && !connection_state_is_open(TO_CONN(conn))) {
    if (conn->_base.type == CONN_TYPE_EXIT &&
        (conn->_base.state == EXIT_CONN_STATE_CONNECTING ||
         conn->_base.state == EXIT_CONN_STATE_RESOLVING) &&
        rh.command == RELAY_COMMAND_DATA) {
      /* Allow DATA cells to be delivered to an exit node in state
       * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING.
       * This speeds up HTTP, for example. */
      optimistic_data = 1;
    } else {
      return connection_edge_process_relay_cell_not_open(
               &rh, cell, circ, conn, layer_hint);
    }
  }

  switch (rh.command) {
    case RELAY_COMMAND_DROP:
//      log_info(domain,"Got a relay-level padding cell. Dropping.");
      return 0;
    case RELAY_COMMAND_BEGIN:
    case RELAY_COMMAND_BEGIN_DIR:
      /* BEGIN at the client is bogus unless this circuit is the service
       * side of a joined rendezvous circuit. */
      if (layer_hint &&
          circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
        log_fn(LOG_PROTOCOL_WARN, LD_APP,
               "Relay begin request unsupported at AP. Dropping.");
        return 0;
      }
      if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED &&
          layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) {
        log_fn(LOG_PROTOCOL_WARN, LD_APP,
               "Relay begin request to Hidden Service "
               "from intermediary node. Dropping.");
        return 0;
      }
      if (conn) {
        log_fn(LOG_PROTOCOL_WARN, domain,
               "Begin cell for known stream. Dropping.");
        return 0;
      }
      if (rh.command == RELAY_COMMAND_BEGIN_DIR) {
        /* Assign this circuit and its app-ward OR connection a unique ID,
         * so that we can measure download times. The local edge and dir
         * connection will be assigned the same ID when they are created
         * and linked. */
        static uint64_t next_id = 0;
        circ->dirreq_id = ++next_id;
        TO_CONN(TO_OR_CIRCUIT(circ)->p_conn)->dirreq_id = circ->dirreq_id;
      }
      return connection_exit_begin_conn(cell, circ);
    case RELAY_COMMAND_DATA:
      ++stats_n_data_cells_received;
      /* Decrement the circuit- or hop-level deliver window; going
       * negative means the other side ignored flow control. */
      if (( layer_hint && --layer_hint->deliver_window < 0) ||
          (!layer_hint && --circ->deliver_window < 0)) {
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "(relay data) circ deliver_window below 0. Killing.");
        if (conn) {
          /* XXXX Do we actually need to do this? Will killing the circuit
           * not send an END and mark the stream for close as appropriate? */
          connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
          connection_mark_for_close(TO_CONN(conn));
        }
        return -END_CIRC_REASON_TORPROTOCOL;
      }
      log_debug(domain,"circ deliver_window now %d.", layer_hint ?
                layer_hint->deliver_window : circ->deliver_window);

      circuit_consider_sending_sendme(circ, layer_hint);

      if (!conn) {
        log_info(domain,"data cell dropped, unknown stream (streamid %d).",
                 rh.stream_id);
        return 0;
      }

      if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "(relay data) conn deliver_window below 0. Killing.");
        return -END_CIRC_REASON_TORPROTOCOL;
      }

      stats_n_data_bytes_received += rh.length;
      connection_write_to_buf((char*)(cell->payload + RELAY_HEADER_SIZE),
                              rh.length, TO_CONN(conn));

      if (!optimistic_data) {
        /* Only send a SENDME if we're not getting optimistic data; otherwise
         * a SENDME could arrive before the CONNECTED.
         */
        connection_edge_consider_sending_sendme(conn);
      }

      return 0;
    case RELAY_COMMAND_END:
      /* If the payload is empty, fall back to a MISC reason. */
      reason = rh.length > 0 ?
        get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
      if (!conn) {
        log_info(domain,"end cell (%s) dropped, unknown stream.",
                 stream_end_reason_to_string(reason));
        return 0;
      }
/* XXX add to this log_fn the exit node's nickname? */
      log_info(domain,"%d: end cell (%s) for stream %d. Removing stream.",
               conn->_base.s,
               stream_end_reason_to_string(reason),
               conn->stream_id);
      if (conn->_base.type == CONN_TYPE_AP) {
        entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
        if (entry_conn->socks_request &&
            !entry_conn->socks_request->has_finished)
          log_warn(LD_BUG,
                   "open stream hasn't sent socks answer yet? Closing.");
      }
      /* We just *got* an end; no reason to send one. */
      conn->edge_has_sent_end = 1;
      if (!conn->end_reason)
        conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
      if (!conn->_base.marked_for_close) {
        /* only mark it if not already marked. it's possible to
         * get the 'end' right around when the client hangs up on us. */
        connection_mark_and_flush(TO_CONN(conn));
      }
      return 0;
    case RELAY_COMMAND_EXTEND: {
      static uint64_t total_n_extend=0, total_nonearly=0;
      total_n_extend++;
      if (conn) {
        log_fn(LOG_PROTOCOL_WARN, domain,
               "'extend' cell received for non-zero stream. Dropping.");
        return 0;
      }
      /* EXTEND must arrive as a RELAY_EARLY cell unless the consensus
       * parameter explicitly allows otherwise; this limits how far a
       * circuit can be extended (anti-amplification). */
      if (cell->command != CELL_RELAY_EARLY &&
          !networkstatus_get_param(NULL,"AllowNonearlyExtend",0,0,1)) {
#define EARLY_WARNING_INTERVAL 3600
        static ratelim_t early_warning_limit =
          RATELIM_INIT(EARLY_WARNING_INTERVAL);
        char *m;
        if (cell->command == CELL_RELAY) {
          ++total_nonearly;
          if ((m = rate_limit_log(&early_warning_limit, approx_time()))) {
            double percentage = ((double)total_nonearly)/total_n_extend;
            percentage *= 100;
            log_fn(LOG_PROTOCOL_WARN, domain, "EXTEND cell received, "
                   "but not via RELAY_EARLY. Dropping.%s", m);
            log_fn(LOG_PROTOCOL_WARN, domain, " (We have dropped %.02f%% of "
                   "all EXTEND cells for this reason)", percentage);
            tor_free(m);
          }
        } else {
          log_fn(LOG_WARN, domain,
                 "EXTEND cell received, in a cell with type %d! Dropping.",
                 cell->command);
        }
        return 0;
      }
      return circuit_extend(cell, circ);
    }
    case RELAY_COMMAND_EXTENDED:
      if (!layer_hint) {
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "'extended' unsupported at non-origin. Dropping.");
        return 0;
      }
      log_debug(domain,"Got an extended cell! Yay.");
      if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ),
                                             CELL_CREATED,
                                             cell->payload+RELAY_HEADER_SIZE)) < 0) {
        log_warn(domain,"circuit_finish_handshake failed.");
        return reason;
      }
      if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) {
        log_info(domain,"circuit_send_next_onion_skin() failed.");
        return reason;
      }
      return 0;
    case RELAY_COMMAND_TRUNCATE:
      if (layer_hint) {
        log_fn(LOG_PROTOCOL_WARN, LD_APP,
               "'truncate' unsupported at origin. Dropping.");
        return 0;
      }
      if (circ->n_conn) {
        /* Tear down everything app-ward of us, then reply TRUNCATED. */
        uint8_t trunc_reason = *(uint8_t*)(cell->payload + RELAY_HEADER_SIZE);
        circuit_clear_cell_queue(circ, circ->n_conn);
        connection_or_send_destroy(circ->n_circ_id, circ->n_conn,
                                   trunc_reason);
        circuit_set_n_circid_orconn(circ, 0, NULL);
      }
      log_debug(LD_EXIT, "Processed 'truncate', replying.");
      {
        char payload[1];
        payload[0] = (char)END_CIRC_REASON_REQUESTED;
        relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED,
                                     payload, sizeof(payload), NULL);
      }
      return 0;
    case RELAY_COMMAND_TRUNCATED:
      if (!layer_hint) {
        log_fn(LOG_PROTOCOL_WARN, LD_EXIT,
               "'truncated' unsupported at non-origin. Dropping.");
        return 0;
      }
      circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint);
      return 0;
    case RELAY_COMMAND_CONNECTED:
      if (conn) {
        log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
               "'connected' unsupported while open. Closing circ.");
        return -END_CIRC_REASON_TORPROTOCOL;
      }
      log_info(domain,
               "'connected' received, no conn attached anymore. Ignoring.");
      return 0;
    case RELAY_COMMAND_SENDME:
      if (!conn) {
        /* Circuit-level sendme: widen the package window at the right
         * place (hop at origin, whole circuit at a relay) and resume
         * reading from any edge streams that were blocked on it. */
        if (layer_hint) {
          layer_hint->package_window += CIRCWINDOW_INCREMENT;
          log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.",
                    layer_hint->package_window);
          circuit_resume_edge_reading(circ, layer_hint);
        } else {
          circ->package_window += CIRCWINDOW_INCREMENT;
          log_debug(LD_APP,
                    "circ-level sendme at non-origin, packagewindow %d.",
                    circ->package_window);
          circuit_resume_edge_reading(circ, layer_hint);
        }
        return 0;
      }
      /* Stream-level sendme. */
      conn->package_window += STREAMWINDOW_INCREMENT;
      log_debug(domain,"stream-level sendme, packagewindow now %d.",
                conn->package_window);
      if (circuit_queue_streams_are_blocked(circ)) {
        /* Still waiting for queue to flush; don't touch conn */
        return 0;
      }
      connection_start_reading(TO_CONN(conn));
      /* handle whatever might still be on the inbuf */
      if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
        /* (We already sent an end cell if possible) */
        connection_mark_for_close(TO_CONN(conn));
        return 0;
      }
      return 0;
    case RELAY_COMMAND_RESOLVE:
      if (layer_hint) {
        log_fn(LOG_PROTOCOL_WARN, LD_APP,
               "resolve request unsupported at AP; dropping.");
        return 0;
      } else if (conn) {
        log_fn(LOG_PROTOCOL_WARN, domain,
               "resolve request for known stream; dropping.");
        return 0;
      } else if (circ->purpose != CIRCUIT_PURPOSE_OR) {
        log_fn(LOG_PROTOCOL_WARN, domain,
               "resolve request on circ with purpose %d; dropping",
               circ->purpose);
        return 0;
      }
      connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ));
      return 0;
    case RELAY_COMMAND_RESOLVED:
      if (conn) {
        log_fn(LOG_PROTOCOL_WARN, domain,
               "'resolved' unsupported while open. Closing circ.");
        return -END_CIRC_REASON_TORPROTOCOL;
      }
      log_info(domain,
               "'resolved' received, no conn attached anymore. Ignoring.");
      return 0;
    case RELAY_COMMAND_ESTABLISH_INTRO:
    case RELAY_COMMAND_ESTABLISH_RENDEZVOUS:
    case RELAY_COMMAND_INTRODUCE1:
    case RELAY_COMMAND_INTRODUCE2:
    case RELAY_COMMAND_INTRODUCE_ACK:
    case RELAY_COMMAND_RENDEZVOUS1:
    case RELAY_COMMAND_RENDEZVOUS2:
    case RELAY_COMMAND_INTRO_ESTABLISHED:
    case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
      /* All hidden-service commands are delegated to the rend module. */
      rend_process_relay_cell(circ, layer_hint,
                              rh.command, rh.length,
                              cell->payload+RELAY_HEADER_SIZE);
      return 0;
  }
  log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
         "Received unknown relay command %d. Perhaps the other side is using "
         "a newer version of Tor? Dropping.",
         rh.command);
  return 0; /* for forward compatibility, don't kill the circuit */
}
/** How many relay_data cells have we built, ever? */
uint64_t stats_n_data_cells_packaged = 0;
/** How many bytes of data have we put in relay_data cells have we built,
 * ever? This would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if
 * every relay cell we ever sent were completely full of data. */
uint64_t stats_n_data_bytes_packaged = 0;
/** How many relay_data cells have we received, ever? */
uint64_t stats_n_data_cells_received = 0;
/** How many bytes of data have we received in relay_data cells, ever? This
 * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_received if every relay
 * cell we ever received were completely full of data. */
uint64_t stats_n_data_bytes_received = 0;
  1283. /** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or
  1284. * <b>package_partial</b> is true), and the appropriate package windows aren't
  1285. * empty, grab a cell and send it down the circuit.
  1286. *
  1287. * If *<b>max_cells</b> is given, package no more than max_cells. Decrement
  1288. * *<b>max_cells</b> by the number of cells packaged.
  1289. *
  1290. * Return -1 (and send a RELAY_COMMAND_END cell if necessary) if conn should
  1291. * be marked for close, else return 0.
  1292. */
  1293. int
  1294. connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
  1295. int *max_cells)
  1296. {
  1297. size_t bytes_to_process, length;
  1298. char payload[CELL_PAYLOAD_SIZE];
  1299. circuit_t *circ;
  1300. const unsigned domain = conn->_base.type == CONN_TYPE_AP ? LD_APP : LD_EXIT;
  1301. int sending_from_optimistic = 0;
  1302. const int sending_optimistically =
  1303. conn->_base.type == CONN_TYPE_AP &&
  1304. conn->_base.state != AP_CONN_STATE_OPEN;
  1305. entry_connection_t *entry_conn =
  1306. conn->_base.type == CONN_TYPE_AP ? EDGE_TO_ENTRY_CONN(conn) : NULL;
  1307. crypt_path_t *cpath_layer = conn->cpath_layer;
  1308. tor_assert(conn);
  1309. if (conn->_base.marked_for_close) {
  1310. log_warn(LD_BUG,
  1311. "called on conn that's already marked for close at %s:%d.",
  1312. conn->_base.marked_for_close_file, conn->_base.marked_for_close);
  1313. return 0;
  1314. }
  1315. if (max_cells && *max_cells <= 0)
  1316. return 0;
  1317. repeat_connection_edge_package_raw_inbuf:
  1318. circ = circuit_get_by_edge_conn(conn);
  1319. if (!circ) {
  1320. log_info(domain,"conn has no circuit! Closing.");
  1321. conn->end_reason = END_STREAM_REASON_CANT_ATTACH;
  1322. return -1;
  1323. }
  1324. if (circuit_consider_stop_edge_reading(circ, cpath_layer))
  1325. return 0;
  1326. if (conn->package_window <= 0) {
  1327. log_info(domain,"called with package_window %d. Skipping.",
  1328. conn->package_window);
  1329. connection_stop_reading(TO_CONN(conn));
  1330. return 0;
  1331. }
  1332. sending_from_optimistic = entry_conn &&
  1333. entry_conn->sending_optimistic_data != NULL;
  1334. if (PREDICT_UNLIKELY(sending_from_optimistic)) {
  1335. bytes_to_process = generic_buffer_len(entry_conn->sending_optimistic_data);
  1336. if (PREDICT_UNLIKELY(!bytes_to_process)) {
  1337. log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty");
  1338. bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
  1339. sending_from_optimistic = 0;
  1340. }
  1341. } else {
  1342. bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
  1343. }
  1344. if (!bytes_to_process)
  1345. return 0;
  1346. if (!package_partial && bytes_to_process < RELAY_PAYLOAD_SIZE)
  1347. return 0;
  1348. if (bytes_to_process > RELAY_PAYLOAD_SIZE) {
  1349. length = RELAY_PAYLOAD_SIZE;
  1350. } else {
  1351. length = bytes_to_process;
  1352. }
  1353. stats_n_data_bytes_packaged += length;
  1354. stats_n_data_cells_packaged += 1;
  1355. if (PREDICT_UNLIKELY(sending_from_optimistic)) {
  1356. /* XXX023 We could be more efficient here by sometimes packing
  1357. * previously-sent optimistic data in the same cell with data
  1358. * from the inbuf. */
  1359. generic_buffer_get(entry_conn->sending_optimistic_data, payload, length);
  1360. if (!generic_buffer_len(entry_conn->sending_optimistic_data)) {
  1361. generic_buffer_free(entry_conn->sending_optimistic_data);
  1362. entry_conn->sending_optimistic_data = NULL;
  1363. }
  1364. } else {
  1365. connection_fetch_from_buf(payload, length, TO_CONN(conn));
  1366. }
  1367. log_debug(domain,"(%d) Packaging %d bytes (%d waiting).", conn->_base.s,
  1368. (int)length, (int)connection_get_inbuf_len(TO_CONN(conn)));
  1369. if (sending_optimistically && !sending_from_optimistic) {
  1370. /* This is new optimistic data; remember it in case we need to detach and
  1371. retry */
  1372. if (!entry_conn->pending_optimistic_data)
  1373. entry_conn->pending_optimistic_data = generic_buffer_new();
  1374. generic_buffer_add(entry_conn->pending_optimistic_data, payload, length);
  1375. }
  1376. if (connection_edge_send_command(conn, RELAY_COMMAND_DATA,
  1377. payload, length) < 0 )
  1378. /* circuit got marked for close, don't continue, don't need to mark conn */
  1379. return 0;
  1380. if (!cpath_layer) { /* non-rendezvous exit */
  1381. tor_assert(circ->package_window > 0);
  1382. circ->package_window--;
  1383. } else { /* we're an AP, or an exit on a rendezvous circ */
  1384. tor_assert(cpath_layer->package_window > 0);
  1385. cpath_layer->package_window--;
  1386. }
  1387. if (--conn->package_window <= 0) { /* is it 0 after decrement? */
  1388. connection_stop_reading(TO_CONN(conn));
  1389. log_debug(domain,"conn->package_window reached 0.");
  1390. circuit_consider_stop_edge_reading(circ, cpath_layer);
  1391. return 0; /* don't process the inbuf any more */
  1392. }
  1393. log_debug(domain,"conn->package_window is now %d",conn->package_window);
  1394. if (max_cells) {
  1395. *max_cells -= 1;
  1396. if (*max_cells <= 0)
  1397. return 0;
  1398. }
  1399. /* handle more if there's more, or return 0 if there isn't */
  1400. goto repeat_connection_edge_package_raw_inbuf;
  1401. }
  1402. /** Called when we've just received a relay data cell, when
  1403. * we've just finished flushing all bytes to stream <b>conn</b>,
  1404. * or when we've flushed *some* bytes to the stream <b>conn</b>.
  1405. *
  1406. * If conn->outbuf is not too full, and our deliver window is
  1407. * low, send back a suitable number of stream-level sendme cells.
  1408. */
  1409. void
  1410. connection_edge_consider_sending_sendme(edge_connection_t *conn)
  1411. {
  1412. circuit_t *circ;
  1413. if (connection_outbuf_too_full(TO_CONN(conn)))
  1414. return;
  1415. circ = circuit_get_by_edge_conn(conn);
  1416. if (!circ) {
  1417. /* this can legitimately happen if the destroy has already
  1418. * arrived and torn down the circuit */
  1419. log_info(LD_APP,"No circuit associated with conn. Skipping.");
  1420. return;
  1421. }
  1422. while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) {
  1423. log_debug(conn->_base.type == CONN_TYPE_AP ?LD_APP:LD_EXIT,
  1424. "Outbuf %d, Queuing stream sendme.",
  1425. (int)conn->_base.outbuf_flushlen);
  1426. conn->deliver_window += STREAMWINDOW_INCREMENT;
  1427. if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME,
  1428. NULL, 0) < 0) {
  1429. log_warn(LD_APP,"connection_edge_send_command failed. Skipping.");
  1430. return; /* the circuit's closed, don't continue */
  1431. }
  1432. }
  1433. }
  1434. /** The circuit <b>circ</b> has received a circuit-level sendme
  1435. * (on hop <b>layer_hint</b>, if we're the OP). Go through all the
  1436. * attached streams and let them resume reading and packaging, if
  1437. * their stream windows allow it.
  1438. */
  1439. static void
  1440. circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
  1441. {
  1442. if (circuit_queue_streams_are_blocked(circ)) {
  1443. log_debug(layer_hint?LD_APP:LD_EXIT,"Too big queue, no resuming");
  1444. return;
  1445. }
  1446. log_debug(layer_hint?LD_APP:LD_EXIT,"resuming");
  1447. if (CIRCUIT_IS_ORIGIN(circ))
  1448. circuit_resume_edge_reading_helper(TO_ORIGIN_CIRCUIT(circ)->p_streams,
  1449. circ, layer_hint);
  1450. else
  1451. circuit_resume_edge_reading_helper(TO_OR_CIRCUIT(circ)->n_streams,
  1452. circ, layer_hint);
  1453. }
/** A helper function for circuit_resume_edge_reading() above.
 * The arguments are the same, except that <b>conn</b> is the head
 * of a linked list of edge streams that should each be considered.
 *
 * Returns -1 if the circuit stopped accepting data mid-iteration
 * (reading on the affected streams has already been stopped); else 0.
 */
static int
circuit_resume_edge_reading_helper(edge_connection_t *first_conn,
                                   circuit_t *circ,
                                   crypt_path_t *layer_hint)
{
  edge_connection_t *conn;
  int n_packaging_streams, n_streams_left;
  int packaged_this_round;
  int cells_on_queue;
  int cells_per_conn;
  edge_connection_t *chosen_stream = NULL;

  /* How many cells do we have space for? It will be the minimum of
   * the number needed to exhaust the package window, and the minimum
   * needed to fill the cell queue. */
  int max_to_package = circ->package_window;
  if (CIRCUIT_IS_ORIGIN(circ)) {
    cells_on_queue = circ->n_conn_cells.n;
  } else {
    or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
    cells_on_queue = or_circ->p_conn_cells.n;
  }
  if (CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue < max_to_package)
    max_to_package = CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue;

  /* Once we used to start listening on the streams in the order they
   * appeared in the linked list.  That leads to starvation on the
   * streams that appeared later on the list, since the first streams
   * would always get to read first.  Instead, we just pick a random
   * stream on the list, and enable reading for streams starting at that
   * point (and wrapping around as if the list were circular).  It would
   * probably be better to actually remember which streams we've
   * serviced in the past, but this is simple and effective. */

  /* Select a stream uniformly at random from the linked list.  We
   * don't need cryptographic randomness here.  This is reservoir
   * sampling with a reservoir of size one: the i-th stream replaces
   * the current choice with probability 1/i. */
  {
    int num_streams = 0;
    for (conn = first_conn; conn; conn = conn->next_stream) {
      num_streams++;
      if ((tor_weak_random() % num_streams)==0)
        chosen_stream = conn;
      /* Invariant: chosen_stream has been chosen uniformly at random from
       * among the first num_streams streams on first_conn. */
    }
  }

  /* Count how many non-marked streams there are that have anything on
   * their inbuf, and enable reading on all of the connections. */
  n_packaging_streams = 0;
  /* Activate reading starting from the chosen stream */
  for (conn=chosen_stream; conn; conn = conn->next_stream) {
    /* Start reading for the streams starting from here */
    if (conn->_base.marked_for_close || conn->package_window <= 0)
      continue;
    /* layer_hint == NULL means "any hop"; otherwise only streams
     * attached at that hop participate. */
    if (!layer_hint || conn->cpath_layer == layer_hint) {
      connection_start_reading(TO_CONN(conn));

      if (connection_get_inbuf_len(TO_CONN(conn)) > 0)
        ++n_packaging_streams;
    }
  }
  /* Go back and do the ones we skipped, circular-style */
  for (conn = first_conn; conn != chosen_stream; conn = conn->next_stream) {
    if (conn->_base.marked_for_close || conn->package_window <= 0)
      continue;
    if (!layer_hint || conn->cpath_layer == layer_hint) {
      connection_start_reading(TO_CONN(conn));

      if (connection_get_inbuf_len(TO_CONN(conn)) > 0)
        ++n_packaging_streams;
    }
  }

  if (n_packaging_streams == 0) /* avoid divide-by-zero */
    return 0;

 again:

  /* Give each stream with pending data an equal share of the budget,
   * rounded up. */
  cells_per_conn = CEIL_DIV(max_to_package, n_packaging_streams);

  packaged_this_round = 0;
  n_streams_left = 0;

  /* Iterate over all connections.  Package up to cells_per_conn cells on
   * each.  Update packaged_this_round with the total number of cells
   * packaged, and n_streams_left with the number that still have data to
   * package.
   */
  for (conn=first_conn; conn; conn=conn->next_stream) {
    if (conn->_base.marked_for_close || conn->package_window <= 0)
      continue;
    if (!layer_hint || conn->cpath_layer == layer_hint) {
      int n = cells_per_conn, r;
      /* handle whatever might still be on the inbuf */
      r = connection_edge_package_raw_inbuf(conn, 1, &n);

      /* Note how many we packaged */
      packaged_this_round += (cells_per_conn-n);

      if (r<0) {
        /* Problem while packaging. (We already sent an end cell if
         * possible) */
        connection_mark_for_close(TO_CONN(conn));
        continue;
      }

      /* If there's still data to read, we'll be coming back to this stream. */
      if (connection_get_inbuf_len(TO_CONN(conn)))
        ++n_streams_left;

      /* If the circuit won't accept any more data, return without looking
       * at any more of the streams. Any connections that should be stopped
       * have already been stopped by connection_edge_package_raw_inbuf. */
      if (circuit_consider_stop_edge_reading(circ, layer_hint))
        return -1;
      /* XXXX should we also stop immediately if we fill up the cell queue?
       * Probably. */
    }
  }

  /* If we made progress, and we are willing to package more, and there are
   * any streams left that want to package stuff... try again!
   */
  if (packaged_this_round && packaged_this_round < max_to_package &&
      n_streams_left) {
    max_to_package -= packaged_this_round;
    n_packaging_streams = n_streams_left;
    goto again;
  }

  return 0;
}
  1574. /** Check if the package window for <b>circ</b> is empty (at
  1575. * hop <b>layer_hint</b> if it's defined).
  1576. *
  1577. * If yes, tell edge streams to stop reading and return 1.
  1578. * Else return 0.
  1579. */
  1580. static int
  1581. circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
  1582. {
  1583. edge_connection_t *conn = NULL;
  1584. unsigned domain = layer_hint ? LD_APP : LD_EXIT;
  1585. if (!layer_hint) {
  1586. or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
  1587. log_debug(domain,"considering circ->package_window %d",
  1588. circ->package_window);
  1589. if (circ->package_window <= 0) {
  1590. log_debug(domain,"yes, not-at-origin. stopped.");
  1591. for (conn = or_circ->n_streams; conn; conn=conn->next_stream)
  1592. connection_stop_reading(TO_CONN(conn));
  1593. return 1;
  1594. }
  1595. return 0;
  1596. }
  1597. /* else, layer hint is defined, use it */
  1598. log_debug(domain,"considering layer_hint->package_window %d",
  1599. layer_hint->package_window);
  1600. if (layer_hint->package_window <= 0) {
  1601. log_debug(domain,"yes, at-origin. stopped.");
  1602. for (conn = TO_ORIGIN_CIRCUIT(circ)->p_streams; conn;
  1603. conn=conn->next_stream) {
  1604. if (conn->cpath_layer == layer_hint)
  1605. connection_stop_reading(TO_CONN(conn));
  1606. }
  1607. return 1;
  1608. }
  1609. return 0;
  1610. }
  1611. /** Check if the deliver_window for circuit <b>circ</b> (at hop
  1612. * <b>layer_hint</b> if it's defined) is low enough that we should
  1613. * send a circuit-level sendme back down the circuit. If so, send
  1614. * enough sendmes that the window would be overfull if we sent any
  1615. * more.
  1616. */
  1617. static void
  1618. circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
  1619. {
  1620. // log_fn(LOG_INFO,"Considering: layer_hint is %s",
  1621. // layer_hint ? "defined" : "null");
  1622. while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <=
  1623. CIRCWINDOW_START - CIRCWINDOW_INCREMENT) {
  1624. log_debug(LD_CIRC,"Queuing circuit sendme.");
  1625. if (layer_hint)
  1626. layer_hint->deliver_window += CIRCWINDOW_INCREMENT;
  1627. else
  1628. circ->deliver_window += CIRCWINDOW_INCREMENT;
  1629. if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME,
  1630. NULL, 0, layer_hint) < 0) {
  1631. log_warn(LD_CIRC,
  1632. "relay_send_command_from_edge failed. Circuit's closed.");
  1633. return; /* the circuit's closed, don't continue */
  1634. }
  1635. }
  1636. }
  1637. #ifdef ACTIVE_CIRCUITS_PARANOIA
  1638. #define assert_active_circuits_ok_paranoid(conn) \
  1639. assert_active_circuits_ok(conn)
  1640. #else
  1641. #define assert_active_circuits_ok_paranoid(conn)
  1642. #endif
  1643. /** The total number of cells we have allocated from the memory pool. */
  1644. static int total_cells_allocated = 0;
  1645. /** A memory pool to allocate packed_cell_t objects. */
  1646. static mp_pool_t *cell_pool = NULL;
  1647. /** Memory pool to allocate insertion_time_elem_t objects used for cell
  1648. * statistics. */
  1649. static mp_pool_t *it_pool = NULL;
  1650. /** Allocate structures to hold cells. */
  1651. void
  1652. init_cell_pool(void)
  1653. {
  1654. tor_assert(!cell_pool);
  1655. cell_pool = mp_pool_new(sizeof(packed_cell_t), 128*1024);
  1656. }
  1657. /** Free all storage used to hold cells (and insertion times if we measure
  1658. * cell statistics). */
  1659. void
  1660. free_cell_pool(void)
  1661. {
  1662. /* Maybe we haven't called init_cell_pool yet; need to check for it. */
  1663. if (cell_pool) {
  1664. mp_pool_destroy(cell_pool);
  1665. cell_pool = NULL;
  1666. }
  1667. if (it_pool) {
  1668. mp_pool_destroy(it_pool);
  1669. it_pool = NULL;
  1670. }
  1671. }
  1672. /** Free excess storage in cell pool. */
  1673. void
  1674. clean_cell_pool(void)
  1675. {
  1676. tor_assert(cell_pool);
  1677. mp_pool_clean(cell_pool, 0, 1);
  1678. }
  1679. /** Release storage held by <b>cell</b>. */
  1680. static INLINE void
  1681. packed_cell_free_unchecked(packed_cell_t *cell)
  1682. {
  1683. --total_cells_allocated;
  1684. mp_pool_release(cell);
  1685. }
  1686. /** Allocate and return a new packed_cell_t. */
  1687. static INLINE packed_cell_t *
  1688. packed_cell_alloc(void)
  1689. {
  1690. ++total_cells_allocated;
  1691. return mp_pool_get(cell_pool);
  1692. }
  1693. /** Log current statistics for cell pool allocation at log level
  1694. * <b>severity</b>. */
  1695. void
  1696. dump_cell_pool_usage(int severity)
  1697. {
  1698. circuit_t *c;
  1699. int n_circs = 0;
  1700. int n_cells = 0;
  1701. for (c = _circuit_get_global_list(); c; c = c->next) {
  1702. n_cells += c->n_conn_cells.n;
  1703. if (!CIRCUIT_IS_ORIGIN(c))
  1704. n_cells += TO_OR_CIRCUIT(c)->p_conn_cells.n;
  1705. ++n_circs;
  1706. }
  1707. log(severity, LD_MM, "%d cells allocated on %d circuits. %d cells leaked.",
  1708. n_cells, n_circs, total_cells_allocated - n_cells);
  1709. mp_pool_log_status(cell_pool, severity);
  1710. }
  1711. /** Allocate a new copy of packed <b>cell</b>. */
  1712. static INLINE packed_cell_t *
  1713. packed_cell_copy(const cell_t *cell)
  1714. {
  1715. packed_cell_t *c = packed_cell_alloc();
  1716. cell_pack(c, cell);
  1717. c->next = NULL;
  1718. return c;
  1719. }
  1720. /** Append <b>cell</b> to the end of <b>queue</b>. */
  1721. void
  1722. cell_queue_append(cell_queue_t *queue, packed_cell_t *cell)
  1723. {
  1724. if (queue->tail) {
  1725. tor_assert(!queue->tail->next);
  1726. queue->tail->next = cell;
  1727. } else {
  1728. queue->head = cell;
  1729. }
  1730. queue->tail = cell;
  1731. cell->next = NULL;
  1732. ++queue->n;
  1733. }
/** Append a newly allocated copy of <b>cell</b> to the end of <b>queue</b> */
void
cell_queue_append_packed_copy(cell_queue_t *queue, const cell_t *cell)
{
  packed_cell_t *copy = packed_cell_copy(cell);
  /* Remember the time when this cell was put in the queue. */
  if (get_options()->CellStatistics) {
    struct timeval now;
    uint32_t added;
    insertion_time_queue_t *it_queue = queue->insertion_times;
    /* The insertion-time element pool is created lazily, only when
     * CellStatistics is first enabled. */
    if (!it_pool)
      it_pool = mp_pool_new(sizeof(insertion_time_elem_t), 1024);
    tor_gettimeofday_cached(&now);
#define SECONDS_IN_A_DAY 86400L
    /* Encode the insertion time as hundredths of a second since
     * the start of the (UTC) day, so it fits a uint32_t. */
    added = (uint32_t)(((now.tv_sec % SECONDS_IN_A_DAY) * 100L)
            + ((uint32_t)now.tv_usec / (uint32_t)10000L));
    if (!it_queue) {
      it_queue = tor_malloc_zero(sizeof(insertion_time_queue_t));
      queue->insertion_times = it_queue;
    }
    if (it_queue->last && it_queue->last->insertion_time == added) {
      /* Cells that arrive within the same hundredth of a second share
       * one element; just bump its counter. */
      it_queue->last->counter++;
    } else {
      insertion_time_elem_t *elem = mp_pool_get(it_pool);
      elem->next = NULL;
      elem->insertion_time = added;
      elem->counter = 1;
      if (it_queue->last) {
        it_queue->last->next = elem;
        it_queue->last = elem;
      } else {
        it_queue->first = it_queue->last = elem;
      }
    }
  }
  cell_queue_append(queue, copy);
}
  1771. /** Remove and free every cell in <b>queue</b>. */
  1772. void
  1773. cell_queue_clear(cell_queue_t *queue)
  1774. {
  1775. packed_cell_t *cell, *next;
  1776. cell = queue->head;
  1777. while (cell) {
  1778. next = cell->next;
  1779. packed_cell_free_unchecked(cell);
  1780. cell = next;
  1781. }
  1782. queue->head = queue->tail = NULL;
  1783. queue->n = 0;
  1784. if (queue->insertion_times) {
  1785. while (queue->insertion_times->first) {
  1786. insertion_time_elem_t *elem = queue->insertion_times->first;
  1787. queue->insertion_times->first = elem->next;
  1788. mp_pool_release(elem);
  1789. }
  1790. tor_free(queue->insertion_times);
  1791. }
  1792. }
  1793. /** Extract and return the cell at the head of <b>queue</b>; return NULL if
  1794. * <b>queue</b> is empty. */
  1795. static INLINE packed_cell_t *
  1796. cell_queue_pop(cell_queue_t *queue)
  1797. {
  1798. packed_cell_t *cell = queue->head;
  1799. if (!cell)
  1800. return NULL;
  1801. queue->head = cell->next;
  1802. if (cell == queue->tail) {
  1803. tor_assert(!queue->head);
  1804. queue->tail = NULL;
  1805. }
  1806. --queue->n;
  1807. return cell;
  1808. }
  1809. /** Return a pointer to the "next_active_on_{n,p}_conn" pointer of <b>circ</b>,
  1810. * depending on whether <b>conn</b> matches n_conn or p_conn. */
  1811. static INLINE circuit_t **
  1812. next_circ_on_conn_p(circuit_t *circ, or_connection_t *conn)
  1813. {
  1814. tor_assert(circ);
  1815. tor_assert(conn);
  1816. if (conn == circ->n_conn) {
  1817. return &circ->next_active_on_n_conn;
  1818. } else {
  1819. or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
  1820. tor_assert(conn == orcirc->p_conn);
  1821. return &orcirc->next_active_on_p_conn;
  1822. }
  1823. }
  1824. /** Return a pointer to the "prev_active_on_{n,p}_conn" pointer of <b>circ</b>,
  1825. * depending on whether <b>conn</b> matches n_conn or p_conn. */
  1826. static INLINE circuit_t **
  1827. prev_circ_on_conn_p(circuit_t *circ, or_connection_t *conn)
  1828. {
  1829. tor_assert(circ);
  1830. tor_assert(conn);
  1831. if (conn == circ->n_conn) {
  1832. return &circ->prev_active_on_n_conn;
  1833. } else {
  1834. or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
  1835. tor_assert(conn == orcirc->p_conn);
  1836. return &orcirc->prev_active_on_p_conn;
  1837. }
  1838. }
  1839. /** Helper for sorting cell_ewma_t values in their priority queue. */
  1840. static int
  1841. compare_cell_ewma_counts(const void *p1, const void *p2)
  1842. {
  1843. const cell_ewma_t *e1=p1, *e2=p2;
  1844. if (e1->cell_count < e2->cell_count)
  1845. return -1;
  1846. else if (e1->cell_count > e2->cell_count)
  1847. return 1;
  1848. else
  1849. return 0;
  1850. }
  1851. /** Given a cell_ewma_t, return a pointer to the circuit containing it. */
  1852. static circuit_t *
  1853. cell_ewma_to_circuit(cell_ewma_t *ewma)
  1854. {
  1855. if (ewma->is_for_p_conn) {
  1856. /* This is an or_circuit_t's p_cell_ewma. */
  1857. or_circuit_t *orcirc = SUBTYPE_P(ewma, or_circuit_t, p_cell_ewma);
  1858. return TO_CIRCUIT(orcirc);
  1859. } else {
  1860. /* This is some circuit's n_cell_ewma. */
  1861. return SUBTYPE_P(ewma, circuit_t, n_cell_ewma);
  1862. }
  1863. }
  1864. /* ==== Functions for scaling cell_ewma_t ====
  1865. When choosing which cells to relay first, we favor circuits that have been
  1866. quiet recently. This gives better latency on connections that aren't
  1867. pushing lots of data, and makes the network feel more interactive.
  1868. Conceptually, we take an exponentially weighted mean average of the number
  1869. of cells a circuit has sent, and allow active circuits (those with cells to
  1870. relay) to send cells in reverse order of their exponentially-weighted mean
  1871. average (EWMA) cell count. [That is, a cell sent N seconds ago 'counts'
  1872. F^N times as much as a cell sent now, for 0<F<1.0, and we favor the
  1873. circuit that has sent the fewest cells]
  1874. If 'double' had infinite precision, we could do this simply by counting a
  1875. cell sent at startup as having weight 1.0, and a cell sent N seconds later
  1876. as having weight F^-N. This way, we would never need to re-scale
  1877. any already-sent cells.
  1878. To prevent double from overflowing, we could count a cell sent now as
  1879. having weight 1.0 and a cell sent N seconds ago as having weight F^N.
  1880. This, however, would mean we'd need to re-scale *ALL* old circuits every
  1881. time we wanted to send a cell.
  1882. So as a compromise, we divide time into 'ticks' (currently, 10-second
  1883. increments) and say that a cell sent at the start of a current tick is
  1884. worth 1.0, a cell sent N seconds before the start of the current tick is
  1885. worth F^N, and a cell sent N seconds after the start of the current tick is
  1886. worth F^-N. This way we don't overflow, and we don't need to constantly
  1887. rescale.
  1888. */
  1889. /** How long does a tick last (seconds)? */
  1890. #define EWMA_TICK_LEN 10
  1891. /** The default per-tick scale factor, if it hasn't been overridden by a
  1892. * consensus or a configuration setting. zero means "disabled". */
  1893. #define EWMA_DEFAULT_HALFLIFE 0.0
  1894. /** Given a timeval <b>now</b>, compute the cell_ewma tick in which it occurs
  1895. * and the fraction of the tick that has elapsed between the start of the tick
  1896. * and <b>now</b>. Return the former and store the latter in
  1897. * *<b>remainder_out</b>.
  1898. *
  1899. * These tick values are not meant to be shared between Tor instances, or used
  1900. * for other purposes. */
  1901. static unsigned
  1902. cell_ewma_tick_from_timeval(const struct timeval *now,
  1903. double *remainder_out)
  1904. {
  1905. unsigned res = (unsigned) (now->tv_sec / EWMA_TICK_LEN);
  1906. /* rem */
  1907. double rem = (now->tv_sec % EWMA_TICK_LEN) +
  1908. ((double)(now->tv_usec)) / 1.0e6;
  1909. *remainder_out = rem / EWMA_TICK_LEN;
  1910. return res;
  1911. }
  1912. /** Compute and return the current cell_ewma tick. */
  1913. unsigned
  1914. cell_ewma_get_tick(void)
  1915. {
  1916. return ((unsigned)approx_time() / EWMA_TICK_LEN);
  1917. }
  1918. /** The per-tick scale factor to be used when computing cell-count EWMA
  1919. * values. (A cell sent N ticks before the start of the current tick
  1920. * has value ewma_scale_factor ** N.)
  1921. */
  1922. static double ewma_scale_factor = 0.1;
/** True iff the cell_ewma circuit-priority algorithm is enabled; set by
 * cell_ewma_set_scale_factor(). */
  1924. static int ewma_enabled = 0;
  1925. #define EPSILON 0.00001
  1926. #define LOG_ONEHALF -0.69314718055994529
/** Adjust the global cell scale factor based on <b>options</b>.
 *
 * Precedence: an explicit CircuitPriorityHalflife in <b>options</b> wins;
 * otherwise the CircuitPriorityHalflifeMsec consensus parameter; otherwise
 * the compiled-in default.  A halflife at or below EPSILON disables the
 * EWMA algorithm entirely. */
void
cell_ewma_set_scale_factor(const or_options_t *options,
                           const networkstatus_t *consensus)
{
  int32_t halflife_ms;
  double halflife;
  const char *source;
  if (options && options->CircuitPriorityHalflife >= -EPSILON) {
    /* Configured value takes precedence; >= -EPSILON means "was set",
     * since the unset sentinel is more negative. */
    halflife = options->CircuitPriorityHalflife;
    source = "CircuitPriorityHalflife in configuration";
  } else if (consensus && (halflife_ms = networkstatus_get_param(
                 consensus, "CircuitPriorityHalflifeMsec",
                 -1, -1, INT32_MAX)) >= 0) {
    /* Consensus supplies milliseconds; convert to seconds. */
    halflife = ((double)halflife_ms)/1000.0;
    source = "CircuitPriorityHalflifeMsec in consensus";
  } else {
    halflife = EWMA_DEFAULT_HALFLIFE;
    source = "Default value";
  }

  if (halflife <= EPSILON) {
    /* The cell EWMA algorithm is disabled. */
    ewma_scale_factor = 0.1;
    ewma_enabled = 0;
    log_info(LD_OR,
             "Disabled cell_ewma algorithm because of value in %s",
             source);
  } else {
    /* convert halflife into halflife-per-tick. */
    halflife /= EWMA_TICK_LEN;
    /* compute per-tick scale factor: after one halflife's worth of
     * ticks the factor has decayed to one half. */
    ewma_scale_factor = exp( LOG_ONEHALF / halflife );
    ewma_enabled = 1;
    log_info(LD_OR,
             "Enabled cell_ewma algorithm because of value in %s; "
             "scale factor is %f per %d seconds",
             source, ewma_scale_factor, EWMA_TICK_LEN);
  }
}
  1966. /** Return the multiplier necessary to convert the value of a cell sent in
  1967. * 'from_tick' to one sent in 'to_tick'. */
  1968. static INLINE double
  1969. get_scale_factor(unsigned from_tick, unsigned to_tick)
  1970. {
  1971. /* This math can wrap around, but that's okay: unsigned overflow is
  1972. well-defined */
  1973. int diff = (int)(to_tick - from_tick);
  1974. return pow(ewma_scale_factor, diff);
  1975. }
  1976. /** Adjust the cell count of <b>ewma</b> so that it is scaled with respect to
  1977. * <b>cur_tick</b> */
  1978. static void
  1979. scale_single_cell_ewma(cell_ewma_t *ewma, unsigned cur_tick)
  1980. {
  1981. double factor = get_scale_factor(ewma->last_adjusted_tick, cur_tick);
  1982. ewma->cell_count *= factor;
  1983. ewma->last_adjusted_tick = cur_tick;
  1984. }
/** Adjust the cell count of every active circuit on <b>conn</b> so
 * that they are scaled with respect to <b>cur_tick</b> */
static void
scale_active_circuits(or_connection_t *conn, unsigned cur_tick)
{
  /* All queue entries share the connection's recalibration tick, so a
   * single factor applies to every element. */
  double factor = get_scale_factor(
              conn->active_circuit_pqueue_last_recalibrated,
              cur_tick);
  /* Ordinarily it isn't okay to change the value of an element in a heap,
   * but it's okay here, since we are preserving the order. */
  SMARTLIST_FOREACH(conn->active_circuit_pqueue, cell_ewma_t *, e, {
      tor_assert(e->last_adjusted_tick ==
                 conn->active_circuit_pqueue_last_recalibrated);
      e->cell_count *= factor;
      e->last_adjusted_tick = cur_tick;
  });
  conn->active_circuit_pqueue_last_recalibrated = cur_tick;
}
  2003. /** Rescale <b>ewma</b> to the same scale as <b>conn</b>, and add it to
  2004. * <b>conn</b>'s priority queue of active circuits */
  2005. static void
  2006. add_cell_ewma_to_conn(or_connection_t *conn, cell_ewma_t *ewma)
  2007. {
  2008. tor_assert(ewma->heap_index == -1);
  2009. scale_single_cell_ewma(ewma,
  2010. conn->active_circuit_pqueue_last_recalibrated);
  2011. smartlist_pqueue_add(conn->active_circuit_pqueue,
  2012. compare_cell_ewma_counts,
  2013. STRUCT_OFFSET(cell_ewma_t, heap_index),
  2014. ewma);
  2015. }
  2016. /** Remove <b>ewma</b> from <b>conn</b>'s priority queue of active circuits */
  2017. static void
  2018. remove_cell_ewma_from_conn(or_connection_t *conn, cell_ewma_t *ewma)
  2019. {
  2020. tor_assert(ewma->heap_index != -1);
  2021. smartlist_pqueue_remove(conn->active_circuit_pqueue,
  2022. compare_cell_ewma_counts,
  2023. STRUCT_OFFSET(cell_ewma_t, heap_index),
  2024. ewma);
  2025. }
  2026. /** Remove and return the first cell_ewma_t from conn's priority queue of
  2027. * active circuits. Requires that the priority queue is nonempty. */
  2028. static cell_ewma_t *
  2029. pop_first_cell_ewma_from_conn(or_connection_t *conn)
  2030. {
  2031. return smartlist_pqueue_pop(conn->active_circuit_pqueue,
  2032. compare_cell_ewma_counts,
  2033. STRUCT_OFFSET(cell_ewma_t, heap_index));
  2034. }
/** Add <b>circ</b> to the list of circuits with pending cells on
 * <b>conn</b>.  No effect if <b>circ</b> is already linked.
 *
 * The active circuits form a circular doubly-linked list threaded
 * through the circuits' next/prev_active_on_{n,p}_conn pointers; a
 * parallel priority queue of cell_ewma_t entries orders them for
 * scheduling. */
void
make_circuit_active_on_conn(circuit_t *circ, or_connection_t *conn)
{
  circuit_t **nextp = next_circ_on_conn_p(circ, conn);
  circuit_t **prevp = prev_circ_on_conn_p(circ, conn);

  /* Both links are non-NULL iff the circuit is already on the list
   * (a single-element list points at itself). */
  if (*nextp && *prevp) {
    /* Already active. */
    return;
  }

  assert_active_circuits_ok_paranoid(conn);

  if (! conn->active_circuits) {
    /* Empty list: circ becomes a one-element ring. */
    conn->active_circuits = circ;
    *prevp = *nextp = circ;
  } else {
    /* Splice circ in just before the head, i.e. at the tail of the ring. */
    circuit_t *head = conn->active_circuits;
    circuit_t *old_tail = *prev_circ_on_conn_p(head, conn);
    *next_circ_on_conn_p(old_tail, conn) = circ;
    *nextp = head;
    *prev_circ_on_conn_p(head, conn) = circ;
    *prevp = old_tail;
  }

  /* Mirror the list membership in the EWMA priority queue, using the
   * ewma record that corresponds to this side of the circuit. */
  if (circ->n_conn == conn) {
    add_cell_ewma_to_conn(conn, &circ->n_cell_ewma);
  } else {
    or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
    tor_assert(conn == orcirc->p_conn);
    add_cell_ewma_to_conn(conn, &orcirc->p_cell_ewma);
  }

  assert_active_circuits_ok_paranoid(conn);
}
  2067. /** Remove <b>circ</b> from the list of circuits with pending cells on
  2068. * <b>conn</b>. No effect if <b>circ</b> is already unlinked. */
  2069. void
  2070. make_circuit_inactive_on_conn(circuit_t *circ, or_connection_t *conn)
  2071. {
  2072. circuit_t **nextp = next_circ_on_conn_p(circ, conn);
  2073. circuit_t **prevp = prev_circ_on_conn_p(circ, conn);
  2074. circuit_t *next = *nextp, *prev = *prevp;
  2075. if (!next && !prev) {
  2076. /* Already inactive. */
  2077. return;
  2078. }
  2079. assert_active_circuits_ok_paranoid(conn);
  2080. tor_assert(next && prev);
  2081. tor_assert(*prev_circ_on_conn_p(next, conn) == circ);
  2082. tor_assert(*next_circ_on_conn_p(prev, conn) == circ);
  2083. if (next == circ) {
  2084. conn->active_circuits = NULL;
  2085. } else {
  2086. *prev_circ_on_conn_p(next, conn) = prev;
  2087. *next_circ_on_conn_p(prev, conn) = next;
  2088. if (conn->active_circuits == circ)
  2089. conn->active_circuits = next;
  2090. }
  2091. *prevp = *nextp = NULL;
  2092. if (circ->n_conn == conn) {
  2093. remove_cell_ewma_from_conn(conn, &circ->n_cell_ewma);
  2094. } else {
  2095. or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
  2096. tor_assert(conn == orcirc->p_conn);
  2097. remove_cell_ewma_from_conn(conn, &orcirc->p_cell_ewma);
  2098. }
  2099. assert_active_circuits_ok_paranoid(conn);
  2100. }
  2101. /** Remove all circuits from the list of circuits with pending cells on
  2102. * <b>conn</b>. */
  2103. void
  2104. connection_or_unlink_all_active_circs(or_connection_t *orconn)
  2105. {
  2106. circuit_t *head = orconn->active_circuits;
  2107. circuit_t *cur = head;
  2108. if (! head)
  2109. return;
  2110. do {
  2111. circuit_t *next = *next_circ_on_conn_p(cur, orconn);
  2112. *prev_circ_on_conn_p(cur, orconn) = NULL;
  2113. *next_circ_on_conn_p(cur, orconn) = NULL;
  2114. cur = next;
  2115. } while (cur != head);
  2116. orconn->active_circuits = NULL;
  2117. SMARTLIST_FOREACH(orconn->active_circuit_pqueue, cell_ewma_t *, e,
  2118. e->heap_index = -1);
  2119. smartlist_clear(orconn->active_circuit_pqueue);
  2120. }
  2121. /** Block (if <b>block</b> is true) or unblock (if <b>block</b> is false)
  2122. * every edge connection that is using <b>circ</b> to write to <b>orconn</b>,
  2123. * and start or stop reading as appropriate.
  2124. *
  2125. * If <b>stream_id</b> is nonzero, block only the edge connection whose
  2126. * stream_id matches it.
  2127. *
  2128. * Returns the number of streams whose status we changed.
  2129. */
  2130. static int
  2131. set_streams_blocked_on_circ(circuit_t *circ, or_connection_t *orconn,
  2132. int block, streamid_t stream_id)
  2133. {
  2134. edge_connection_t *edge = NULL;
  2135. int n = 0;
  2136. if (circ->n_conn == orconn) {
  2137. circ->streams_blocked_on_n_conn = block;
  2138. if (CIRCUIT_IS_ORIGIN(circ))
  2139. edge = TO_ORIGIN_CIRCUIT(circ)->p_streams;
  2140. } else {
  2141. circ->streams_blocked_on_p_conn = block;
  2142. tor_assert(!CIRCUIT_IS_ORIGIN(circ));
  2143. edge = TO_OR_CIRCUIT(circ)->n_streams;
  2144. }
  2145. for (; edge; edge = edge->next_stream) {
  2146. connection_t *conn = TO_CONN(edge);
  2147. if (stream_id && edge->stream_id != stream_id)
  2148. continue;
  2149. if (edge->edge_blocked_on_circ != block) {
  2150. ++n;
  2151. edge->edge_blocked_on_circ = block;
  2152. }
  2153. if (!conn->read_event && !HAS_BUFFEREVENT(conn)) {
  2154. /* This connection is a placeholder for something; probably a DNS
  2155. * request. It can't actually stop or start reading.*/
  2156. continue;
  2157. }
  2158. if (block) {
  2159. if (connection_is_reading(conn))
  2160. connection_stop_reading(conn);
  2161. } else {
  2162. /* Is this right? */
  2163. if (!connection_is_reading(conn))
  2164. connection_start_reading(conn);
  2165. }
  2166. }
  2167. return n;
  2168. }
/** Pull as many cells as possible (but no more than <b>max</b>) from the
 * queue of the first active circuit on <b>conn</b>, and write them to
 * <b>conn</b>-&gt;outbuf.  Return the number of cells written.  Advance
 * the active circuit pointer to the next active circuit in the ring.
 *
 * When the EWMA scheduler is enabled, the circuit flushed is the one at
 * the head of the connection's EWMA priority queue (lowest cell count)
 * rather than the next circuit in the ring. */
int
connection_or_flush_from_first_active_circuit(or_connection_t *conn, int max,
                                              time_t now)
{
  int n_flushed;
  cell_queue_t *queue;
  circuit_t *circ;
  int streams_blocked;
  /* The current (hi-res) time */
  struct timeval now_hires;
  /* The EWMA cell counter for the circuit we're flushing. */
  cell_ewma_t *cell_ewma = NULL;
  double ewma_increment = -1;
  circ = conn->active_circuits;
  if (!circ) return 0;
  assert_active_circuits_ok_paranoid(conn);
  /* See if we're doing the ewma circuit selection algorithm. */
  if (ewma_enabled) {
    unsigned tick;
    double fractional_tick;
    tor_gettimeofday_cached(&now_hires);
    tick = cell_ewma_tick_from_timeval(&now_hires, &fractional_tick);
    /* On entering a new tick, rescale every queued EWMA so that all
     * counters stay comparable. */
    if (tick != conn->active_circuit_pqueue_last_recalibrated) {
      scale_active_circuits(conn, tick);
    }
    /* Cells flushed later within a tick count for more, so the average
     * decays smoothly inside a tick as well as across ticks. */
    ewma_increment = pow(ewma_scale_factor, -fractional_tick);
    /* Override the ring choice: flush the least-busy circuit instead. */
    cell_ewma = smartlist_get(conn->active_circuit_pqueue, 0);
    circ = cell_ewma_to_circuit(cell_ewma);
  }
  /* Pick the cell queue and blocked-streams flag for the direction in
   * which this circuit writes to conn. */
  if (circ->n_conn == conn) {
    queue = &circ->n_conn_cells;
    streams_blocked = circ->streams_blocked_on_n_conn;
  } else {
    queue = &TO_OR_CIRCUIT(circ)->p_conn_cells;
    streams_blocked = circ->streams_blocked_on_p_conn;
  }
  tor_assert(*next_circ_on_conn_p(circ,conn));
  for (n_flushed = 0; n_flushed < max && queue->head; ) {
    packed_cell_t *cell = cell_queue_pop(queue);
    tor_assert(*next_circ_on_conn_p(circ,conn));
    /* Calculate the exact time that this cell has spent in the queue. */
    if (get_options()->CellStatistics && !CIRCUIT_IS_ORIGIN(circ)) {
      struct timeval tvnow;
      uint32_t flushed;
      uint32_t cell_waiting_time;
      insertion_time_queue_t *it_queue = queue->insertion_times;
      tor_gettimeofday_cached(&tvnow);
      /* Time of day in units of 10 ms, wrapping daily; this matches the
       * granularity recorded when the cell was inserted. */
      flushed = (uint32_t)((tvnow.tv_sec % SECONDS_IN_A_DAY) * 100L +
                (uint32_t)tvnow.tv_usec / (uint32_t)10000L);
      if (!it_queue || !it_queue->first) {
        log_info(LD_GENERAL, "Cannot determine insertion time of cell. "
                 "Looks like the CellStatistics option was "
                 "recently enabled.");
      } else {
        or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
        insertion_time_elem_t *elem = it_queue->first;
        /* Waiting time in ms, computed modulo one day so that wrap-around
         * at midnight cannot yield a bogus value. */
        cell_waiting_time =
          (uint32_t)((flushed * 10L + SECONDS_IN_A_DAY * 1000L -
                      elem->insertion_time * 10L) %
                     (SECONDS_IN_A_DAY * 1000L));
#undef SECONDS_IN_A_DAY
        /* One insertion-time element may cover several cells inserted at
         * the same 10-ms instant; release it once its count is spent. */
        elem->counter--;
        if (elem->counter < 1) {
          it_queue->first = elem->next;
          if (elem == it_queue->last)
            it_queue->last = NULL;
          mp_pool_release(elem);
        }
        orcirc->total_cell_waiting_time += cell_waiting_time;
        orcirc->processed_cells++;
      }
    }
    /* If we just flushed our queue and this circuit is used for a
     * tunneled directory request, possibly advance its state. */
    if (queue->n == 0 && TO_CONN(conn)->dirreq_id)
      geoip_change_dirreq_state(TO_CONN(conn)->dirreq_id,
                                DIRREQ_TUNNELED,
                                DIRREQ_CIRC_QUEUE_FLUSHED);
    connection_write_to_buf(cell->body, CELL_NETWORK_SIZE, TO_CONN(conn));
    packed_cell_free_unchecked(cell);
    ++n_flushed;
    if (cell_ewma) {
      cell_ewma_t *tmp;
      cell_ewma->cell_count += ewma_increment;
      /* We pop and re-add the cell_ewma_t here, not above, since we need to
       * re-add it immediately to keep the priority queue consistent with
       * the linked-list implementation */
      tmp = pop_first_cell_ewma_from_conn(conn);
      tor_assert(tmp == cell_ewma);
      add_cell_ewma_to_conn(conn, cell_ewma);
    }
    if (circ != conn->active_circuits) {
      /* If this happens, the current circuit just got made inactive by
       * a call in connection_write_to_buf().  That's nothing to worry about:
       * circuit_make_inactive_on_conn() already advanced conn->active_circuits
       * for us.
       */
      assert_active_circuits_ok_paranoid(conn);
      goto done;
    }
  }
  tor_assert(*next_circ_on_conn_p(circ,conn));
  assert_active_circuits_ok_paranoid(conn);
  /* Round-robin: the next flush starts from the following circuit. */
  conn->active_circuits = *next_circ_on_conn_p(circ, conn);
  /* Is the cell queue low enough to unblock all the streams that are waiting
   * to write to this circuit? */
  if (streams_blocked && queue->n <= CELL_QUEUE_LOWWATER_SIZE)
    set_streams_blocked_on_circ(circ, conn, 0, 0); /* unblock streams */
  /* Did we just run out of cells on this circuit's queue? */
  if (queue->n == 0) {
    log_debug(LD_GENERAL, "Made a circuit inactive.");
    make_circuit_inactive_on_conn(circ, conn);
  }
 done:
  if (n_flushed)
    conn->timestamp_last_added_nonpadding = now;
  return n_flushed;
}
  2291. /** Add <b>cell</b> to the queue of <b>circ</b> writing to <b>orconn</b>
  2292. * transmitting in <b>direction</b>. */
  2293. void
  2294. append_cell_to_circuit_queue(circuit_t *circ, or_connection_t *orconn,
  2295. cell_t *cell, cell_direction_t direction,
  2296. streamid_t fromstream)
  2297. {
  2298. cell_queue_t *queue;
  2299. int streams_blocked;
  2300. if (circ->marked_for_close)
  2301. return;
  2302. if (direction == CELL_DIRECTION_OUT) {
  2303. queue = &circ->n_conn_cells;
  2304. streams_blocked = circ->streams_blocked_on_n_conn;
  2305. } else {
  2306. or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
  2307. queue = &orcirc->p_conn_cells;
  2308. streams_blocked = circ->streams_blocked_on_p_conn;
  2309. }
  2310. cell_queue_append_packed_copy(queue, cell);
  2311. /* If we have too many cells on the circuit, we should stop reading from
  2312. * the edge streams for a while. */
  2313. if (!streams_blocked && queue->n >= CELL_QUEUE_HIGHWATER_SIZE)
  2314. set_streams_blocked_on_circ(circ, orconn, 1, 0); /* block streams */
  2315. if (streams_blocked && fromstream) {
  2316. /* This edge connection is apparently not blocked; block it. */
  2317. set_streams_blocked_on_circ(circ, orconn, 1, fromstream);
  2318. }
  2319. if (queue->n == 1) {
  2320. /* This was the first cell added to the queue. We need to make this
  2321. * circuit active. */
  2322. log_debug(LD_GENERAL, "Made a circuit active.");
  2323. make_circuit_active_on_conn(circ, orconn);
  2324. }
  2325. if (! connection_get_outbuf_len(TO_CONN(orconn))) {
  2326. /* There is no data at all waiting to be sent on the outbuf. Add a
  2327. * cell, so that we can notice when it gets flushed, flushed_some can
  2328. * get called, and we can start putting more data onto the buffer then.
  2329. */
  2330. log_debug(LD_GENERAL, "Primed a buffer.");
  2331. connection_or_flush_from_first_active_circuit(orconn, 1, approx_time());
  2332. }
  2333. }
  2334. /** Append an encoded value of <b>addr</b> to <b>payload_out</b>, which must
  2335. * have at least 18 bytes of free space. The encoding is, as specified in
  2336. * tor-spec.txt:
  2337. * RESOLVED_TYPE_IPV4 or RESOLVED_TYPE_IPV6 [1 byte]
  2338. * LENGTH [1 byte]
  2339. * ADDRESS [length bytes]
  2340. * Return the number of bytes added, or -1 on error */
  2341. int
  2342. append_address_to_payload(uint8_t *payload_out, const tor_addr_t *addr)
  2343. {
  2344. uint32_t a;
  2345. switch (tor_addr_family(addr)) {
  2346. case AF_INET:
  2347. payload_out[0] = RESOLVED_TYPE_IPV4;
  2348. payload_out[1] = 4;
  2349. a = tor_addr_to_ipv4n(addr);
  2350. memcpy(payload_out+2, &a, 4);
  2351. return 6;
  2352. case AF_INET6:
  2353. payload_out[0] = RESOLVED_TYPE_IPV6;
  2354. payload_out[1] = 16;
  2355. memcpy(payload_out+2, tor_addr_to_in6_addr8(addr), 16);
  2356. return 18;
  2357. case AF_UNSPEC:
  2358. default:
  2359. return -1;
  2360. }
  2361. }
  2362. /** Given <b>payload_len</b> bytes at <b>payload</b>, starting with an address
  2363. * encoded as by append_address_to_payload(), try to decode the address into
  2364. * *<b>addr_out</b>. Return the next byte in the payload after the address on
  2365. * success, or NULL on failure. */
  2366. const uint8_t *
  2367. decode_address_from_payload(tor_addr_t *addr_out, const uint8_t *payload,
  2368. int payload_len)
  2369. {
  2370. if (payload_len < 2)
  2371. return NULL;
  2372. if (payload_len < 2+payload[1])
  2373. return NULL;
  2374. switch (payload[0]) {
  2375. case RESOLVED_TYPE_IPV4:
  2376. if (payload[1] != 4)
  2377. return NULL;
  2378. tor_addr_from_ipv4n(addr_out, get_uint32(payload+2));
  2379. break;
  2380. case RESOLVED_TYPE_IPV6:
  2381. if (payload[1] != 16)
  2382. return NULL;
  2383. tor_addr_from_ipv6_bytes(addr_out, (char*)(payload+2));
  2384. break;
  2385. default:
  2386. tor_addr_make_unspec(addr_out);
  2387. break;
  2388. }
  2389. return payload + 2 + payload[1];
  2390. }
  2391. /** Remove all the cells queued on <b>circ</b> for <b>orconn</b>. */
  2392. void
  2393. circuit_clear_cell_queue(circuit_t *circ, or_connection_t *orconn)
  2394. {
  2395. cell_queue_t *queue;
  2396. if (circ->n_conn == orconn) {
  2397. queue = &circ->n_conn_cells;
  2398. } else {
  2399. or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
  2400. tor_assert(orcirc->p_conn == orconn);
  2401. queue = &orcirc->p_conn_cells;
  2402. }
  2403. if (queue->n)
  2404. make_circuit_inactive_on_conn(circ,orconn);
  2405. cell_queue_clear(queue);
  2406. }
  2407. /** Fail with an assert if the active circuits ring on <b>orconn</b> is
  2408. * corrupt. */
  2409. void
  2410. assert_active_circuits_ok(or_connection_t *orconn)
  2411. {
  2412. circuit_t *head = orconn->active_circuits;
  2413. circuit_t *cur = head;
  2414. int n = 0;
  2415. if (! head)
  2416. return;
  2417. do {
  2418. circuit_t *next = *next_circ_on_conn_p(cur, orconn);
  2419. circuit_t *prev = *prev_circ_on_conn_p(cur, orconn);
  2420. cell_ewma_t *ewma;
  2421. tor_assert(next);
  2422. tor_assert(prev);
  2423. tor_assert(*next_circ_on_conn_p(prev, orconn) == cur);
  2424. tor_assert(*prev_circ_on_conn_p(next, orconn) == cur);
  2425. if (orconn == cur->n_conn) {
  2426. ewma = &cur->n_cell_ewma;
  2427. tor_assert(!ewma->is_for_p_conn);
  2428. } else {
  2429. ewma = &TO_OR_CIRCUIT(cur)->p_cell_ewma;
  2430. tor_assert(ewma->is_for_p_conn);
  2431. }
  2432. tor_assert(ewma->heap_index != -1);
  2433. tor_assert(ewma == smartlist_get(orconn->active_circuit_pqueue,
  2434. ewma->heap_index));
  2435. n++;
  2436. cur = next;
  2437. } while (cur != head);
  2438. tor_assert(n == smartlist_len(orconn->active_circuit_pqueue));
  2439. }
  2440. /** Return 1 if we shouldn't restart reading on this circuit, even if
  2441. * we get a SENDME. Else return 0.
  2442. */
  2443. static int
  2444. circuit_queue_streams_are_blocked(circuit_t *circ)
  2445. {
  2446. if (CIRCUIT_IS_ORIGIN(circ)) {
  2447. return circ->streams_blocked_on_n_conn;
  2448. } else {
  2449. return circ->streams_blocked_on_p_conn;
  2450. }
  2451. }