relay.c
  1. /* Copyright (c) 2001 Matej Pfajfar.
  2. * Copyright (c) 2001-2004, Roger Dingledine.
  3. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
  4. * Copyright (c) 2007-2016, The Tor Project, Inc. */
  5. /* See LICENSE for licensing information */
  6. /**
  7. * \file relay.c
  8. * \brief Handle relay cell encryption/decryption, plus packaging and
  9. * receiving from circuits, plus queuing on circuits.
  10. **/
  11. #define RELAY_PRIVATE
  12. #include "or.h"
  13. #include "addressmap.h"
  14. #include "buffers.h"
  15. #include "channel.h"
  16. #include "circpathbias.h"
  17. #include "circuitbuild.h"
  18. #include "circuitlist.h"
  19. #include "circuituse.h"
  20. #include "config.h"
  21. #include "connection.h"
  22. #include "connection_edge.h"
  23. #include "connection_or.h"
  24. #include "control.h"
  25. #include "geoip.h"
  26. #include "main.h"
  27. #include "networkstatus.h"
  28. #include "nodelist.h"
  29. #include "onion.h"
  30. #include "policies.h"
  31. #include "reasons.h"
  32. #include "relay.h"
  33. #include "rendcache.h"
  34. #include "rendcommon.h"
  35. #include "router.h"
  36. #include "routerlist.h"
  37. #include "routerparse.h"
  38. #include "scheduler.h"
  39. static edge_connection_t *relay_lookup_conn(circuit_t *circ, cell_t *cell,
  40. cell_direction_t cell_direction,
  41. crypt_path_t *layer_hint);
  42. static int connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
  43. edge_connection_t *conn,
  44. crypt_path_t *layer_hint);
  45. static void circuit_consider_sending_sendme(circuit_t *circ,
  46. crypt_path_t *layer_hint);
  47. static void circuit_resume_edge_reading(circuit_t *circ,
  48. crypt_path_t *layer_hint);
  49. static int circuit_resume_edge_reading_helper(edge_connection_t *conn,
  50. circuit_t *circ,
  51. crypt_path_t *layer_hint);
  52. static int circuit_consider_stop_edge_reading(circuit_t *circ,
  53. crypt_path_t *layer_hint);
  54. static int circuit_queue_streams_are_blocked(circuit_t *circ);
  55. static void adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ,
  56. entry_connection_t *conn,
  57. node_t *node,
  58. const tor_addr_t *addr);
  59. #if 0
  60. static int get_max_middle_cells(void);
  61. #endif
  62. /** Stop reading on edge connections when we have this many cells
  63. * waiting on the appropriate queue. */
  64. #define CELL_QUEUE_HIGHWATER_SIZE 256
  65. /** Start reading from edge connections again when we get down to this many
  66. * cells. */
  67. #define CELL_QUEUE_LOWWATER_SIZE 64
  68. /** Stats: how many relay cells have originated at this hop, or have
  69. * been relayed onward (not recognized at this hop)?
  70. */
  71. uint64_t stats_n_relay_cells_relayed = 0;
  72. /** Stats: how many relay cells have been delivered to streams at this
  73. * hop?
  74. */
  75. uint64_t stats_n_relay_cells_delivered = 0;
  76. /** Used to tell which stream to read from first on a circuit. */
  77. static tor_weak_rng_t stream_choice_rng = TOR_WEAK_RNG_INIT;
  78. /** Update digest from the payload of cell. Assign integrity part to
  79. * cell.
  80. */
  81. static void
  82. relay_set_digest(crypto_digest_t *digest, cell_t *cell)
  83. {
  84. char integrity[4];
  85. relay_header_t rh;
  86. crypto_digest_add_bytes(digest, (char*)cell->payload, CELL_PAYLOAD_SIZE);
  87. crypto_digest_get_digest(digest, integrity, 4);
  88. // log_fn(LOG_DEBUG,"Putting digest of %u %u %u %u into relay cell.",
  89. // integrity[0], integrity[1], integrity[2], integrity[3]);
  90. relay_header_unpack(&rh, cell->payload);
  91. memcpy(rh.integrity, integrity, 4);
  92. relay_header_pack(cell->payload, &rh);
  93. }
  94. /** Does the digest for this circuit indicate that this cell is for us?
  95. *
  96. * Update digest from the payload of cell (with the integrity part set
  97. * to 0). If the integrity part is valid, return 1, else restore digest
  98. * and cell to their original state and return 0.
  99. */
  100. static int
  101. relay_digest_matches(crypto_digest_t *digest, cell_t *cell)
  102. {
  103. uint32_t received_integrity, calculated_integrity;
  104. relay_header_t rh;
  105. crypto_digest_t *backup_digest=NULL;
  106. backup_digest = crypto_digest_dup(digest);
  107. relay_header_unpack(&rh, cell->payload);
  108. memcpy(&received_integrity, rh.integrity, 4);
  109. memset(rh.integrity, 0, 4);
  110. relay_header_pack(cell->payload, &rh);
  111. // log_fn(LOG_DEBUG,"Reading digest of %u %u %u %u from relay cell.",
  112. // received_integrity[0], received_integrity[1],
  113. // received_integrity[2], received_integrity[3]);
  114. crypto_digest_add_bytes(digest, (char*) cell->payload, CELL_PAYLOAD_SIZE);
  115. crypto_digest_get_digest(digest, (char*) &calculated_integrity, 4);
  116. if (calculated_integrity != received_integrity) {
  117. // log_fn(LOG_INFO,"Recognized=0 but bad digest. Not recognizing.");
  118. // (%d vs %d).", received_integrity, calculated_integrity);
  119. /* restore digest to its old form */
  120. crypto_digest_assign(digest, backup_digest);
  121. /* restore the relay header */
  122. memcpy(rh.integrity, &received_integrity, 4);
  123. relay_header_pack(cell->payload, &rh);
  124. crypto_digest_free(backup_digest);
  125. return 0;
  126. }
  127. crypto_digest_free(backup_digest);
  128. return 1;
  129. }
  130. /** Apply <b>cipher</b> to CELL_PAYLOAD_SIZE bytes of <b>in</b>
  131. * (in place).
  132. *
  133. * If <b>encrypt_mode</b> is 1 then encrypt, else decrypt.
  134. *
  135. * Returns 0.
  136. */
  137. static int
  138. relay_crypt_one_payload(crypto_cipher_t *cipher, uint8_t *in,
  139. int encrypt_mode)
  140. {
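  /* Note: the relay cipher is a counter-mode stream cipher, so encryption
   * and decryption are the same keystream XOR; encrypt_mode is therefore
   * accepted for symmetry with callers but ignored here. */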
  141. (void)encrypt_mode;
  142. crypto_cipher_crypt_inplace(cipher, (char*) in, CELL_PAYLOAD_SIZE);
  143. return 0;
  144. }
  145. /** Receive a relay cell:
  146. * - Crypt it (encrypt if headed toward the origin or if we <b>are</b> the
  147. * origin; decrypt if we're headed toward the exit).
  148. * - Check if recognized (if exitward).
  149. * - If recognized and the digest checks out, then find if there's a stream
  150. * that the cell is intended for, and deliver it to the right
  151. * connection_edge.
  152. * - If not recognized, then we need to relay it: append it to the appropriate
  153. * cell_queue on <b>circ</b>.
  154. *
  155. * Return -<b>reason</b> on failure.
  156. */
  157. int
  158. circuit_receive_relay_cell(cell_t *cell, circuit_t *circ,
  159. cell_direction_t cell_direction)
  160. {
  161. channel_t *chan = NULL;
  162. crypt_path_t *layer_hint=NULL;
  163. char recognized=0;
  164. int reason;
  165. tor_assert(cell);
  166. tor_assert(circ);
  167. tor_assert(cell_direction == CELL_DIRECTION_OUT ||
  168. cell_direction == CELL_DIRECTION_IN);
  169. if (circ->marked_for_close)
  170. return 0;
  171. if (relay_crypt(circ, cell, cell_direction, &layer_hint, &recognized) < 0) {
  172. log_warn(LD_BUG,"relay crypt failed. Dropping connection.");
  173. return -END_CIRC_REASON_INTERNAL;
  174. }
  175. if (recognized) {
  176. edge_connection_t *conn = NULL;
  177. if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
  178. pathbias_check_probe_response(circ, cell);
  179. /* We need to drop this cell no matter what to avoid code that expects
  180. * a certain purpose (such as the hidserv code). */
  181. return 0;
  182. }
  183. conn = relay_lookup_conn(circ, cell, cell_direction, layer_hint);
  184. if (cell_direction == CELL_DIRECTION_OUT) {
  185. ++stats_n_relay_cells_delivered;
  186. log_debug(LD_OR,"Sending away from origin.");
  187. if ((reason=connection_edge_process_relay_cell(cell, circ, conn, NULL))
  188. < 0) {
  189. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  190. "connection_edge_process_relay_cell (away from origin) "
  191. "failed.");
  192. return reason;
  193. }
  194. }
  195. if (cell_direction == CELL_DIRECTION_IN) {
  196. ++stats_n_relay_cells_delivered;
  197. log_debug(LD_OR,"Sending to origin.");
  198. if ((reason = connection_edge_process_relay_cell(cell, circ, conn,
  199. layer_hint)) < 0) {
  200. log_warn(LD_OR,
  201. "connection_edge_process_relay_cell (at origin) failed.");
  202. return reason;
  203. }
  204. }
  205. return 0;
  206. }
  207. /* not recognized. pass it on. */
  208. if (cell_direction == CELL_DIRECTION_OUT) {
  209. cell->circ_id = circ->n_circ_id; /* switch it */
  210. chan = circ->n_chan;
  211. } else if (! CIRCUIT_IS_ORIGIN(circ)) {
  212. cell->circ_id = TO_OR_CIRCUIT(circ)->p_circ_id; /* switch it */
  213. chan = TO_OR_CIRCUIT(circ)->p_chan;
  214. } else {
  215. log_fn(LOG_PROTOCOL_WARN, LD_OR,
  216. "Dropping unrecognized inbound cell on origin circuit.");
  217. /* If we see unrecognized cells on path bias testing circs,
  218. * it's bad mojo. Those circuits need to die.
  219. * XXX: Shouldn't they always die? */
  220. if (circ->purpose == CIRCUIT_PURPOSE_PATH_BIAS_TESTING) {
  221. TO_ORIGIN_CIRCUIT(circ)->path_state = PATH_STATE_USE_FAILED;
  222. return -END_CIRC_REASON_TORPROTOCOL;
  223. } else {
  224. return 0;
  225. }
  226. }
  227. if (!chan) {
  228. // XXXX Can this splice stuff be done more cleanly?
  229. if (! CIRCUIT_IS_ORIGIN(circ) &&
  230. TO_OR_CIRCUIT(circ)->rend_splice &&
  231. cell_direction == CELL_DIRECTION_OUT) {
  232. or_circuit_t *splice_ = TO_OR_CIRCUIT(circ)->rend_splice;
  233. tor_assert(circ->purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
  234. tor_assert(splice_->base_.purpose == CIRCUIT_PURPOSE_REND_ESTABLISHED);
  235. cell->circ_id = splice_->p_circ_id;
  236. cell->command = CELL_RELAY; /* can't be relay_early anyway */
  237. if ((reason = circuit_receive_relay_cell(cell, TO_CIRCUIT(splice_),
  238. CELL_DIRECTION_IN)) < 0) {
  239. log_warn(LD_REND, "Error relaying cell across rendezvous; closing "
  240. "circuits");
  241. /* XXXX Do this here, or just return -1? */
  242. circuit_mark_for_close(circ, -reason);
  243. return reason;
  244. }
  245. return 0;
  246. }
  247. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  248. "Didn't recognize cell, but circ stops here! Closing circ.");
  249. return -END_CIRC_REASON_TORPROTOCOL;
  250. }
  251. log_debug(LD_OR,"Passing on unrecognized cell.");
  252. ++stats_n_relay_cells_relayed; /* XXXX no longer quite accurate {cells}
  253. * we might kill the circ before we relay
  254. * the cells. */
  255. append_cell_to_circuit_queue(circ, chan, cell, cell_direction, 0);
  256. return 0;
  257. }
  258. /** Do the appropriate en/decryptions for <b>cell</b> arriving on
  259. * <b>circ</b> in direction <b>cell_direction</b>.
  260. *
  261. * If cell_direction == CELL_DIRECTION_IN:
  262. * - If we're at the origin (we're the OP), for hops 1..N,
  263. * decrypt cell. If recognized, stop.
  264. * - Else (we're not the OP), encrypt one hop. Cell is not recognized.
  265. *
  266. * If cell_direction == CELL_DIRECTION_OUT:
  267. * - decrypt one hop. Check if recognized.
  268. *
  269. * If cell is recognized, set *recognized to 1, and set
  270. * *layer_hint to the hop that recognized it.
  271. *
  272. * Return -1 to indicate that we should mark the circuit for close,
  273. * else return 0.
  274. */
  275. int
  276. relay_crypt(circuit_t *circ, cell_t *cell, cell_direction_t cell_direction,
  277. crypt_path_t **layer_hint, char *recognized)
  278. {
  279. relay_header_t rh;
  280. tor_assert(circ);
  281. tor_assert(cell);
  282. tor_assert(recognized);
  283. tor_assert(cell_direction == CELL_DIRECTION_IN ||
  284. cell_direction == CELL_DIRECTION_OUT);
  285. if (cell_direction == CELL_DIRECTION_IN) {
  286. if (CIRCUIT_IS_ORIGIN(circ)) { /* We're at the beginning of the circuit.
  287. * We'll want to do layered decrypts. */
  288. crypt_path_t *thishop, *cpath = TO_ORIGIN_CIRCUIT(circ)->cpath;
  289. thishop = cpath;
  290. if (thishop->state != CPATH_STATE_OPEN) {
  291. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  292. "Relay cell before first created cell? Closing.");
  293. return -1;
  294. }
  295. do { /* Remember: cpath is in forward order, that is, first hop first. */
  296. tor_assert(thishop);
  297. if (relay_crypt_one_payload(thishop->b_crypto, cell->payload, 0) < 0)
  298. return -1;
  299. relay_header_unpack(&rh, cell->payload);
  300. if (rh.recognized == 0) {
  301. /* it's possibly recognized. have to check digest to be sure. */
  302. if (relay_digest_matches(thishop->b_digest, cell)) {
  303. *recognized = 1;
  304. *layer_hint = thishop;
  305. return 0;
  306. }
  307. }
  308. thishop = thishop->next;
  309. } while (thishop != cpath && thishop->state == CPATH_STATE_OPEN);
  310. log_fn(LOG_PROTOCOL_WARN, LD_OR,
  311. "Incoming cell at client not recognized. Closing.");
  312. return -1;
  313. } else { /* we're in the middle. Just one crypt. */
  314. if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->p_crypto,
  315. cell->payload, 1) < 0)
  316. return -1;
  317. // log_fn(LOG_DEBUG,"Skipping recognized check, because we're not "
  318. // "the client.");
  319. }
  320. } else /* cell_direction == CELL_DIRECTION_OUT */ {
  321. /* we're in the middle. Just one crypt. */
  322. if (relay_crypt_one_payload(TO_OR_CIRCUIT(circ)->n_crypto,
  323. cell->payload, 0) < 0)
  324. return -1;
  325. relay_header_unpack(&rh, cell->payload);
  326. if (rh.recognized == 0) {
  327. /* it's possibly recognized. have to check digest to be sure. */
  328. if (relay_digest_matches(TO_OR_CIRCUIT(circ)->n_digest, cell)) {
  329. *recognized = 1;
  330. return 0;
  331. }
  332. }
  333. }
  334. return 0;
  335. }
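/* Summary of the origin-side (client) path through relay_crypt() above:
 * for an inbound cell we walk the cpath from the first hop outward,
 * removing one layer of crypto (b_crypto) per hop; after each layer, if
 * rh.recognized is zero and that hop's b_digest verifies the cell, the cell
 * originated at that hop and it is returned via *layer_hint. If no open
 * hop recognizes the cell, the caller closes the circuit. */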
  336. /** Package a relay cell from an edge:
  337. * - Encrypt it to the right layer
  338. * - Append it to the appropriate cell_queue on <b>circ</b>.
  339. */
  340. static int
  341. circuit_package_relay_cell(cell_t *cell, circuit_t *circ,
  342. cell_direction_t cell_direction,
  343. crypt_path_t *layer_hint, streamid_t on_stream,
  344. const char *filename, int lineno)
  345. {
  346. channel_t *chan; /* where to send the cell */
  347. if (circ->marked_for_close) {
  348. /* Circuit is marked; send nothing. */
  349. return 0;
  350. }
  351. if (cell_direction == CELL_DIRECTION_OUT) {
  352. crypt_path_t *thishop; /* counter for repeated crypts */
  353. chan = circ->n_chan;
  354. if (!chan) {
  355. log_warn(LD_BUG,"outgoing relay cell sent from %s:%d has n_chan==NULL."
  356. " Dropping.", filename, lineno);
  357. return 0; /* just drop it */
  358. }
  359. if (!CIRCUIT_IS_ORIGIN(circ)) {
  360. log_warn(LD_BUG,"outgoing relay cell sent from %s:%d on non-origin "
  361. "circ. Dropping.", filename, lineno);
  362. return 0; /* just drop it */
  363. }
  364. relay_set_digest(layer_hint->f_digest, cell);
  365. thishop = layer_hint;
  366. /* moving from farthest to nearest hop */
  367. do {
  368. tor_assert(thishop);
  369. /* XXXX RD This is a bug, right? */
  370. log_debug(LD_OR,"crypting a layer of the relay cell.");
  371. if (relay_crypt_one_payload(thishop->f_crypto, cell->payload, 1) < 0) {
  372. return -1;
  373. }
  374. thishop = thishop->prev;
  375. } while (thishop != TO_ORIGIN_CIRCUIT(circ)->cpath->prev);
  376. } else { /* incoming cell */
  377. or_circuit_t *or_circ;
  378. if (CIRCUIT_IS_ORIGIN(circ)) {
  379. /* We should never package an _incoming_ cell from the circuit
  380. * origin; that means we messed up somewhere. */
  381. log_warn(LD_BUG,"incoming relay cell at origin circuit. Dropping.");
  382. assert_circuit_ok(circ);
  383. return 0; /* just drop it */
  384. }
  385. or_circ = TO_OR_CIRCUIT(circ);
  386. chan = or_circ->p_chan;
  387. relay_set_digest(or_circ->p_digest, cell);
  388. if (relay_crypt_one_payload(or_circ->p_crypto, cell->payload, 1) < 0)
  389. return -1;
  390. }
  391. ++stats_n_relay_cells_relayed;
  392. append_cell_to_circuit_queue(circ, chan, cell, cell_direction, on_stream);
  393. return 0;
  394. }
  395. /** If cell's stream_id matches the stream_id of any conn that's
  396. * attached to circ, return that conn, else return NULL.
  397. */
  398. static edge_connection_t *
  399. relay_lookup_conn(circuit_t *circ, cell_t *cell,
  400. cell_direction_t cell_direction, crypt_path_t *layer_hint)
  401. {
  402. edge_connection_t *tmpconn;
  403. relay_header_t rh;
  404. relay_header_unpack(&rh, cell->payload);
  405. if (!rh.stream_id)
  406. return NULL;
  407. /* IN or OUT cells could have come from either direction, now
  408. * that we allow rendezvous *to* an OP.
  409. */
  410. if (CIRCUIT_IS_ORIGIN(circ)) {
  411. for (tmpconn = TO_ORIGIN_CIRCUIT(circ)->p_streams; tmpconn;
  412. tmpconn=tmpconn->next_stream) {
  413. if (rh.stream_id == tmpconn->stream_id &&
  414. !tmpconn->base_.marked_for_close &&
  415. tmpconn->cpath_layer == layer_hint) {
  416. log_debug(LD_APP,"found conn for stream %d.", rh.stream_id);
  417. return tmpconn;
  418. }
  419. }
  420. } else {
  421. for (tmpconn = TO_OR_CIRCUIT(circ)->n_streams; tmpconn;
  422. tmpconn=tmpconn->next_stream) {
  423. if (rh.stream_id == tmpconn->stream_id &&
  424. !tmpconn->base_.marked_for_close) {
  425. log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
  426. if (cell_direction == CELL_DIRECTION_OUT ||
  427. connection_edge_is_rendezvous_stream(tmpconn))
  428. return tmpconn;
  429. }
  430. }
  431. for (tmpconn = TO_OR_CIRCUIT(circ)->resolving_streams; tmpconn;
  432. tmpconn=tmpconn->next_stream) {
  433. if (rh.stream_id == tmpconn->stream_id &&
  434. !tmpconn->base_.marked_for_close) {
  435. log_debug(LD_EXIT,"found conn for stream %d.", rh.stream_id);
  436. return tmpconn;
  437. }
  438. }
  439. }
  440. return NULL; /* probably a begin relay cell */
  441. }
  442. /** Pack the relay_header_t host-order structure <b>src</b> into
  443. * network-order in the buffer <b>dest</b>. See tor-spec.txt for details
  444. * about the wire format.
  445. */
  446. void
  447. relay_header_pack(uint8_t *dest, const relay_header_t *src)
  448. {
  449. set_uint8(dest, src->command);
  450. set_uint16(dest+1, htons(src->recognized));
  451. set_uint16(dest+3, htons(src->stream_id));
  452. memcpy(dest+5, src->integrity, 4);
  453. set_uint16(dest+9, htons(src->length));
  454. }
  455. /** Unpack the network-order buffer <b>src</b> into a host-order
  456. * relay_header_t structure <b>dest</b>.
  457. */
  458. void
  459. relay_header_unpack(relay_header_t *dest, const uint8_t *src)
  460. {
  461. dest->command = get_uint8(src);
  462. dest->recognized = ntohs(get_uint16(src+1));
  463. dest->stream_id = ntohs(get_uint16(src+3));
  464. memcpy(dest->integrity, src+5, 4);
  465. dest->length = ntohs(get_uint16(src+9));
  466. }
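/* Wire layout implied by relay_header_pack()/relay_header_unpack() above
 * (see tor-spec.txt for the authoritative definition):
 *
 *   offset 0, 1 byte  : relay command
 *   offset 1, 2 bytes : 'recognized' (zero when the cell is fully decrypted)
 *   offset 3, 2 bytes : stream ID
 *   offset 5, 4 bytes : integrity (first 4 bytes of the running digest)
 *   offset 9, 2 bytes : payload length
 *
 * i.e. RELAY_HEADER_SIZE (11) bytes at the start of the cell payload. */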
  467. /** Convert the relay <b>command</b> into a human-readable string. */
  468. static const char *
  469. relay_command_to_string(uint8_t command)
  470. {
  471. static char buf[64];
  472. switch (command) {
  473. case RELAY_COMMAND_BEGIN: return "BEGIN";
  474. case RELAY_COMMAND_DATA: return "DATA";
  475. case RELAY_COMMAND_END: return "END";
  476. case RELAY_COMMAND_CONNECTED: return "CONNECTED";
  477. case RELAY_COMMAND_SENDME: return "SENDME";
  478. case RELAY_COMMAND_EXTEND: return "EXTEND";
  479. case RELAY_COMMAND_EXTENDED: return "EXTENDED";
  480. case RELAY_COMMAND_TRUNCATE: return "TRUNCATE";
  481. case RELAY_COMMAND_TRUNCATED: return "TRUNCATED";
  482. case RELAY_COMMAND_DROP: return "DROP";
  483. case RELAY_COMMAND_RESOLVE: return "RESOLVE";
  484. case RELAY_COMMAND_RESOLVED: return "RESOLVED";
  485. case RELAY_COMMAND_BEGIN_DIR: return "BEGIN_DIR";
  486. case RELAY_COMMAND_ESTABLISH_INTRO: return "ESTABLISH_INTRO";
  487. case RELAY_COMMAND_ESTABLISH_RENDEZVOUS: return "ESTABLISH_RENDEZVOUS";
  488. case RELAY_COMMAND_INTRODUCE1: return "INTRODUCE1";
  489. case RELAY_COMMAND_INTRODUCE2: return "INTRODUCE2";
  490. case RELAY_COMMAND_RENDEZVOUS1: return "RENDEZVOUS1";
  491. case RELAY_COMMAND_RENDEZVOUS2: return "RENDEZVOUS2";
  492. case RELAY_COMMAND_INTRO_ESTABLISHED: return "INTRO_ESTABLISHED";
  493. case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
  494. return "RENDEZVOUS_ESTABLISHED";
  495. case RELAY_COMMAND_INTRODUCE_ACK: return "INTRODUCE_ACK";
  496. case RELAY_COMMAND_EXTEND2: return "EXTEND2";
  497. case RELAY_COMMAND_EXTENDED2: return "EXTENDED2";
  498. default:
  499. tor_snprintf(buf, sizeof(buf), "Unrecognized relay command %u",
  500. (unsigned)command);
  501. return buf;
  502. }
  503. }
  504. /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and send
  505. * it onto the open circuit <b>circ</b>. <b>stream_id</b> is the ID on
  506. * <b>circ</b> for the stream that's sending the relay cell, or 0 if it's a
  507. * control cell. <b>cpath_layer</b> is NULL for OR->OP cells, or the
  508. * destination hop for OP->OR cells.
  509. *
  510. * If you can't send the cell, mark the circuit for close and return -1. Else
  511. * return 0.
  512. */
  513. int
  514. relay_send_command_from_edge_(streamid_t stream_id, circuit_t *circ,
  515. uint8_t relay_command, const char *payload,
  516. size_t payload_len, crypt_path_t *cpath_layer,
  517. const char *filename, int lineno)
  518. {
  519. cell_t cell;
  520. relay_header_t rh;
  521. cell_direction_t cell_direction;
  522. /* XXXX NM Split this function into separate versions per circuit type? */
  523. tor_assert(circ);
  524. tor_assert(payload_len <= RELAY_PAYLOAD_SIZE);
  525. memset(&cell, 0, sizeof(cell_t));
  526. cell.command = CELL_RELAY;
  527. if (cpath_layer) {
  528. cell.circ_id = circ->n_circ_id;
  529. cell_direction = CELL_DIRECTION_OUT;
  530. } else if (! CIRCUIT_IS_ORIGIN(circ)) {
  531. cell.circ_id = TO_OR_CIRCUIT(circ)->p_circ_id;
  532. cell_direction = CELL_DIRECTION_IN;
  533. } else {
  534. return -1;
  535. }
  536. memset(&rh, 0, sizeof(rh));
  537. rh.command = relay_command;
  538. rh.stream_id = stream_id;
  539. rh.length = payload_len;
  540. relay_header_pack(cell.payload, &rh);
  541. if (payload_len)
  542. memcpy(cell.payload+RELAY_HEADER_SIZE, payload, payload_len);
  543. log_debug(LD_OR,"delivering %d cell %s.", relay_command,
  544. cell_direction == CELL_DIRECTION_OUT ? "forward" : "backward");
  545. /* If we are sending an END cell and this circuit is used for a tunneled
  546. * directory request, advance its state. */
  547. if (relay_command == RELAY_COMMAND_END && circ->dirreq_id)
  548. geoip_change_dirreq_state(circ->dirreq_id, DIRREQ_TUNNELED,
  549. DIRREQ_END_CELL_SENT);
  550. if (cell_direction == CELL_DIRECTION_OUT && circ->n_chan) {
  551. /* if we're using relaybandwidthrate, this conn wants priority */
  552. channel_timestamp_client(circ->n_chan);
  553. }
  554. if (cell_direction == CELL_DIRECTION_OUT) {
  555. origin_circuit_t *origin_circ = TO_ORIGIN_CIRCUIT(circ);
  556. if (origin_circ->remaining_relay_early_cells > 0 &&
  557. (relay_command == RELAY_COMMAND_EXTEND ||
  558. relay_command == RELAY_COMMAND_EXTEND2 ||
  559. cpath_layer != origin_circ->cpath)) {
  560. /* If we've got any relay_early cells left and (we're sending
  561. * an extend cell or we're not talking to the first hop), use
  562. * one of them. Don't worry about the conn protocol version:
  563. * append_cell_to_circuit_queue will fix it up. */
  564. cell.command = CELL_RELAY_EARLY;
  565. --origin_circ->remaining_relay_early_cells;
  566. log_debug(LD_OR, "Sending a RELAY_EARLY cell; %d remaining.",
  567. (int)origin_circ->remaining_relay_early_cells);
  568. /* Memorize the command that is sent as RELAY_EARLY cell; helps debug
  569. * task 878. */
  570. origin_circ->relay_early_commands[
  571. origin_circ->relay_early_cells_sent++] = relay_command;
  572. } else if (relay_command == RELAY_COMMAND_EXTEND ||
  573. relay_command == RELAY_COMMAND_EXTEND2) {
  574. /* If no RELAY_EARLY cells can be sent over this circuit, log which
  575. * commands have been sent as RELAY_EARLY cells before; helps debug
  576. * task 878. */
  577. smartlist_t *commands_list = smartlist_new();
  578. int i = 0;
  579. char *commands = NULL;
  580. for (; i < origin_circ->relay_early_cells_sent; i++)
  581. smartlist_add(commands_list, (char *)
  582. relay_command_to_string(origin_circ->relay_early_commands[i]));
  583. commands = smartlist_join_strings(commands_list, ",", 0, NULL);
  584. log_warn(LD_BUG, "Uh-oh. We're sending a RELAY_COMMAND_EXTEND cell, "
  585. "but we have run out of RELAY_EARLY cells on that circuit. "
  586. "Commands sent before: %s", commands);
  587. tor_free(commands);
  588. smartlist_free(commands_list);
  589. }
  590. }
  591. if (circuit_package_relay_cell(&cell, circ, cell_direction, cpath_layer,
  592. stream_id, filename, lineno) < 0) {
  593. log_warn(LD_BUG,"circuit_package_relay_cell failed. Closing.");
  594. circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
  595. return -1;
  596. }
  597. return 0;
  598. }
  599. /** Make a relay cell out of <b>relay_command</b> and <b>payload</b>, and
  600. * send it onto the open circuit <b>circ</b>. <b>fromconn</b> is the stream
  601. * that's sending the relay cell, or NULL if it's a control cell.
  602. * <b>cpath_layer</b> is NULL for OR->OP cells, or the destination hop
  603. * for OP->OR cells.
  604. *
  605. * If you can't send the cell, mark the circuit for close and
  606. * return -1. Else return 0.
  607. */
  608. int
  609. connection_edge_send_command(edge_connection_t *fromconn,
  610. uint8_t relay_command, const char *payload,
  611. size_t payload_len)
  612. {
  613. /* XXXX NM Split this function into separate versions per circuit type? */
  614. circuit_t *circ;
  615. crypt_path_t *cpath_layer = fromconn->cpath_layer;
  616. tor_assert(fromconn);
  617. circ = fromconn->on_circuit;
  618. if (fromconn->base_.marked_for_close) {
  619. log_warn(LD_BUG,
  620. "called on conn that's already marked for close at %s:%d.",
  621. fromconn->base_.marked_for_close_file,
  622. fromconn->base_.marked_for_close);
  623. return 0;
  624. }
  625. if (!circ) {
  626. if (fromconn->base_.type == CONN_TYPE_AP) {
  627. log_info(LD_APP,"no circ. Closing conn.");
  628. connection_mark_unattached_ap(EDGE_TO_ENTRY_CONN(fromconn),
  629. END_STREAM_REASON_INTERNAL);
  630. } else {
  631. log_info(LD_EXIT,"no circ. Closing conn.");
  632. fromconn->edge_has_sent_end = 1; /* no circ to send to */
  633. fromconn->end_reason = END_STREAM_REASON_INTERNAL;
  634. connection_mark_for_close(TO_CONN(fromconn));
  635. }
  636. return -1;
  637. }
  638. if (circ->marked_for_close) {
  639. /* The circuit has been marked, but not freed yet. When it's freed, it
  640. * will mark this connection for close. */
  641. return -1;
  642. }
  643. return relay_send_command_from_edge(fromconn->stream_id, circ,
  644. relay_command, payload,
  645. payload_len, cpath_layer);
  646. }
  647. /** How many times will I retry a stream that fails due to DNS
  648. * resolve failure or misc error?
  649. */
  650. #define MAX_RESOLVE_FAILURES 3
  651. /** Return 1 if reason is something that you should retry if you
  652. * get the end cell before you've connected; else return 0. */
  653. static int
  654. edge_reason_is_retriable(int reason)
  655. {
  656. return reason == END_STREAM_REASON_HIBERNATING ||
  657. reason == END_STREAM_REASON_RESOURCELIMIT ||
  658. reason == END_STREAM_REASON_EXITPOLICY ||
  659. reason == END_STREAM_REASON_RESOLVEFAILED ||
  660. reason == END_STREAM_REASON_MISC ||
  661. reason == END_STREAM_REASON_NOROUTE;
  662. }
  663. /** Called when we receive an END cell on a stream that isn't open yet,
  664. * from the client side.
  665. * Arguments are as for connection_edge_process_relay_cell().
  666. */
  667. static int
  668. connection_ap_process_end_not_open(
  669. relay_header_t *rh, cell_t *cell, origin_circuit_t *circ,
  670. entry_connection_t *conn, crypt_path_t *layer_hint)
  671. {
  672. node_t *exitrouter;
  673. int reason = *(cell->payload+RELAY_HEADER_SIZE);
  674. int control_reason;
  675. edge_connection_t *edge_conn = ENTRY_TO_EDGE_CONN(conn);
  676. (void) layer_hint; /* unused */
  677. if (rh->length > 0) {
  678. if (reason == END_STREAM_REASON_TORPROTOCOL ||
  679. reason == END_STREAM_REASON_DESTROY) {
  680. /* Both of these reasons could mean a failed tag
  681. * hit the exit and it complained. Do not probe.
  682. * Fail the circuit. */
  683. circ->path_state = PATH_STATE_USE_FAILED;
  684. return -END_CIRC_REASON_TORPROTOCOL;
  685. } else if (reason == END_STREAM_REASON_INTERNAL) {
  686. /* We can't infer success or failure, since older Tors report
  687. * ENETUNREACH as END_STREAM_REASON_INTERNAL. */
  688. } else {
  689. /* Path bias: If we get a valid reason code from the exit,
  690. * it wasn't due to tagging.
  691. *
  692. * We rely on recognized+digest being strong enough to make
  693. * tags unlikely to allow us to get tagged, yet 'recognized'
  694. * reason codes here. */
  695. pathbias_mark_use_success(circ);
  696. }
  697. }
  698. if (rh->length == 0) {
  699. reason = END_STREAM_REASON_MISC;
  700. }
  701. control_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
  702. if (edge_reason_is_retriable(reason) &&
  703. /* avoid retry if rend */
  704. !connection_edge_is_rendezvous_stream(edge_conn)) {
  705. const char *chosen_exit_digest =
  706. circ->build_state->chosen_exit->identity_digest;
  707. log_info(LD_APP,"Address '%s' refused due to '%s'. Considering retrying.",
  708. safe_str(conn->socks_request->address),
  709. stream_end_reason_to_string(reason));
  710. exitrouter = node_get_mutable_by_id(chosen_exit_digest);
  711. switch (reason) {
  712. case END_STREAM_REASON_EXITPOLICY: {
  713. tor_addr_t addr;
  714. tor_addr_make_unspec(&addr);
  715. if (rh->length >= 5) {
  716. int ttl = -1;
  717. tor_addr_make_unspec(&addr);
  718. if (rh->length == 5 || rh->length == 9) {
  719. tor_addr_from_ipv4n(&addr,
  720. get_uint32(cell->payload+RELAY_HEADER_SIZE+1));
  721. if (rh->length == 9)
  722. ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+5));
  723. } else if (rh->length == 17 || rh->length == 21) {
  724. tor_addr_from_ipv6_bytes(&addr,
  725. (char*)(cell->payload+RELAY_HEADER_SIZE+1));
  726. if (rh->length == 21)
  727. ttl = (int)ntohl(get_uint32(cell->payload+RELAY_HEADER_SIZE+17));
  728. }
  729. if (tor_addr_is_null(&addr)) {
  730. log_info(LD_APP,"Address '%s' resolved to 0.0.0.0. Closing.",
  731. safe_str(conn->socks_request->address));
  732. connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
  733. return 0;
  734. }
  735. if ((tor_addr_family(&addr) == AF_INET &&
  736. !conn->entry_cfg.ipv4_traffic) ||
  737. (tor_addr_family(&addr) == AF_INET6 &&
  738. !conn->entry_cfg.ipv6_traffic)) {
  739. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  740. "Got an EXITPOLICY failure on a connection with a "
  741. "mismatched family. Closing.");
  742. connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
  743. return 0;
  744. }
  745. if (get_options()->ClientDNSRejectInternalAddresses &&
  746. tor_addr_is_internal(&addr, 0)) {
  747. log_info(LD_APP,"Address '%s' resolved to internal. Closing.",
  748. safe_str(conn->socks_request->address));
  749. connection_mark_unattached_ap(conn, END_STREAM_REASON_TORPROTOCOL);
  750. return 0;
  751. }
  752. client_dns_set_addressmap(conn,
  753. conn->socks_request->address, &addr,
  754. conn->chosen_exit_name, ttl);
  755. {
  756. char new_addr[TOR_ADDR_BUF_LEN];
  757. tor_addr_to_str(new_addr, &addr, sizeof(new_addr), 1);
  758. if (strcmp(conn->socks_request->address, new_addr)) {
  759. strlcpy(conn->socks_request->address, new_addr,
  760. sizeof(conn->socks_request->address));
  761. control_event_stream_status(conn, STREAM_EVENT_REMAP, 0);
  762. }
  763. }
  764. }
  765. /* check if the exit *ought* to have allowed it */
  766. adjust_exit_policy_from_exitpolicy_failure(circ,
  767. conn,
  768. exitrouter,
  769. &addr);
  770. if (conn->chosen_exit_optional ||
  771. conn->chosen_exit_retries) {
  772. /* stop wanting a specific exit */
  773. conn->chosen_exit_optional = 0;
  774. /* A non-zero chosen_exit_retries can happen if we set a
  775. * TrackHostExits for this address under a port that the exit
  776. * relay allows, but then try the same address with a different
  777. * port that it doesn't allow to exit. We shouldn't unregister
  778. * the mapping, since it is probably still wanted on the
  779. * original port. But now we give away to the exit relay that
  780. * we probably have a TrackHostExits on it. So be it. */
  781. conn->chosen_exit_retries = 0;
  782. tor_free(conn->chosen_exit_name); /* clears it */
  783. }
  784. if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
  785. return 0;
  786. /* else, conn will get closed below */
  787. break;
  788. }
  789. case END_STREAM_REASON_CONNECTREFUSED:
  790. if (!conn->chosen_exit_optional)
  791. break; /* break means it'll close, below */
  792. /* Else fall through: expire this circuit, clear the
  793. * chosen_exit_name field, and try again. */
  794. /* Falls through. */
  795. case END_STREAM_REASON_RESOLVEFAILED:
  796. case END_STREAM_REASON_TIMEOUT:
  797. case END_STREAM_REASON_MISC:
  798. case END_STREAM_REASON_NOROUTE:
  799. if (client_dns_incr_failures(conn->socks_request->address)
  800. < MAX_RESOLVE_FAILURES) {
  801. /* We haven't retried too many times; reattach the connection. */
  802. circuit_log_path(LOG_INFO,LD_APP,circ);
  803. /* Mark this circuit "unusable for new streams". */
  804. mark_circuit_unusable_for_new_conns(circ);
  805. if (conn->chosen_exit_optional) {
  806. /* stop wanting a specific exit */
  807. conn->chosen_exit_optional = 0;
  808. tor_free(conn->chosen_exit_name); /* clears it */
  809. }
  810. if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
  811. return 0;
  812. /* else, conn will get closed below */
  813. } else {
  814. log_notice(LD_APP,
  815. "Have tried resolving or connecting to address '%s' "
  816. "at %d different places. Giving up.",
  817. safe_str(conn->socks_request->address),
  818. MAX_RESOLVE_FAILURES);
  819. /* clear the failures, so it will have a full try next time */
  820. client_dns_clear_failures(conn->socks_request->address);
  821. }
  822. break;
  823. case END_STREAM_REASON_HIBERNATING:
  824. case END_STREAM_REASON_RESOURCELIMIT:
  825. if (exitrouter) {
  826. policies_set_node_exitpolicy_to_reject_all(exitrouter);
  827. }
  828. if (conn->chosen_exit_optional) {
  829. /* stop wanting a specific exit */
  830. conn->chosen_exit_optional = 0;
  831. tor_free(conn->chosen_exit_name); /* clears it */
  832. }
  833. if (connection_ap_detach_retriable(conn, circ, control_reason) >= 0)
  834. return 0;
  835. /* else, will close below */
  836. break;
  837. } /* end switch */
  838. log_info(LD_APP,"Giving up on retrying; conn can't be handled.");
  839. }
  840. log_info(LD_APP,
  841. "Edge got end (%s) before we're connected. Marking for close.",
  842. stream_end_reason_to_string(rh->length > 0 ? reason : -1));
  843. circuit_log_path(LOG_INFO,LD_APP,circ);
  844. /* need to test because of detach_retriable */
  845. if (!ENTRY_TO_CONN(conn)->marked_for_close)
  846. connection_mark_unattached_ap(conn, control_reason);
  847. return 0;
  848. }
  849. /** Called when we have gotten an END_REASON_EXITPOLICY failure on <b>circ</b>
  850. * for <b>conn</b>, while attempting to connect via <b>node</b>. If the node
  851. * told us which address it rejected, then <b>addr</b> is that address;
  852. * otherwise it is AF_UNSPEC.
  853. *
  854. * If we are sure the node should have allowed this address, mark the node as
  855. * having a reject *:* exit policy. Otherwise, mark the circuit as unusable
  856. * for this particular address.
  857. **/
  858. static void
  859. adjust_exit_policy_from_exitpolicy_failure(origin_circuit_t *circ,
  860. entry_connection_t *conn,
  861. node_t *node,
  862. const tor_addr_t *addr)
  863. {
  864. int make_reject_all = 0;
  865. const sa_family_t family = tor_addr_family(addr);
  866. if (node) {
  867. tor_addr_t tmp;
  868. int asked_for_family = tor_addr_parse(&tmp, conn->socks_request->address);
  869. if (family == AF_UNSPEC) {
  870. make_reject_all = 1;
  871. } else if (node_exit_policy_is_exact(node, family) &&
  872. asked_for_family != -1 && !conn->chosen_exit_name) {
  873. make_reject_all = 1;
  874. }
  875. if (make_reject_all) {
  876. log_info(LD_APP,
  877. "Exitrouter %s seems to be more restrictive than its exit "
  878. "policy. Not using this router as exit for now.",
  879. node_describe(node));
  880. policies_set_node_exitpolicy_to_reject_all(node);
  881. }
  882. }
  883. if (family != AF_UNSPEC)
  884. addr_policy_append_reject_addr(&circ->prepend_policy, addr);
  885. }
  886. /** Helper: change the socks_request->address field on conn to the
  887. * dotted-quad representation of <b>new_addr</b>,
  888. * and send an appropriate REMAP event. */
  889. static void
  890. remap_event_helper(entry_connection_t *conn, const tor_addr_t *new_addr)
  891. {
  892. tor_addr_to_str(conn->socks_request->address, new_addr,
  893. sizeof(conn->socks_request->address),
  894. 1);
  895. control_event_stream_status(conn, STREAM_EVENT_REMAP,
  896. REMAP_STREAM_SOURCE_EXIT);
  897. }
  898. /** Extract the contents of a connected cell in <b>cell</b>, whose relay
  899. * header has already been parsed into <b>rh</b>. On success, set
  900. * <b>addr_out</b> to the address we're connected to, and <b>ttl_out</b> to
  901. * the ttl of that address, in seconds, and return 0. On failure, return
  902. * -1. */
  903. STATIC int
  904. connected_cell_parse(const relay_header_t *rh, const cell_t *cell,
  905. tor_addr_t *addr_out, int *ttl_out)
  906. {
  907. uint32_t bytes;
  908. const uint8_t *payload = cell->payload + RELAY_HEADER_SIZE;
  909. tor_addr_make_unspec(addr_out);
  910. *ttl_out = -1;
  911. if (rh->length == 0)
  912. return 0;
  913. if (rh->length < 4)
  914. return -1;
  915. bytes = ntohl(get_uint32(payload));
  916. /* If bytes is 0, this is maybe a v6 address. Otherwise it's a v4 address */
  917. if (bytes != 0) {
  918. /* v4 address */
  919. tor_addr_from_ipv4h(addr_out, bytes);
  920. if (rh->length >= 8) {
  921. bytes = ntohl(get_uint32(payload + 4));
  922. if (bytes <= INT32_MAX)
  923. *ttl_out = bytes;
  924. }
  925. } else {
  926. if (rh->length < 25) /* 4 bytes of 0s, 1 addr, 16 ipv6, 4 ttl. */
  927. return -1;
  928. if (get_uint8(payload + 4) != 6)
  929. return -1;
  930. tor_addr_from_ipv6_bytes(addr_out, (char*)(payload + 5));
  931. bytes = ntohl(get_uint32(payload + 21));
  932. if (bytes <= INT32_MAX)
  933. *ttl_out = (int) bytes;
  934. }
  935. return 0;
  936. }
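/* CONNECTED cell bodies accepted by connected_cell_parse() above:
 *
 *   empty payload                                   -> no address reported
 *   4-byte IPv4 address [+ 4-byte TTL]              -> first word nonzero
 *   4 zero bytes, address-type byte (6),
 *     16-byte IPv6 address, 4-byte TTL              -> at least 25 bytes
 */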
  937. /** Drop all storage held by <b>addr</b>. */
  938. STATIC void
  939. address_ttl_free(address_ttl_t *addr)
  940. {
  941. if (!addr)
  942. return;
  943. tor_free(addr->hostname);
  944. tor_free(addr);
  945. }
  946. /** Parse a resolved cell in <b>cell</b>, with parsed header in <b>rh</b>.
  947. * Return -1 on parse error. On success, add one or more newly allocated
  948. * address_ttl_t to <b>addresses_out</b>; set *<b>errcode_out</b> to
  949. * one of 0, RESOLVED_TYPE_ERROR, or RESOLVED_TYPE_ERROR_TRANSIENT, and
  950. * return 0. */
  951. STATIC int
  952. resolved_cell_parse(const cell_t *cell, const relay_header_t *rh,
  953. smartlist_t *addresses_out, int *errcode_out)
  954. {
  955. const uint8_t *cp;
  956. uint8_t answer_type;
  957. size_t answer_len;
  958. address_ttl_t *addr;
  959. size_t remaining;
  960. int errcode = 0;
  961. smartlist_t *addrs;
  962. tor_assert(cell);
  963. tor_assert(rh);
  964. tor_assert(addresses_out);
  965. tor_assert(errcode_out);
  966. *errcode_out = 0;
  967. if (rh->length > RELAY_PAYLOAD_SIZE)
  968. return -1;
  969. addrs = smartlist_new();
  970. cp = cell->payload + RELAY_HEADER_SIZE;
  971. remaining = rh->length;
  972. while (remaining) {
  973. const uint8_t *cp_orig = cp;
  974. if (remaining < 2)
  975. goto err;
  976. answer_type = *cp++;
  977. answer_len = *cp++;
  978. if (remaining < 2 + answer_len + 4) {
  979. goto err;
  980. }
  981. if (answer_type == RESOLVED_TYPE_IPV4) {
  982. if (answer_len != 4) {
  983. goto err;
  984. }
  985. addr = tor_malloc_zero(sizeof(*addr));
  986. tor_addr_from_ipv4n(&addr->addr, get_uint32(cp));
  987. cp += 4;
  988. addr->ttl = ntohl(get_uint32(cp));
  989. cp += 4;
  990. smartlist_add(addrs, addr);
  991. } else if (answer_type == RESOLVED_TYPE_IPV6) {
  992. if (answer_len != 16)
  993. goto err;
  994. addr = tor_malloc_zero(sizeof(*addr));
  995. tor_addr_from_ipv6_bytes(&addr->addr, (const char*) cp);
  996. cp += 16;
  997. addr->ttl = ntohl(get_uint32(cp));
  998. cp += 4;
  999. smartlist_add(addrs, addr);
  1000. } else if (answer_type == RESOLVED_TYPE_HOSTNAME) {
  1001. if (answer_len == 0) {
  1002. goto err;
  1003. }
  1004. addr = tor_malloc_zero(sizeof(*addr));
  1005. addr->hostname = tor_memdup_nulterm(cp, answer_len);
  1006. cp += answer_len;
  1007. addr->ttl = ntohl(get_uint32(cp));
  1008. cp += 4;
  1009. smartlist_add(addrs, addr);
  1010. } else if (answer_type == RESOLVED_TYPE_ERROR_TRANSIENT ||
  1011. answer_type == RESOLVED_TYPE_ERROR) {
  1012. errcode = answer_type;
  1013. /* Ignore the error contents */
  1014. cp += answer_len + 4;
  1015. } else {
  1016. cp += answer_len + 4;
  1017. }
  1018. tor_assert(((ssize_t)remaining) >= (cp - cp_orig));
  1019. remaining -= (cp - cp_orig);
  1020. }
  1021. if (errcode && smartlist_len(addrs) == 0) {
  1022. /* Report an error only if there were no results. */
  1023. *errcode_out = errcode;
  1024. }
  1025. smartlist_add_all(addresses_out, addrs);
  1026. smartlist_free(addrs);
  1027. return 0;
  1028. err:
  1029. /* On parse error, don't report any results */
  1030. SMARTLIST_FOREACH(addrs, address_ttl_t *, a, address_ttl_free(a));
  1031. smartlist_free(addrs);
  1032. return -1;
  1033. }
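/* Each answer consumed by resolved_cell_parse() above is encoded as:
 *   1 byte   answer type (RESOLVED_TYPE_IPV4/IPV6/HOSTNAME/ERROR*)
 *   1 byte   answer length
 *   N bytes  answer value
 *   4 bytes  TTL
 * An error answer sets *errcode_out only if no usable address was parsed. */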
  1034. /** Helper for connection_edge_process_resolved_cell: given an error code,
  1035. * an entry_connection, and a list of address_ttl_t *, report the best answer
  1036. * to the entry_connection. */
  1037. static void
  1038. connection_ap_handshake_socks_got_resolved_cell(entry_connection_t *conn,
  1039. int error_code,
  1040. smartlist_t *results)
  1041. {
  1042. address_ttl_t *addr_ipv4 = NULL;
  1043. address_ttl_t *addr_ipv6 = NULL;
  1044. address_ttl_t *addr_hostname = NULL;
  1045. address_ttl_t *addr_best = NULL;
  1046. /* If it's an error code, that's easy. */
  1047. if (error_code) {
  1048. tor_assert(error_code == RESOLVED_TYPE_ERROR ||
  1049. error_code == RESOLVED_TYPE_ERROR_TRANSIENT);
  1050. connection_ap_handshake_socks_resolved(conn,
  1051. error_code,0,NULL,-1,-1);
  1052. return;
  1053. }
  1054. /* Get the first answer of each type. */
  1055. SMARTLIST_FOREACH_BEGIN(results, address_ttl_t *, addr) {
  1056. if (addr->hostname) {
  1057. if (!addr_hostname) {
  1058. addr_hostname = addr;
  1059. }
  1060. } else if (tor_addr_family(&addr->addr) == AF_INET) {
  1061. if (!addr_ipv4 && conn->entry_cfg.ipv4_traffic) {
  1062. addr_ipv4 = addr;
  1063. }
  1064. } else if (tor_addr_family(&addr->addr) == AF_INET6) {
  1065. if (!addr_ipv6 && conn->entry_cfg.ipv6_traffic) {
  1066. addr_ipv6 = addr;
  1067. }
  1068. }
  1069. } SMARTLIST_FOREACH_END(addr);
  1070. /* Now figure out which type we wanted to deliver. */
  1071. if (conn->socks_request->command == SOCKS_COMMAND_RESOLVE_PTR) {
  1072. if (addr_hostname) {
  1073. connection_ap_handshake_socks_resolved(conn,
  1074. RESOLVED_TYPE_HOSTNAME,
  1075. strlen(addr_hostname->hostname),
  1076. (uint8_t*)addr_hostname->hostname,
  1077. addr_hostname->ttl,-1);
  1078. } else {
  1079. connection_ap_handshake_socks_resolved(conn,
  1080. RESOLVED_TYPE_ERROR,0,NULL,-1,-1);
  1081. }
  1082. return;
  1083. }
  1084. if (conn->entry_cfg.prefer_ipv6) {
  1085. addr_best = addr_ipv6 ? addr_ipv6 : addr_ipv4;
  1086. } else {
  1087. addr_best = addr_ipv4 ? addr_ipv4 : addr_ipv6;
  1088. }
  1089. /* Now convert it to the ugly old interface */
  1090. if (! addr_best) {
  1091. connection_ap_handshake_socks_resolved(conn,
  1092. RESOLVED_TYPE_ERROR,0,NULL,-1,-1);
  1093. return;
  1094. }
  1095. connection_ap_handshake_socks_resolved_addr(conn,
  1096. &addr_best->addr,
  1097. addr_best->ttl,
  1098. -1);
  1099. remap_event_helper(conn, &addr_best->addr);
  1100. }
  1101. /** Handle a RELAY_COMMAND_RESOLVED cell that we received on a non-open AP
  1102. * stream. */
  1103. STATIC int
  1104. connection_edge_process_resolved_cell(edge_connection_t *conn,
  1105. const cell_t *cell,
  1106. const relay_header_t *rh)
  1107. {
  1108. entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
  1109. smartlist_t *resolved_addresses = NULL;
  1110. int errcode = 0;
  1111. if (conn->base_.state != AP_CONN_STATE_RESOLVE_WAIT) {
  1112. log_fn(LOG_PROTOCOL_WARN, LD_APP, "Got a 'resolved' cell while "
  1113. "not in state resolve_wait. Dropping.");
  1114. return 0;
  1115. }
  1116. tor_assert(SOCKS_COMMAND_IS_RESOLVE(entry_conn->socks_request->command));
  1117. resolved_addresses = smartlist_new();
  1118. if (resolved_cell_parse(cell, rh, resolved_addresses, &errcode)) {
  1119. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1120. "Dropping malformed 'resolved' cell");
  1121. connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL);
  1122. goto done;
  1123. }
  1124. if (get_options()->ClientDNSRejectInternalAddresses) {
  1125. int orig_len = smartlist_len(resolved_addresses);
  1126. SMARTLIST_FOREACH_BEGIN(resolved_addresses, address_ttl_t *, addr) {
  1127. if (addr->hostname == NULL && tor_addr_is_internal(&addr->addr, 0)) {
  1128. log_info(LD_APP, "Got a resolved cell with answer %s; dropping that "
  1129. "answer.",
  1130. safe_str_client(fmt_addr(&addr->addr)));
  1131. address_ttl_free(addr);
  1132. SMARTLIST_DEL_CURRENT(resolved_addresses, addr);
  1133. }
  1134. } SMARTLIST_FOREACH_END(addr);
  1135. if (orig_len && smartlist_len(resolved_addresses) == 0) {
  1136. log_info(LD_APP, "Got a resolved cell with only private addresses; "
  1137. "dropping it.");
  1138. connection_ap_handshake_socks_resolved(entry_conn,
  1139. RESOLVED_TYPE_ERROR_TRANSIENT,
  1140. 0, NULL, 0, TIME_MAX);
  1141. connection_mark_unattached_ap(entry_conn,
  1142. END_STREAM_REASON_TORPROTOCOL);
  1143. goto done;
  1144. }
  1145. }
  1146. connection_ap_handshake_socks_got_resolved_cell(entry_conn,
  1147. errcode,
  1148. resolved_addresses);
  1149. connection_mark_unattached_ap(entry_conn,
  1150. END_STREAM_REASON_DONE |
  1151. END_STREAM_REASON_FLAG_ALREADY_SOCKS_REPLIED);
  1152. done:
  1153. SMARTLIST_FOREACH(resolved_addresses, address_ttl_t *, addr,
  1154. address_ttl_free(addr));
  1155. smartlist_free(resolved_addresses);
  1156. return 0;
  1157. }
  1158. /** An incoming relay cell has arrived from circuit <b>circ</b> to
  1159. * stream <b>conn</b>.
  1160. *
  1161. * The arguments here are the same as in
  1162. * connection_edge_process_relay_cell() below; this function is called
  1163. * from there when <b>conn</b> is defined and not in an open state.
  1164. */
  1165. static int
  1166. connection_edge_process_relay_cell_not_open(
  1167. relay_header_t *rh, cell_t *cell, circuit_t *circ,
  1168. edge_connection_t *conn, crypt_path_t *layer_hint)
  1169. {
  1170. if (rh->command == RELAY_COMMAND_END) {
  1171. if (CIRCUIT_IS_ORIGIN(circ) && conn->base_.type == CONN_TYPE_AP) {
  1172. return connection_ap_process_end_not_open(rh, cell,
  1173. TO_ORIGIN_CIRCUIT(circ),
  1174. EDGE_TO_ENTRY_CONN(conn),
  1175. layer_hint);
  1176. } else {
  1177. /* we just got an 'end', don't need to send one */
  1178. conn->edge_has_sent_end = 1;
  1179. conn->end_reason = *(cell->payload+RELAY_HEADER_SIZE) |
  1180. END_STREAM_REASON_FLAG_REMOTE;
  1181. connection_mark_for_close(TO_CONN(conn));
  1182. return 0;
  1183. }
  1184. }
  1185. if (conn->base_.type == CONN_TYPE_AP &&
  1186. rh->command == RELAY_COMMAND_CONNECTED) {
  1187. tor_addr_t addr;
  1188. int ttl;
  1189. entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
  1190. tor_assert(CIRCUIT_IS_ORIGIN(circ));
  1191. if (conn->base_.state != AP_CONN_STATE_CONNECT_WAIT) {
  1192. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1193. "Got 'connected' while not in state connect_wait. Dropping.");
  1194. return 0;
  1195. }
  1196. CONNECTION_AP_EXPECT_NONPENDING(entry_conn);
  1197. conn->base_.state = AP_CONN_STATE_OPEN;
  1198. log_info(LD_APP,"'connected' received for circid %u streamid %d "
  1199. "after %d seconds.",
  1200. (unsigned)circ->n_circ_id,
  1201. rh->stream_id,
  1202. (int)(time(NULL) - conn->base_.timestamp_lastread));
  1203. if (connected_cell_parse(rh, cell, &addr, &ttl) < 0) {
  1204. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1205. "Got a badly formatted connected cell. Closing.");
  1206. connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
  1207. connection_mark_unattached_ap(entry_conn, END_STREAM_REASON_TORPROTOCOL);
  1208. return 0;
  1209. }
  1210. if (tor_addr_family(&addr) != AF_UNSPEC) {
  1211. const sa_family_t family = tor_addr_family(&addr);
  1212. if (tor_addr_is_null(&addr) ||
  1213. (get_options()->ClientDNSRejectInternalAddresses &&
  1214. tor_addr_is_internal(&addr, 0))) {
  1215. log_info(LD_APP, "...but it claims the IP address was %s. Closing.",
  1216. fmt_addr(&addr));
  1217. connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
  1218. connection_mark_unattached_ap(entry_conn,
  1219. END_STREAM_REASON_TORPROTOCOL);
  1220. return 0;
  1221. }
  1222. if ((family == AF_INET && ! entry_conn->entry_cfg.ipv4_traffic) ||
  1223. (family == AF_INET6 && ! entry_conn->entry_cfg.ipv6_traffic)) {
  1224. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1225. "Got a connected cell to %s with unsupported address family."
  1226. " Closing.", fmt_addr(&addr));
  1227. connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
  1228. connection_mark_unattached_ap(entry_conn,
  1229. END_STREAM_REASON_TORPROTOCOL);
  1230. return 0;
  1231. }
  1232. client_dns_set_addressmap(entry_conn,
  1233. entry_conn->socks_request->address, &addr,
  1234. entry_conn->chosen_exit_name, ttl);
  1235. remap_event_helper(entry_conn, &addr);
  1236. }
  1237. circuit_log_path(LOG_INFO,LD_APP,TO_ORIGIN_CIRCUIT(circ));
  1238. /* don't send a socks reply to transparent conns */
  1239. tor_assert(entry_conn->socks_request != NULL);
  1240. if (!entry_conn->socks_request->has_finished)
  1241. connection_ap_handshake_socks_reply(entry_conn, NULL, 0, 0);
  1242. /* Was it a linked dir conn? If so, a dir request just started to
  1243. * fetch something; this could be a bootstrap status milestone. */
  1244. log_debug(LD_APP, "considering");
  1245. if (TO_CONN(conn)->linked_conn &&
  1246. TO_CONN(conn)->linked_conn->type == CONN_TYPE_DIR) {
  1247. connection_t *dirconn = TO_CONN(conn)->linked_conn;
  1248. log_debug(LD_APP, "it is! %d", dirconn->purpose);
  1249. switch (dirconn->purpose) {
  1250. case DIR_PURPOSE_FETCH_CERTIFICATE:
  1251. if (consensus_is_waiting_for_certs())
  1252. control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_KEYS, 0);
  1253. break;
  1254. case DIR_PURPOSE_FETCH_CONSENSUS:
  1255. control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_STATUS, 0);
  1256. break;
  1257. case DIR_PURPOSE_FETCH_SERVERDESC:
  1258. case DIR_PURPOSE_FETCH_MICRODESC:
  1259. if (TO_DIR_CONN(dirconn)->router_purpose == ROUTER_PURPOSE_GENERAL)
  1260. control_event_bootstrap(BOOTSTRAP_STATUS_LOADING_DESCRIPTORS,
  1261. count_loading_descriptors_progress());
  1262. break;
  1263. }
  1264. }
  1265. /* This is definitely a success, so forget about any pending data we
  1266. * had sent. */
  1267. if (entry_conn->pending_optimistic_data) {
  1268. buf_free(entry_conn->pending_optimistic_data);
  1269. entry_conn->pending_optimistic_data = NULL;
  1270. }
  1271. /* handle anything that might have queued */
  1272. if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
  1273. /* (We already sent an end cell if possible) */
  1274. connection_mark_for_close(TO_CONN(conn));
  1275. return 0;
  1276. }
  1277. return 0;
  1278. }
  1279. if (conn->base_.type == CONN_TYPE_AP &&
  1280. rh->command == RELAY_COMMAND_RESOLVED) {
  1281. return connection_edge_process_resolved_cell(conn, cell, rh);
  1282. }
  1283. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1284. "Got an unexpected relay command %d, in state %d (%s). Dropping.",
  1285. rh->command, conn->base_.state,
  1286. conn_state_to_string(conn->base_.type, conn->base_.state));
  1287. return 0; /* for forward compatibility, don't kill the circuit */
  1288. // connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
  1289. // connection_mark_for_close(conn);
  1290. // return -1;
  1291. }
  1292. /** An incoming relay cell has arrived on circuit <b>circ</b>. If
  1293. * <b>conn</b> is NULL this is a control cell, else <b>cell</b> is
  1294. * destined for <b>conn</b>.
  1295. *
  1296. * If <b>layer_hint</b> is defined, then we're the origin of the
  1297. * circuit, and it specifies the hop that packaged <b>cell</b>.
  1298. *
  1299. * Return -reason if you want to warn and tear down the circuit, else 0.
  1300. */
  1301. static int
  1302. connection_edge_process_relay_cell(cell_t *cell, circuit_t *circ,
  1303. edge_connection_t *conn,
  1304. crypt_path_t *layer_hint)
  1305. {
  1306. static int num_seen=0;
  1307. relay_header_t rh;
  1308. unsigned domain = layer_hint?LD_APP:LD_EXIT;
  1309. int reason;
  1310. int optimistic_data = 0; /* Set to 1 if we receive data on a stream
  1311. * that's in the EXIT_CONN_STATE_RESOLVING
  1312. * or EXIT_CONN_STATE_CONNECTING states. */
  1313. tor_assert(cell);
  1314. tor_assert(circ);
  1315. relay_header_unpack(&rh, cell->payload);
  1316. // log_fn(LOG_DEBUG,"command %d stream %d", rh.command, rh.stream_id);
  1317. num_seen++;
  1318. log_debug(domain, "Now seen %d relay cells here (command %d, stream %d).",
  1319. num_seen, rh.command, rh.stream_id);
  1320. if (rh.length > RELAY_PAYLOAD_SIZE) {
  1321. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1322. "Relay cell length field too long. Closing circuit.");
  1323. return - END_CIRC_REASON_TORPROTOCOL;
  1324. }
  1325. if (rh.stream_id == 0) {
  1326. switch (rh.command) {
  1327. case RELAY_COMMAND_BEGIN:
  1328. case RELAY_COMMAND_CONNECTED:
  1329. case RELAY_COMMAND_END:
  1330. case RELAY_COMMAND_RESOLVE:
  1331. case RELAY_COMMAND_RESOLVED:
  1332. case RELAY_COMMAND_BEGIN_DIR:
  1333. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay command %d with zero "
  1334. "stream_id. Dropping.", (int)rh.command);
  1335. return 0;
  1336. default:
  1337. ;
  1338. }
  1339. }
  1340. /* either conn is NULL, in which case we've got a control cell, or else
  1341. * conn points to the recognized stream. */
  1342. if (conn && !connection_state_is_open(TO_CONN(conn))) {
  1343. if (conn->base_.type == CONN_TYPE_EXIT &&
  1344. (conn->base_.state == EXIT_CONN_STATE_CONNECTING ||
  1345. conn->base_.state == EXIT_CONN_STATE_RESOLVING) &&
  1346. rh.command == RELAY_COMMAND_DATA) {
  1347. /* Allow DATA cells to be delivered to an exit node in state
  1348. * EXIT_CONN_STATE_CONNECTING or EXIT_CONN_STATE_RESOLVING.
  1349. * This speeds up HTTP, for example. */
  1350. optimistic_data = 1;
  1351. } else if (rh.stream_id == 0 && rh.command == RELAY_COMMAND_DATA) {
  1352. log_warn(LD_BUG, "Somehow I had a connection that matched a "
  1353. "data cell with stream ID 0.");
  1354. } else {
  1355. return connection_edge_process_relay_cell_not_open(
  1356. &rh, cell, circ, conn, layer_hint);
  1357. }
  1358. }
  1359. switch (rh.command) {
  1360. case RELAY_COMMAND_DROP:
  1361. // log_info(domain,"Got a relay-level padding cell. Dropping.");
  1362. return 0;
  1363. case RELAY_COMMAND_BEGIN:
  1364. case RELAY_COMMAND_BEGIN_DIR:
  1365. if (layer_hint &&
  1366. circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
  1367. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1368. "Relay begin request unsupported at AP. Dropping.");
  1369. return 0;
  1370. }
  1371. if (circ->purpose == CIRCUIT_PURPOSE_S_REND_JOINED &&
  1372. layer_hint != TO_ORIGIN_CIRCUIT(circ)->cpath->prev) {
  1373. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1374. "Relay begin request to Hidden Service "
  1375. "from intermediary node. Dropping.");
  1376. return 0;
  1377. }
  1378. if (conn) {
  1379. log_fn(LOG_PROTOCOL_WARN, domain,
  1380. "Begin cell for known stream. Dropping.");
  1381. return 0;
  1382. }
  1383. if (rh.command == RELAY_COMMAND_BEGIN_DIR &&
  1384. circ->purpose != CIRCUIT_PURPOSE_S_REND_JOINED) {
  1385. /* Assign this circuit and its app-ward OR connection a unique ID,
  1386. * so that we can measure download times. The local edge and dir
  1387. * connection will be assigned the same ID when they are created
  1388. * and linked. */
  1389. static uint64_t next_id = 0;
  1390. circ->dirreq_id = ++next_id;
  1391. TO_OR_CIRCUIT(circ)->p_chan->dirreq_id = circ->dirreq_id;
  1392. }
  1393. return connection_exit_begin_conn(cell, circ);
  1394. case RELAY_COMMAND_DATA:
  1395. ++stats_n_data_cells_received;
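      /* Each DATA cell consumes one slot of the circuit-level delivery
       * window (and, below, one slot of the stream-level window). If
       * either window goes negative, the other side has sent more data
       * than our sendme cells allowed: a protocol violation. */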
  1396. if (( layer_hint && --layer_hint->deliver_window < 0) ||
  1397. (!layer_hint && --circ->deliver_window < 0)) {
  1398. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1399. "(relay data) circ deliver_window below 0. Killing.");
  1400. if (conn) {
  1401. /* XXXX Do we actually need to do this? Will killing the circuit
  1402. * not send an END and mark the stream for close as appropriate? */
  1403. connection_edge_end(conn, END_STREAM_REASON_TORPROTOCOL);
  1404. connection_mark_for_close(TO_CONN(conn));
  1405. }
  1406. return -END_CIRC_REASON_TORPROTOCOL;
  1407. }
  1408. log_debug(domain,"circ deliver_window now %d.", layer_hint ?
  1409. layer_hint->deliver_window : circ->deliver_window);
  1410. circuit_consider_sending_sendme(circ, layer_hint);
  1411. if (rh.stream_id == 0) {
  1412. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL, "Relay data cell with zero "
  1413. "stream_id. Dropping.");
  1414. return 0;
  1415. } else if (!conn) {
  1416. log_info(domain,"data cell dropped, unknown stream (streamid %d).",
  1417. rh.stream_id);
  1418. return 0;
  1419. }
  1420. if (--conn->deliver_window < 0) { /* is it below 0 after decrement? */
  1421. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1422. "(relay data) conn deliver_window below 0. Killing.");
  1423. return -END_CIRC_REASON_TORPROTOCOL;
  1424. }
  1425. stats_n_data_bytes_received += rh.length;
  1426. connection_write_to_buf((char*)(cell->payload + RELAY_HEADER_SIZE),
  1427. rh.length, TO_CONN(conn));
  1428. if (!optimistic_data) {
  1429. /* Only send a SENDME if we're not getting optimistic data; otherwise
  1430. * a SENDME could arrive before the CONNECTED.
  1431. */
  1432. connection_edge_consider_sending_sendme(conn);
  1433. }
  1434. return 0;
  1435. case RELAY_COMMAND_END:
  1436. reason = rh.length > 0 ?
  1437. get_uint8(cell->payload+RELAY_HEADER_SIZE) : END_STREAM_REASON_MISC;
  1438. if (!conn) {
  1439. log_info(domain,"end cell (%s) dropped, unknown stream.",
  1440. stream_end_reason_to_string(reason));
  1441. return 0;
  1442. }
  1443. /* XXX add to this log_fn the exit node's nickname? */
  1444. log_info(domain,TOR_SOCKET_T_FORMAT": end cell (%s) for stream %d. "
  1445. "Removing stream.",
  1446. conn->base_.s,
  1447. stream_end_reason_to_string(reason),
  1448. conn->stream_id);
  1449. if (conn->base_.type == CONN_TYPE_AP) {
  1450. entry_connection_t *entry_conn = EDGE_TO_ENTRY_CONN(conn);
  1451. if (entry_conn->socks_request &&
  1452. !entry_conn->socks_request->has_finished)
  1453. log_warn(LD_BUG,
  1454. "open stream hasn't sent socks answer yet? Closing.");
  1455. }
  1456. /* We just *got* an end; no reason to send one. */
  1457. conn->edge_has_sent_end = 1;
  1458. if (!conn->end_reason)
  1459. conn->end_reason = reason | END_STREAM_REASON_FLAG_REMOTE;
  1460. if (!conn->base_.marked_for_close) {
  1461. /* only mark it if not already marked. it's possible to
  1462. * get the 'end' right around when the client hangs up on us. */
  1463. connection_mark_and_flush(TO_CONN(conn));
  1464. }
  1465. return 0;
  1466. case RELAY_COMMAND_EXTEND:
  1467. case RELAY_COMMAND_EXTEND2: {
  1468. static uint64_t total_n_extend=0, total_nonearly=0;
  1469. total_n_extend++;
  1470. if (rh.stream_id) {
  1471. log_fn(LOG_PROTOCOL_WARN, domain,
  1472. "'extend' cell received for non-zero stream. Dropping.");
  1473. return 0;
  1474. }
  1475. if (cell->command != CELL_RELAY_EARLY &&
  1476. !networkstatus_get_param(NULL,"AllowNonearlyExtend",0,0,1)) {
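      /* EXTEND requests are expected to arrive packaged in RELAY_EARLY
       * cells: relays forward only a limited number of RELAY_EARLY cells
       * per circuit, which bounds how far a circuit can be extended (the
       * "avoiding infinite-length circuits" design of proposal 110). The
       * AllowNonearlyExtend consensus parameter acts as an escape hatch
       * for peers that still send EXTEND in ordinary RELAY cells. */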
  1477. #define EARLY_WARNING_INTERVAL 3600
  1478. static ratelim_t early_warning_limit =
  1479. RATELIM_INIT(EARLY_WARNING_INTERVAL);
  1480. char *m;
  1481. if (cell->command == CELL_RELAY) {
  1482. ++total_nonearly;
  1483. if ((m = rate_limit_log(&early_warning_limit, approx_time()))) {
  1484. double percentage = ((double)total_nonearly)/total_n_extend;
  1485. percentage *= 100;
  1486. log_fn(LOG_PROTOCOL_WARN, domain, "EXTEND cell received, "
  1487. "but not via RELAY_EARLY. Dropping.%s", m);
  1488. log_fn(LOG_PROTOCOL_WARN, domain, " (We have dropped %.02f%% of "
  1489. "all EXTEND cells for this reason)", percentage);
  1490. tor_free(m);
  1491. }
  1492. } else {
  1493. log_fn(LOG_WARN, domain,
  1494. "EXTEND cell received, in a cell with type %d! Dropping.",
  1495. cell->command);
  1496. }
  1497. return 0;
  1498. }
  1499. return circuit_extend(cell, circ);
  1500. }
  1501. case RELAY_COMMAND_EXTENDED:
  1502. case RELAY_COMMAND_EXTENDED2:
  1503. if (!layer_hint) {
  1504. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1505. "'extended' unsupported at non-origin. Dropping.");
  1506. return 0;
  1507. }
  1508. log_debug(domain,"Got an extended cell! Yay.");
  1509. {
  1510. extended_cell_t extended_cell;
  1511. if (extended_cell_parse(&extended_cell, rh.command,
  1512. (const uint8_t*)cell->payload+RELAY_HEADER_SIZE,
  1513. rh.length)<0) {
  1514. log_warn(LD_PROTOCOL,
  1515. "Can't parse EXTENDED cell; killing circuit.");
  1516. return -END_CIRC_REASON_TORPROTOCOL;
  1517. }
  1518. if ((reason = circuit_finish_handshake(TO_ORIGIN_CIRCUIT(circ),
  1519. &extended_cell.created_cell)) < 0) {
  1520. circuit_mark_for_close(circ, -reason);
  1521. return 0; /* We don't want to cause a warning, so we mark the circuit
  1522. * here. */
  1523. }
  1524. }
  1525. if ((reason=circuit_send_next_onion_skin(TO_ORIGIN_CIRCUIT(circ)))<0) {
  1526. log_info(domain,"circuit_send_next_onion_skin() failed.");
  1527. return reason;
  1528. }
  1529. return 0;
  1530. case RELAY_COMMAND_TRUNCATE:
  1531. if (layer_hint) {
  1532. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1533. "'truncate' unsupported at origin. Dropping.");
  1534. return 0;
  1535. }
  1536. if (circ->n_hop) {
  1537. if (circ->n_chan)
  1538. log_warn(LD_BUG, "n_chan and n_hop set on the same circuit!");
  1539. extend_info_free(circ->n_hop);
  1540. circ->n_hop = NULL;
  1541. tor_free(circ->n_chan_create_cell);
  1542. circuit_set_state(circ, CIRCUIT_STATE_OPEN);
  1543. }
  1544. if (circ->n_chan) {
  1545. uint8_t trunc_reason = get_uint8(cell->payload + RELAY_HEADER_SIZE);
  1546. circuit_clear_cell_queue(circ, circ->n_chan);
  1547. channel_send_destroy(circ->n_circ_id, circ->n_chan,
  1548. trunc_reason);
  1549. circuit_set_n_circid_chan(circ, 0, NULL);
  1550. }
  1551. log_debug(LD_EXIT, "Processed 'truncate', replying.");
  1552. {
  1553. char payload[1];
  1554. payload[0] = (char)END_CIRC_REASON_REQUESTED;
  1555. relay_send_command_from_edge(0, circ, RELAY_COMMAND_TRUNCATED,
  1556. payload, sizeof(payload), NULL);
  1557. }
  1558. return 0;
  1559. case RELAY_COMMAND_TRUNCATED:
  1560. if (!layer_hint) {
  1561. log_fn(LOG_PROTOCOL_WARN, LD_EXIT,
  1562. "'truncated' unsupported at non-origin. Dropping.");
  1563. return 0;
  1564. }
  1565. circuit_truncated(TO_ORIGIN_CIRCUIT(circ), layer_hint,
  1566. get_uint8(cell->payload + RELAY_HEADER_SIZE));
  1567. return 0;
  1568. case RELAY_COMMAND_CONNECTED:
  1569. if (conn) {
  1570. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1571. "'connected' unsupported while open. Closing circ.");
  1572. return -END_CIRC_REASON_TORPROTOCOL;
  1573. }
  1574. log_info(domain,
  1575. "'connected' received on circid %u for streamid %d, "
  1576. "no conn attached anymore. Ignoring.",
  1577. (unsigned)circ->n_circ_id, rh.stream_id);
  1578. return 0;
  1579. case RELAY_COMMAND_SENDME:
  1580. if (!rh.stream_id) {
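        /* A zero stream_id means this is a circuit-level sendme: the other
         * side is granting us credit to package CIRCWINDOW_INCREMENT more
         * relay cells on this circuit (or on this hop, if we are the
         * origin). A window that would grow past CIRCWINDOW_START_MAX is a
         * protocol violation, since an honest peer cannot acknowledge more
         * cells than we were ever allowed to send. */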
  1581. if (layer_hint) {
  1582. if (layer_hint->package_window + CIRCWINDOW_INCREMENT >
  1583. CIRCWINDOW_START_MAX) {
  1584. static struct ratelim_t exit_warn_ratelim = RATELIM_INIT(600);
  1585. log_fn_ratelim(&exit_warn_ratelim, LOG_WARN, LD_PROTOCOL,
  1586. "Unexpected sendme cell from exit relay. "
  1587. "Closing circ.");
  1588. return -END_CIRC_REASON_TORPROTOCOL;
  1589. }
  1590. layer_hint->package_window += CIRCWINDOW_INCREMENT;
  1591. log_debug(LD_APP,"circ-level sendme at origin, packagewindow %d.",
  1592. layer_hint->package_window);
  1593. circuit_resume_edge_reading(circ, layer_hint);
  1594. } else {
  1595. if (circ->package_window + CIRCWINDOW_INCREMENT >
  1596. CIRCWINDOW_START_MAX) {
  1597. static struct ratelim_t client_warn_ratelim = RATELIM_INIT(600);
  1598. log_fn_ratelim(&client_warn_ratelim,LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1599. "Unexpected sendme cell from client. "
  1600. "Closing circ (window %d).",
  1601. circ->package_window);
  1602. return -END_CIRC_REASON_TORPROTOCOL;
  1603. }
  1604. circ->package_window += CIRCWINDOW_INCREMENT;
  1605. log_debug(LD_APP,
  1606. "circ-level sendme at non-origin, packagewindow %d.",
  1607. circ->package_window);
  1608. circuit_resume_edge_reading(circ, layer_hint);
  1609. }
  1610. return 0;
  1611. }
  1612. if (!conn) {
  1613. log_info(domain,"sendme cell dropped, unknown stream (streamid %d).",
  1614. rh.stream_id);
  1615. return 0;
  1616. }
  1617. conn->package_window += STREAMWINDOW_INCREMENT;
  1618. log_debug(domain,"stream-level sendme, packagewindow now %d.",
  1619. conn->package_window);
  1620. if (circuit_queue_streams_are_blocked(circ)) {
  1621. /* Still waiting for queue to flush; don't touch conn */
  1622. return 0;
  1623. }
  1624. connection_start_reading(TO_CONN(conn));
  1625. /* handle whatever might still be on the inbuf */
  1626. if (connection_edge_package_raw_inbuf(conn, 1, NULL) < 0) {
  1627. /* (We already sent an end cell if possible) */
  1628. connection_mark_for_close(TO_CONN(conn));
  1629. return 0;
  1630. }
  1631. return 0;
  1632. case RELAY_COMMAND_RESOLVE:
  1633. if (layer_hint) {
  1634. log_fn(LOG_PROTOCOL_WARN, LD_APP,
  1635. "resolve request unsupported at AP; dropping.");
  1636. return 0;
  1637. } else if (conn) {
  1638. log_fn(LOG_PROTOCOL_WARN, domain,
  1639. "resolve request for known stream; dropping.");
  1640. return 0;
  1641. } else if (circ->purpose != CIRCUIT_PURPOSE_OR) {
  1642. log_fn(LOG_PROTOCOL_WARN, domain,
  1643. "resolve request on circ with purpose %d; dropping",
  1644. circ->purpose);
  1645. return 0;
  1646. }
  1647. connection_exit_begin_resolve(cell, TO_OR_CIRCUIT(circ));
  1648. return 0;
  1649. case RELAY_COMMAND_RESOLVED:
  1650. if (conn) {
  1651. log_fn(LOG_PROTOCOL_WARN, domain,
  1652. "'resolved' unsupported while open. Closing circ.");
  1653. return -END_CIRC_REASON_TORPROTOCOL;
  1654. }
  1655. log_info(domain,
  1656. "'resolved' received, no conn attached anymore. Ignoring.");
  1657. return 0;
  1658. case RELAY_COMMAND_ESTABLISH_INTRO:
  1659. case RELAY_COMMAND_ESTABLISH_RENDEZVOUS:
  1660. case RELAY_COMMAND_INTRODUCE1:
  1661. case RELAY_COMMAND_INTRODUCE2:
  1662. case RELAY_COMMAND_INTRODUCE_ACK:
  1663. case RELAY_COMMAND_RENDEZVOUS1:
  1664. case RELAY_COMMAND_RENDEZVOUS2:
  1665. case RELAY_COMMAND_INTRO_ESTABLISHED:
  1666. case RELAY_COMMAND_RENDEZVOUS_ESTABLISHED:
  1667. rend_process_relay_cell(circ, layer_hint,
  1668. rh.command, rh.length,
  1669. cell->payload+RELAY_HEADER_SIZE);
  1670. return 0;
  1671. }
  1672. log_fn(LOG_PROTOCOL_WARN, LD_PROTOCOL,
  1673. "Received unknown relay command %d. Perhaps the other side is using "
  1674. "a newer version of Tor? Dropping.",
  1675. rh.command);
  1676. return 0; /* for forward compatibility, don't kill the circuit */
  1677. }
  1678. /** How many relay_data cells have we built, ever? */
  1679. uint64_t stats_n_data_cells_packaged = 0;
/** How many bytes of data have we put in relay_data cells, ever? This
 * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_packaged if every relay
 * cell we ever sent were completely full of data. */
uint64_t stats_n_data_bytes_packaged = 0;
/** How many relay_data cells have we received, ever? */
uint64_t stats_n_data_cells_received = 0;
/** How many bytes of data have we received in relay_data cells, ever? This
 * would be RELAY_PAYLOAD_SIZE*stats_n_data_cells_received if every relay
 * cell we ever received were completely full of data. */
uint64_t stats_n_data_bytes_received = 0;
  1690. /** If <b>conn</b> has an entire relay payload of bytes on its inbuf (or
  1691. * <b>package_partial</b> is true), and the appropriate package windows aren't
  1692. * empty, grab a cell and send it down the circuit.
  1693. *
 * If <b>max_cells</b> is non-NULL, package no more than *<b>max_cells</b>
 * cells, and decrement *<b>max_cells</b> by the number of cells packaged.
  1696. *
  1697. * Return -1 (and send a RELAY_COMMAND_END cell if necessary) if conn should
  1698. * be marked for close, else return 0.
  1699. */
  1700. int
  1701. connection_edge_package_raw_inbuf(edge_connection_t *conn, int package_partial,
  1702. int *max_cells)
  1703. {
  1704. size_t bytes_to_process, length;
  1705. char payload[CELL_PAYLOAD_SIZE];
  1706. circuit_t *circ;
  1707. const unsigned domain = conn->base_.type == CONN_TYPE_AP ? LD_APP : LD_EXIT;
  1708. int sending_from_optimistic = 0;
  1709. entry_connection_t *entry_conn =
  1710. conn->base_.type == CONN_TYPE_AP ? EDGE_TO_ENTRY_CONN(conn) : NULL;
  1711. const int sending_optimistically =
  1712. entry_conn &&
  1713. conn->base_.type == CONN_TYPE_AP &&
  1714. conn->base_.state != AP_CONN_STATE_OPEN;
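  /* "sending_optimistically" means this AP stream has not finished its
   * SOCKS handshake yet, so anything we package now is optimistic data and
   * must also be remembered in pending_optimistic_data in case the stream
   * needs to be detached and retried. "sending_from_optimistic" (set
   * below) means we are currently re-sending data that was saved that way
   * on an earlier attempt. */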
  1715. crypt_path_t *cpath_layer = conn->cpath_layer;
  1716. tor_assert(conn);
  1717. if (conn->base_.marked_for_close) {
  1718. log_warn(LD_BUG,
  1719. "called on conn that's already marked for close at %s:%d.",
  1720. conn->base_.marked_for_close_file, conn->base_.marked_for_close);
  1721. return 0;
  1722. }
  1723. if (max_cells && *max_cells <= 0)
  1724. return 0;
  1725. repeat_connection_edge_package_raw_inbuf:
  1726. circ = circuit_get_by_edge_conn(conn);
  1727. if (!circ) {
  1728. log_info(domain,"conn has no circuit! Closing.");
  1729. conn->end_reason = END_STREAM_REASON_CANT_ATTACH;
  1730. return -1;
  1731. }
  1732. if (circuit_consider_stop_edge_reading(circ, cpath_layer))
  1733. return 0;
  1734. if (conn->package_window <= 0) {
  1735. log_info(domain,"called with package_window %d. Skipping.",
  1736. conn->package_window);
  1737. connection_stop_reading(TO_CONN(conn));
  1738. return 0;
  1739. }
  1740. sending_from_optimistic = entry_conn &&
  1741. entry_conn->sending_optimistic_data != NULL;
  1742. if (PREDICT_UNLIKELY(sending_from_optimistic)) {
  1743. bytes_to_process = buf_datalen(entry_conn->sending_optimistic_data);
  1744. if (PREDICT_UNLIKELY(!bytes_to_process)) {
  1745. log_warn(LD_BUG, "sending_optimistic_data was non-NULL but empty");
  1746. bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
  1747. sending_from_optimistic = 0;
  1748. }
  1749. } else {
  1750. bytes_to_process = connection_get_inbuf_len(TO_CONN(conn));
  1751. }
  1752. if (!bytes_to_process)
  1753. return 0;
  1754. if (!package_partial && bytes_to_process < RELAY_PAYLOAD_SIZE)
  1755. return 0;
  1756. if (bytes_to_process > RELAY_PAYLOAD_SIZE) {
  1757. length = RELAY_PAYLOAD_SIZE;
  1758. } else {
  1759. length = bytes_to_process;
  1760. }
  1761. stats_n_data_bytes_packaged += length;
  1762. stats_n_data_cells_packaged += 1;
  1763. if (PREDICT_UNLIKELY(sending_from_optimistic)) {
  1764. /* XXXX We could be more efficient here by sometimes packing
  1765. * previously-sent optimistic data in the same cell with data
  1766. * from the inbuf. */
  1767. fetch_from_buf(payload, length, entry_conn->sending_optimistic_data);
  1768. if (!buf_datalen(entry_conn->sending_optimistic_data)) {
  1769. buf_free(entry_conn->sending_optimistic_data);
  1770. entry_conn->sending_optimistic_data = NULL;
  1771. }
  1772. } else {
  1773. connection_fetch_from_buf(payload, length, TO_CONN(conn));
  1774. }
  1775. log_debug(domain,TOR_SOCKET_T_FORMAT": Packaging %d bytes (%d waiting).",
  1776. conn->base_.s,
  1777. (int)length, (int)connection_get_inbuf_len(TO_CONN(conn)));
  1778. if (sending_optimistically && !sending_from_optimistic) {
  1779. /* This is new optimistic data; remember it in case we need to detach and
  1780. retry */
  1781. if (!entry_conn->pending_optimistic_data)
  1782. entry_conn->pending_optimistic_data = buf_new();
  1783. write_to_buf(payload, length, entry_conn->pending_optimistic_data);
  1784. }
  1785. if (connection_edge_send_command(conn, RELAY_COMMAND_DATA,
  1786. payload, length) < 0 )
  1787. /* circuit got marked for close, don't continue, don't need to mark conn */
  1788. return 0;
  1789. if (!cpath_layer) { /* non-rendezvous exit */
  1790. tor_assert(circ->package_window > 0);
  1791. circ->package_window--;
  1792. } else { /* we're an AP, or an exit on a rendezvous circ */
  1793. tor_assert(cpath_layer->package_window > 0);
  1794. cpath_layer->package_window--;
  1795. }
  1796. if (--conn->package_window <= 0) { /* is it 0 after decrement? */
  1797. connection_stop_reading(TO_CONN(conn));
  1798. log_debug(domain,"conn->package_window reached 0.");
  1799. circuit_consider_stop_edge_reading(circ, cpath_layer);
  1800. return 0; /* don't process the inbuf any more */
  1801. }
  1802. log_debug(domain,"conn->package_window is now %d",conn->package_window);
  1803. if (max_cells) {
  1804. *max_cells -= 1;
  1805. if (*max_cells <= 0)
  1806. return 0;
  1807. }
  1808. /* handle more if there's more, or return 0 if there isn't */
  1809. goto repeat_connection_edge_package_raw_inbuf;
  1810. }
  1811. /** Called when we've just received a relay data cell, when
  1812. * we've just finished flushing all bytes to stream <b>conn</b>,
  1813. * or when we've flushed *some* bytes to the stream <b>conn</b>.
  1814. *
  1815. * If conn->outbuf is not too full, and our deliver window is
  1816. * low, send back a suitable number of stream-level sendme cells.
  1817. */
  1818. void
  1819. connection_edge_consider_sending_sendme(edge_connection_t *conn)
  1820. {
  1821. circuit_t *circ;
  1822. if (connection_outbuf_too_full(TO_CONN(conn)))
  1823. return;
  1824. circ = circuit_get_by_edge_conn(conn);
  1825. if (!circ) {
  1826. /* this can legitimately happen if the destroy has already
  1827. * arrived and torn down the circuit */
  1828. log_info(LD_APP,"No circuit associated with conn. Skipping.");
  1829. return;
  1830. }
  1831. while (conn->deliver_window <= STREAMWINDOW_START - STREAMWINDOW_INCREMENT) {
  1832. log_debug(conn->base_.type == CONN_TYPE_AP ?LD_APP:LD_EXIT,
  1833. "Outbuf %d, Queuing stream sendme.",
  1834. (int)conn->base_.outbuf_flushlen);
  1835. conn->deliver_window += STREAMWINDOW_INCREMENT;
  1836. if (connection_edge_send_command(conn, RELAY_COMMAND_SENDME,
  1837. NULL, 0) < 0) {
  1838. log_warn(LD_APP,"connection_edge_send_command failed. Skipping.");
  1839. return; /* the circuit's closed, don't continue */
  1840. }
  1841. }
  1842. }
  1843. /** The circuit <b>circ</b> has received a circuit-level sendme
  1844. * (on hop <b>layer_hint</b>, if we're the OP). Go through all the
  1845. * attached streams and let them resume reading and packaging, if
  1846. * their stream windows allow it.
  1847. */
  1848. static void
  1849. circuit_resume_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
  1850. {
  1851. if (circuit_queue_streams_are_blocked(circ)) {
  1852. log_debug(layer_hint?LD_APP:LD_EXIT,"Too big queue, no resuming");
  1853. return;
  1854. }
  1855. log_debug(layer_hint?LD_APP:LD_EXIT,"resuming");
  1856. if (CIRCUIT_IS_ORIGIN(circ))
  1857. circuit_resume_edge_reading_helper(TO_ORIGIN_CIRCUIT(circ)->p_streams,
  1858. circ, layer_hint);
  1859. else
  1860. circuit_resume_edge_reading_helper(TO_OR_CIRCUIT(circ)->n_streams,
  1861. circ, layer_hint);
  1862. }
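/** Seed the weak RNG that circuit_resume_edge_reading_helper() below uses
 * to choose which attached stream gets to package data first. */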
  1863. void
  1864. stream_choice_seed_weak_rng(void)
  1865. {
  1866. crypto_seed_weak_rng(&stream_choice_rng);
  1867. }
  1868. /** A helper function for circuit_resume_edge_reading() above.
  1869. * The arguments are the same, except that <b>conn</b> is the head
  1870. * of a linked list of edge streams that should each be considered.
  1871. */
  1872. static int
  1873. circuit_resume_edge_reading_helper(edge_connection_t *first_conn,
  1874. circuit_t *circ,
  1875. crypt_path_t *layer_hint)
  1876. {
  1877. edge_connection_t *conn;
  1878. int n_packaging_streams, n_streams_left;
  1879. int packaged_this_round;
  1880. int cells_on_queue;
  1881. int cells_per_conn;
  1882. edge_connection_t *chosen_stream = NULL;
  1883. int max_to_package;
  1884. if (first_conn == NULL) {
  1885. /* Don't bother to try to do the rest of this if there are no connections
  1886. * to resume. */
  1887. return 0;
  1888. }
  /* How many cells do we have space for? It will be the minimum of
   * the number needed to exhaust the package window and the number
   * needed to fill the cell queue. */
  1892. max_to_package = circ->package_window;
  1893. if (CIRCUIT_IS_ORIGIN(circ)) {
  1894. cells_on_queue = circ->n_chan_cells.n;
  1895. } else {
  1896. or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
  1897. cells_on_queue = or_circ->p_chan_cells.n;
  1898. }
  1899. if (CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue < max_to_package)
  1900. max_to_package = CELL_QUEUE_HIGHWATER_SIZE - cells_on_queue;
  1901. /* Once we used to start listening on the streams in the order they
  1902. * appeared in the linked list. That leads to starvation on the
  1903. * streams that appeared later on the list, since the first streams
  1904. * would always get to read first. Instead, we just pick a random
  1905. * stream on the list, and enable reading for streams starting at that
  1906. * point (and wrapping around as if the list were circular). It would
  1907. * probably be better to actually remember which streams we've
  1908. * serviced in the past, but this is simple and effective. */
  1909. /* Select a stream uniformly at random from the linked list. We
  1910. * don't need cryptographic randomness here. */
  1911. {
  1912. int num_streams = 0;
  1913. for (conn = first_conn; conn; conn = conn->next_stream) {
  1914. num_streams++;
  1915. if (tor_weak_random_one_in_n(&stream_choice_rng, num_streams)) {
  1916. chosen_stream = conn;
  1917. }
  1918. /* Invariant: chosen_stream has been chosen uniformly at random from
  1919. * among the first num_streams streams on first_conn.
  1920. *
  1921. * (Note that we iterate over every stream on the circuit, so that after
  1922. * we've considered the first stream, we've chosen it with P=1; and
  1923. * after we consider the second stream, we've switched to it with P=1/2
  1924. * and stayed with the first stream with P=1/2; and after we've
  1925. * considered the third stream, we've switched to it with P=1/3 and
       * remained with one of the first two streams with P=(2/3), giving each
       * one P=(1/2)(2/3)=(1/3).) */
  1928. }
  1929. }
  1930. /* Count how many non-marked streams there are that have anything on
  1931. * their inbuf, and enable reading on all of the connections. */
  1932. n_packaging_streams = 0;
  1933. /* Activate reading starting from the chosen stream */
  1934. for (conn=chosen_stream; conn; conn = conn->next_stream) {
  1935. /* Start reading for the streams starting from here */
  1936. if (conn->base_.marked_for_close || conn->package_window <= 0)
  1937. continue;
  1938. if (!layer_hint || conn->cpath_layer == layer_hint) {
  1939. connection_start_reading(TO_CONN(conn));
  1940. if (connection_get_inbuf_len(TO_CONN(conn)) > 0)
  1941. ++n_packaging_streams;
  1942. }
  1943. }
  1944. /* Go back and do the ones we skipped, circular-style */
  1945. for (conn = first_conn; conn != chosen_stream; conn = conn->next_stream) {
  1946. if (conn->base_.marked_for_close || conn->package_window <= 0)
  1947. continue;
  1948. if (!layer_hint || conn->cpath_layer == layer_hint) {
  1949. connection_start_reading(TO_CONN(conn));
  1950. if (connection_get_inbuf_len(TO_CONN(conn)) > 0)
  1951. ++n_packaging_streams;
  1952. }
  1953. }
  1954. if (n_packaging_streams == 0) /* avoid divide-by-zero */
  1955. return 0;
  1956. again:
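  /* Split the remaining package budget evenly across the streams that
   * still have data, rounding up: e.g. 10 packageable cells shared by 3
   * streams lets each stream send up to CEIL_DIV(10,3) = 4 cells this
   * round. */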
  1957. cells_per_conn = CEIL_DIV(max_to_package, n_packaging_streams);
  1958. packaged_this_round = 0;
  1959. n_streams_left = 0;
  1960. /* Iterate over all connections. Package up to cells_per_conn cells on
  1961. * each. Update packaged_this_round with the total number of cells
  1962. * packaged, and n_streams_left with the number that still have data to
  1963. * package.
  1964. */
  1965. for (conn=first_conn; conn; conn=conn->next_stream) {
  1966. if (conn->base_.marked_for_close || conn->package_window <= 0)
  1967. continue;
  1968. if (!layer_hint || conn->cpath_layer == layer_hint) {
  1969. int n = cells_per_conn, r;
  1970. /* handle whatever might still be on the inbuf */
  1971. r = connection_edge_package_raw_inbuf(conn, 1, &n);
  1972. /* Note how many we packaged */
  1973. packaged_this_round += (cells_per_conn-n);
  1974. if (r<0) {
  1975. /* Problem while packaging. (We already sent an end cell if
  1976. * possible) */
  1977. connection_mark_for_close(TO_CONN(conn));
  1978. continue;
  1979. }
  1980. /* If there's still data to read, we'll be coming back to this stream. */
  1981. if (connection_get_inbuf_len(TO_CONN(conn)))
  1982. ++n_streams_left;
  1983. /* If the circuit won't accept any more data, return without looking
  1984. * at any more of the streams. Any connections that should be stopped
  1985. * have already been stopped by connection_edge_package_raw_inbuf. */
  1986. if (circuit_consider_stop_edge_reading(circ, layer_hint))
  1987. return -1;
  1988. /* XXXX should we also stop immediately if we fill up the cell queue?
  1989. * Probably. */
  1990. }
  1991. }
  1992. /* If we made progress, and we are willing to package more, and there are
  1993. * any streams left that want to package stuff... try again!
  1994. */
  1995. if (packaged_this_round && packaged_this_round < max_to_package &&
  1996. n_streams_left) {
  1997. max_to_package -= packaged_this_round;
  1998. n_packaging_streams = n_streams_left;
  1999. goto again;
  2000. }
  2001. return 0;
  2002. }
  2003. /** Check if the package window for <b>circ</b> is empty (at
  2004. * hop <b>layer_hint</b> if it's defined).
  2005. *
  2006. * If yes, tell edge streams to stop reading and return 1.
  2007. * Else return 0.
  2008. */
  2009. static int
  2010. circuit_consider_stop_edge_reading(circuit_t *circ, crypt_path_t *layer_hint)
  2011. {
  2012. edge_connection_t *conn = NULL;
  2013. unsigned domain = layer_hint ? LD_APP : LD_EXIT;
  2014. if (!layer_hint) {
  2015. or_circuit_t *or_circ = TO_OR_CIRCUIT(circ);
  2016. log_debug(domain,"considering circ->package_window %d",
  2017. circ->package_window);
  2018. if (circ->package_window <= 0) {
  2019. log_debug(domain,"yes, not-at-origin. stopped.");
  2020. for (conn = or_circ->n_streams; conn; conn=conn->next_stream)
  2021. connection_stop_reading(TO_CONN(conn));
  2022. return 1;
  2023. }
  2024. return 0;
  2025. }
  2026. /* else, layer hint is defined, use it */
  2027. log_debug(domain,"considering layer_hint->package_window %d",
  2028. layer_hint->package_window);
  2029. if (layer_hint->package_window <= 0) {
  2030. log_debug(domain,"yes, at-origin. stopped.");
  2031. for (conn = TO_ORIGIN_CIRCUIT(circ)->p_streams; conn;
  2032. conn=conn->next_stream) {
  2033. if (conn->cpath_layer == layer_hint)
  2034. connection_stop_reading(TO_CONN(conn));
  2035. }
  2036. return 1;
  2037. }
  2038. return 0;
  2039. }
  2040. /** Check if the deliver_window for circuit <b>circ</b> (at hop
  2041. * <b>layer_hint</b> if it's defined) is low enough that we should
  2042. * send a circuit-level sendme back down the circuit. If so, send
  2043. * enough sendmes that the window would be overfull if we sent any
  2044. * more.
  2045. */
  2046. static void
  2047. circuit_consider_sending_sendme(circuit_t *circ, crypt_path_t *layer_hint)
  2048. {
  2049. // log_fn(LOG_INFO,"Considering: layer_hint is %s",
  2050. // layer_hint ? "defined" : "null");
  2051. while ((layer_hint ? layer_hint->deliver_window : circ->deliver_window) <=
  2052. CIRCWINDOW_START - CIRCWINDOW_INCREMENT) {
  2053. log_debug(LD_CIRC,"Queuing circuit sendme.");
  2054. if (layer_hint)
  2055. layer_hint->deliver_window += CIRCWINDOW_INCREMENT;
  2056. else
  2057. circ->deliver_window += CIRCWINDOW_INCREMENT;
  2058. if (relay_send_command_from_edge(0, circ, RELAY_COMMAND_SENDME,
  2059. NULL, 0, layer_hint) < 0) {
  2060. log_warn(LD_CIRC,
  2061. "relay_send_command_from_edge failed. Circuit's closed.");
  2062. return; /* the circuit's closed, don't continue */
  2063. }
  2064. }
  2065. }
  2066. #ifdef ACTIVE_CIRCUITS_PARANOIA
  2067. #define assert_cmux_ok_paranoid(chan) \
  2068. assert_circuit_mux_okay(chan)
  2069. #else
  2070. #define assert_cmux_ok_paranoid(chan)
  2071. #endif
  2072. /** The total number of cells we have allocated. */
  2073. static size_t total_cells_allocated = 0;
  2074. /** Release storage held by <b>cell</b>. */
  2075. static inline void
  2076. packed_cell_free_unchecked(packed_cell_t *cell)
  2077. {
  2078. --total_cells_allocated;
  2079. tor_free(cell);
  2080. }
  2081. /** Allocate and return a new packed_cell_t. */
  2082. STATIC packed_cell_t *
  2083. packed_cell_new(void)
  2084. {
  2085. ++total_cells_allocated;
  2086. return tor_malloc_zero(sizeof(packed_cell_t));
  2087. }
/** Free <b>cell</b>, a packed cell that was handed to the channel_t lower
 * layer. Does nothing if <b>cell</b> is NULL. */
  2089. void
  2090. packed_cell_free(packed_cell_t *cell)
  2091. {
  2092. if (!cell)
  2093. return;
  2094. packed_cell_free_unchecked(cell);
  2095. }
  2096. /** Log current statistics for cell pool allocation at log level
  2097. * <b>severity</b>. */
  2098. void
  2099. dump_cell_pool_usage(int severity)
  2100. {
  2101. int n_circs = 0;
  2102. int n_cells = 0;
  2103. SMARTLIST_FOREACH_BEGIN(circuit_get_global_list(), circuit_t *, c) {
  2104. n_cells += c->n_chan_cells.n;
  2105. if (!CIRCUIT_IS_ORIGIN(c))
  2106. n_cells += TO_OR_CIRCUIT(c)->p_chan_cells.n;
  2107. ++n_circs;
  2108. }
  2109. SMARTLIST_FOREACH_END(c);
  2110. tor_log(severity, LD_MM,
  2111. "%d cells allocated on %d circuits. %d cells leaked.",
  2112. n_cells, n_circs, (int)total_cells_allocated - n_cells);
  2113. }
  2114. /** Allocate a new copy of packed <b>cell</b>. */
  2115. static inline packed_cell_t *
  2116. packed_cell_copy(const cell_t *cell, int wide_circ_ids)
  2117. {
  2118. packed_cell_t *c = packed_cell_new();
  2119. cell_pack(c, cell, wide_circ_ids);
  2120. return c;
  2121. }
  2122. /** Append <b>cell</b> to the end of <b>queue</b>. */
  2123. void
  2124. cell_queue_append(cell_queue_t *queue, packed_cell_t *cell)
  2125. {
  2126. TOR_SIMPLEQ_INSERT_TAIL(&queue->head, cell, next);
  2127. ++queue->n;
  2128. }
  2129. /** Append a newly allocated copy of <b>cell</b> to the end of the
  2130. * <b>exitward</b> (or app-ward) <b>queue</b> of <b>circ</b>. If
  2131. * <b>use_stats</b> is true, record statistics about the cell.
  2132. */
  2133. void
  2134. cell_queue_append_packed_copy(circuit_t *circ, cell_queue_t *queue,
  2135. int exitward, const cell_t *cell,
  2136. int wide_circ_ids, int use_stats)
  2137. {
  2138. packed_cell_t *copy = packed_cell_copy(cell, wide_circ_ids);
  2139. (void)circ;
  2140. (void)exitward;
  2141. (void)use_stats;
  2142. copy->inserted_time = (uint32_t) monotime_coarse_absolute_msec();
  2143. cell_queue_append(queue, copy);
  2144. }
  2145. /** Initialize <b>queue</b> as an empty cell queue. */
  2146. void
  2147. cell_queue_init(cell_queue_t *queue)
  2148. {
  2149. memset(queue, 0, sizeof(cell_queue_t));
  2150. TOR_SIMPLEQ_INIT(&queue->head);
  2151. }
  2152. /** Remove and free every cell in <b>queue</b>. */
  2153. void
  2154. cell_queue_clear(cell_queue_t *queue)
  2155. {
  2156. packed_cell_t *cell;
  2157. while ((cell = TOR_SIMPLEQ_FIRST(&queue->head))) {
  2158. TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
  2159. packed_cell_free_unchecked(cell);
  2160. }
  2161. TOR_SIMPLEQ_INIT(&queue->head);
  2162. queue->n = 0;
  2163. }
  2164. /** Extract and return the cell at the head of <b>queue</b>; return NULL if
  2165. * <b>queue</b> is empty. */
  2166. STATIC packed_cell_t *
  2167. cell_queue_pop(cell_queue_t *queue)
  2168. {
  2169. packed_cell_t *cell = TOR_SIMPLEQ_FIRST(&queue->head);
  2170. if (!cell)
  2171. return NULL;
  2172. TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
  2173. --queue->n;
  2174. return cell;
  2175. }
  2176. /** Initialize <b>queue</b> as an empty cell queue. */
  2177. void
  2178. destroy_cell_queue_init(destroy_cell_queue_t *queue)
  2179. {
  2180. memset(queue, 0, sizeof(destroy_cell_queue_t));
  2181. TOR_SIMPLEQ_INIT(&queue->head);
  2182. }
  2183. /** Remove and free every cell in <b>queue</b>. */
  2184. void
  2185. destroy_cell_queue_clear(destroy_cell_queue_t *queue)
  2186. {
  2187. destroy_cell_t *cell;
  2188. while ((cell = TOR_SIMPLEQ_FIRST(&queue->head))) {
  2189. TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
  2190. tor_free(cell);
  2191. }
  2192. TOR_SIMPLEQ_INIT(&queue->head);
  2193. queue->n = 0;
  2194. }
  2195. /** Extract and return the cell at the head of <b>queue</b>; return NULL if
  2196. * <b>queue</b> is empty. */
  2197. STATIC destroy_cell_t *
  2198. destroy_cell_queue_pop(destroy_cell_queue_t *queue)
  2199. {
  2200. destroy_cell_t *cell = TOR_SIMPLEQ_FIRST(&queue->head);
  2201. if (!cell)
  2202. return NULL;
  2203. TOR_SIMPLEQ_REMOVE_HEAD(&queue->head, next);
  2204. --queue->n;
  2205. return cell;
  2206. }
  2207. /** Append a destroy cell for <b>circid</b> to <b>queue</b>. */
  2208. void
  2209. destroy_cell_queue_append(destroy_cell_queue_t *queue,
  2210. circid_t circid,
  2211. uint8_t reason)
  2212. {
  2213. destroy_cell_t *cell = tor_malloc_zero(sizeof(destroy_cell_t));
  2214. cell->circid = circid;
  2215. cell->reason = reason;
  2216. /* Not yet used, but will be required for OOM handling. */
  2217. cell->inserted_time = (uint32_t) monotime_coarse_absolute_msec();
  2218. TOR_SIMPLEQ_INSERT_TAIL(&queue->head, cell, next);
  2219. ++queue->n;
  2220. }
/** Convert a destroy_cell_t to a newly allocated packed_cell_t. Frees its
 * input. */
  2222. static packed_cell_t *
  2223. destroy_cell_to_packed_cell(destroy_cell_t *inp, int wide_circ_ids)
  2224. {
  2225. packed_cell_t *packed = packed_cell_new();
  2226. cell_t cell;
  2227. memset(&cell, 0, sizeof(cell));
  2228. cell.circ_id = inp->circid;
  2229. cell.command = CELL_DESTROY;
  2230. cell.payload[0] = inp->reason;
  2231. cell_pack(packed, &cell, wide_circ_ids);
  2232. tor_free(inp);
  2233. return packed;
  2234. }
  2235. /** Return the total number of bytes used for each packed_cell in a queue.
  2236. * Approximate. */
  2237. size_t
  2238. packed_cell_mem_cost(void)
  2239. {
  2240. return sizeof(packed_cell_t);
  2241. }
/** Return the approximate number of bytes of memory used by all the packed
 * cells we have allocated. */
  2243. STATIC size_t
  2244. cell_queues_get_total_allocation(void)
  2245. {
  2246. return total_cells_allocated * packed_cell_mem_cost();
  2247. }
  2248. /** How long after we've been low on memory should we try to conserve it? */
  2249. #define MEMORY_PRESSURE_INTERVAL (30*60)
  2250. /** The time at which we were last low on memory. */
  2251. static time_t last_time_under_memory_pressure = 0;
  2252. /** Check whether we've got too much space used for cells. If so,
  2253. * call the OOM handler and return 1. Otherwise, return 0. */
  2254. STATIC int
  2255. cell_queues_check_size(void)
  2256. {
  2257. time_t now = time(NULL);
  2258. size_t alloc = cell_queues_get_total_allocation();
  2259. alloc += buf_get_total_allocation();
  2260. alloc += tor_zlib_get_total_allocation();
  2261. const size_t rend_cache_total = rend_cache_get_total_allocation();
  2262. alloc += rend_cache_total;
  2263. const size_t geoip_client_cache_total =
  2264. geoip_client_cache_total_allocation();
  2265. alloc += geoip_client_cache_total;
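  /* Crossing the low-water threshold only records the time (so that
   * have_been_under_memory_pressure() reports recent pressure); nothing is
   * actually freed until the hard MaxMemInQueues limit is reached. */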
  2266. if (alloc >= get_options()->MaxMemInQueues_low_threshold) {
  2267. last_time_under_memory_pressure = approx_time();
  2268. if (alloc >= get_options()->MaxMemInQueues) {
  2269. /* If we're spending over 20% of the memory limit on hidden service
  2270. * descriptors, free them until we're down to 10%. Do the same for geoip
  2271. * client cache. */
  2272. if (rend_cache_total > get_options()->MaxMemInQueues / 5) {
  2273. const size_t bytes_to_remove =
  2274. rend_cache_total - (size_t)(get_options()->MaxMemInQueues / 10);
  2275. rend_cache_clean_v2_descs_as_dir(now, bytes_to_remove);
  2276. alloc -= rend_cache_total;
  2277. alloc += rend_cache_get_total_allocation();
  2278. }
  2279. if (geoip_client_cache_total > get_options()->MaxMemInQueues / 5) {
  2280. const size_t bytes_to_remove =
  2281. geoip_client_cache_total -
  2282. (size_t)(get_options()->MaxMemInQueues / 10);
  2283. alloc -= geoip_client_cache_handle_oom(now, bytes_to_remove);
  2284. }
  2285. circuits_handle_oom(alloc);
  2286. return 1;
  2287. }
  2288. }
  2289. return 0;
  2290. }
  2291. /** Return true if we've been under memory pressure in the last
  2292. * MEMORY_PRESSURE_INTERVAL seconds. */
  2293. int
  2294. have_been_under_memory_pressure(void)
  2295. {
  /* True iff the most recent pressure was recorded no more than
   * MEMORY_PRESSURE_INTERVAL seconds ago. */
  return last_time_under_memory_pressure + MEMORY_PRESSURE_INTERVAL
    >= approx_time();
  2298. }
  2299. /**
  2300. * Update the number of cells available on the circuit's n_chan or p_chan's
  2301. * circuit mux.
  2302. */
  2303. void
  2304. update_circuit_on_cmux_(circuit_t *circ, cell_direction_t direction,
  2305. const char *file, int lineno)
  2306. {
  2307. channel_t *chan = NULL;
  2308. or_circuit_t *or_circ = NULL;
  2309. circuitmux_t *cmux = NULL;
  2310. tor_assert(circ);
  2311. /* Okay, get the channel */
  2312. if (direction == CELL_DIRECTION_OUT) {
  2313. chan = circ->n_chan;
  2314. } else {
  2315. or_circ = TO_OR_CIRCUIT(circ);
  2316. chan = or_circ->p_chan;
  2317. }
  2318. tor_assert(chan);
  2319. tor_assert(chan->cmux);
  2320. /* Now get the cmux */
  2321. cmux = chan->cmux;
  2322. /* Cmux sanity check */
  2323. if (! circuitmux_is_circuit_attached(cmux, circ)) {
  2324. log_warn(LD_BUG, "called on non-attached circuit from %s:%d",
  2325. file, lineno);
  2326. return;
  2327. }
  2328. tor_assert(circuitmux_attached_circuit_direction(cmux, circ) == direction);
  2329. assert_cmux_ok_paranoid(chan);
  2330. /* Update the number of cells we have for the circuit mux */
  2331. if (direction == CELL_DIRECTION_OUT) {
  2332. circuitmux_set_num_cells(cmux, circ, circ->n_chan_cells.n);
  2333. } else {
  2334. circuitmux_set_num_cells(cmux, circ, or_circ->p_chan_cells.n);
  2335. }
  2336. assert_cmux_ok_paranoid(chan);
  2337. }
  2338. /** Remove all circuits from the cmux on <b>chan</b>.
  2339. *
  2340. * If <b>circuits_out</b> is non-NULL, add all detached circuits to
  2341. * <b>circuits_out</b>.
  2342. **/
  2343. void
  2344. channel_unlink_all_circuits(channel_t *chan, smartlist_t *circuits_out)
  2345. {
  2346. tor_assert(chan);
  2347. tor_assert(chan->cmux);
  2348. circuitmux_detach_all_circuits(chan->cmux, circuits_out);
  2349. chan->num_n_circuits = 0;
  2350. chan->num_p_circuits = 0;
  2351. }
  2352. /** Block (if <b>block</b> is true) or unblock (if <b>block</b> is false)
  2353. * every edge connection that is using <b>circ</b> to write to <b>chan</b>,
  2354. * and start or stop reading as appropriate.
  2355. *
  2356. * If <b>stream_id</b> is nonzero, block only the edge connection whose
  2357. * stream_id matches it.
  2358. *
  2359. * Returns the number of streams whose status we changed.
  2360. */
  2361. static int
  2362. set_streams_blocked_on_circ(circuit_t *circ, channel_t *chan,
  2363. int block, streamid_t stream_id)
  2364. {
  2365. edge_connection_t *edge = NULL;
  2366. int n = 0;
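  /* Pick the list of edge streams that write toward chan: on an origin
   * circuit these are the p_streams (application streams whose data flows
   * out via n_chan); on an or_circuit they are the n_streams (exit
   * connections whose data flows back toward the client via p_chan). */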
  2367. if (circ->n_chan == chan) {
  2368. circ->streams_blocked_on_n_chan = block;
  2369. if (CIRCUIT_IS_ORIGIN(circ))
  2370. edge = TO_ORIGIN_CIRCUIT(circ)->p_streams;
  2371. } else {
  2372. circ->streams_blocked_on_p_chan = block;
  2373. tor_assert(!CIRCUIT_IS_ORIGIN(circ));
  2374. edge = TO_OR_CIRCUIT(circ)->n_streams;
  2375. }
  2376. for (; edge; edge = edge->next_stream) {
  2377. connection_t *conn = TO_CONN(edge);
  2378. if (stream_id && edge->stream_id != stream_id)
  2379. continue;
  2380. if (edge->edge_blocked_on_circ != block) {
  2381. ++n;
  2382. edge->edge_blocked_on_circ = block;
  2383. }
  2384. if (!conn->read_event) {
  2385. /* This connection is a placeholder for something; probably a DNS
  2386. * request. It can't actually stop or start reading.*/
  2387. continue;
  2388. }
  2389. if (block) {
  2390. if (connection_is_reading(conn))
  2391. connection_stop_reading(conn);
  2392. } else {
  2393. /* Is this right? */
  2394. if (!connection_is_reading(conn))
  2395. connection_start_reading(conn);
  2396. }
  2397. }
  2398. return n;
  2399. }
  2400. /** Extract the command from a packed cell. */
  2401. static uint8_t
  2402. packed_cell_get_command(const packed_cell_t *cell, int wide_circ_ids)
  2403. {
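  /* A packed cell starts with the circuit ID (4 bytes when wide circuit
   * IDs are in use, 2 bytes otherwise), immediately followed by the
   * one-byte command. */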
  2404. if (wide_circ_ids) {
  2405. return get_uint8(cell->body+4);
  2406. } else {
  2407. return get_uint8(cell->body+2);
  2408. }
  2409. }
  2410. /** Extract the circuit ID from a packed cell. */
  2411. circid_t
  2412. packed_cell_get_circid(const packed_cell_t *cell, int wide_circ_ids)
  2413. {
  2414. if (wide_circ_ids) {
  2415. return ntohl(get_uint32(cell->body));
  2416. } else {
  2417. return ntohs(get_uint16(cell->body));
  2418. }
  2419. }
  2420. /** Pull as many cells as possible (but no more than <b>max</b>) from the
 * queue of the first active circuit on <b>chan</b>, and write them to
 * <b>chan</b>->outbuf.  Return the number of cells written.  Advance
 * the active circuit pointer to the next active circuit in the ring. */
MOCK_IMPL(int,
channel_flush_from_first_active_circuit, (channel_t *chan, int max))
{
  circuitmux_t *cmux = NULL;
  int n_flushed = 0;
  cell_queue_t *queue;
  destroy_cell_queue_t *destroy_queue=NULL;
  circuit_t *circ;
  or_circuit_t *or_circ;
  int streams_blocked;
  packed_cell_t *cell;

  /* Get the cmux */
  tor_assert(chan);
  tor_assert(chan->cmux);
  cmux = chan->cmux;

  /* Main loop: pick a circuit, send a cell, update the cmux */
  while (n_flushed < max) {
    circ = circuitmux_get_first_active_circuit(cmux, &destroy_queue);
    if (destroy_queue) {
      destroy_cell_t *dcell;
      /* this code is duplicated from some of the logic below. Ugly! XXXX */
      /* If we are given a destroy_queue here, then it is required to be
       * nonempty... */
      tor_assert(destroy_queue->n > 0);
      dcell = destroy_cell_queue_pop(destroy_queue);
      /* ...and pop() will always yield a cell from a nonempty queue. */
      tor_assert(dcell);
      /* frees dcell */
      cell = destroy_cell_to_packed_cell(dcell, chan->wide_circ_ids);
      /* frees cell */
      channel_write_packed_cell(chan, cell);
      /* Update the cmux destroy counter */
      circuitmux_notify_xmit_destroy(cmux);
      cell = NULL;
      ++n_flushed;
      continue;
    }
    /* If it returns NULL, no cells left to send */
    if (!circ) break;
    assert_cmux_ok_paranoid(chan);

    if (circ->n_chan == chan) {
      queue = &circ->n_chan_cells;
      streams_blocked = circ->streams_blocked_on_n_chan;
    } else {
      or_circ = TO_OR_CIRCUIT(circ);
      tor_assert(or_circ->p_chan == chan);
      queue = &TO_OR_CIRCUIT(circ)->p_chan_cells;
      streams_blocked = circ->streams_blocked_on_p_chan;
    }

    /* Circuitmux told us this was active, so it should have cells */
    if (/*BUG(*/ queue->n == 0 /*)*/) {
      log_warn(LD_BUG, "Found a supposedly active circuit with no cells "
               "to send. Trying to recover.");
      circuitmux_set_num_cells(cmux, circ, 0);
      if (! circ->marked_for_close)
        circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
      continue;
    }

    tor_assert(queue->n > 0);

    /*
     * Get just one cell here; once we've sent it, that can change the circuit
     * selection, so we have to loop around for another even if this circuit
     * has more than one.
     */
    cell = cell_queue_pop(queue);

    /* Calculate the exact time that this cell has spent in the queue. */
    if (get_options()->CellStatistics ||
        get_options()->TestingEnableCellStatsEvent) {
      uint32_t msec_waiting;
      uint32_t msec_now = (uint32_t)monotime_coarse_absolute_msec();
      msec_waiting = msec_now - cell->inserted_time;

      if (get_options()->CellStatistics && !CIRCUIT_IS_ORIGIN(circ)) {
        or_circ = TO_OR_CIRCUIT(circ);
        or_circ->total_cell_waiting_time += msec_waiting;
        or_circ->processed_cells++;
      }

      if (get_options()->TestingEnableCellStatsEvent) {
        uint8_t command = packed_cell_get_command(cell, chan->wide_circ_ids);
        testing_cell_stats_entry_t *ent =
          tor_malloc_zero(sizeof(testing_cell_stats_entry_t));
        ent->command = command;
        ent->waiting_time = msec_waiting / 10;
        ent->removed = 1;
        if (circ->n_chan == chan)
          ent->exitward = 1;
        if (!circ->testing_cell_stats)
          circ->testing_cell_stats = smartlist_new();
        smartlist_add(circ->testing_cell_stats, ent);
      }
    }

    /* If we just flushed our queue and this circuit is used for a
     * tunneled directory request, possibly advance its state. */
    if (queue->n == 0 && chan->dirreq_id)
      geoip_change_dirreq_state(chan->dirreq_id,
                                DIRREQ_TUNNELED,
                                DIRREQ_CIRC_QUEUE_FLUSHED);

    /* Now send the cell */
    channel_write_packed_cell(chan, cell);
    cell = NULL;

    /*
     * Don't packed_cell_free_unchecked(cell) here because the channel will
     * do so when it gets out of the channel queue (probably already did, in
     * which case that was an immediate double-free bug).
     */

    /* Update the counter */
    ++n_flushed;

    /*
     * Now update the cmux; tell it we've just sent a cell, and how many
     * we have left.
     */
    circuitmux_notify_xmit_cells(cmux, circ, 1);
    circuitmux_set_num_cells(cmux, circ, queue->n);
    if (queue->n == 0)
      log_debug(LD_GENERAL, "Made a circuit inactive.");

    /* Is the cell queue low enough to unblock all the streams that are
     * waiting to write to this circuit? */
    if (streams_blocked && queue->n <= CELL_QUEUE_LOWWATER_SIZE)
      set_streams_blocked_on_circ(circ, chan, 0, 0); /* unblock streams */

    /* If n_flushed < max still, loop around and pick another circuit */
  }

  /* Okay, we're done sending now */
  assert_cmux_ok_paranoid(chan);

  return n_flushed;
}
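
/* Illustrative usage (a sketch, not a call site in this file): a
 * scheduler-style caller that wants to drain at most 32 cells from an open
 * channel with an attached cmux could simply do:
 *
 *   int n_flushed = channel_flush_from_first_active_circuit(chan, 32);
 *
 * Each flushed cell advances the cmux's active-circuit pointer, so repeated
 * calls spread the work across the channel's active circuits rather than
 * draining one circuit at a time. */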

#if 0
/** Indicate the current preferred cap for middle circuits; zero disables
 * the cap.  Right now it's just a constant, ORCIRC_MAX_MIDDLE_CELLS, but
 * the logic in append_cell_to_circuit_queue() is written to be correct
 * if we want to base it on a consensus param or something that might change
 * in the future.
 */
static int
get_max_middle_cells(void)
{
  return ORCIRC_MAX_MIDDLE_CELLS;
}
#endif

/** Add <b>cell</b> to the queue of <b>circ</b> writing to <b>chan</b>
 * transmitting in <b>direction</b>. */
void
append_cell_to_circuit_queue(circuit_t *circ, channel_t *chan,
                             cell_t *cell, cell_direction_t direction,
                             streamid_t fromstream)
{
  or_circuit_t *orcirc = NULL;
  cell_queue_t *queue;
  int streams_blocked;
#if 0
  uint32_t tgt_max_middle_cells, p_len, n_len, tmp, hard_max_middle_cells;
#endif
  int exitward;

  if (circ->marked_for_close)
    return;

  exitward = (direction == CELL_DIRECTION_OUT);
  if (exitward) {
    queue = &circ->n_chan_cells;
    streams_blocked = circ->streams_blocked_on_n_chan;
  } else {
    orcirc = TO_OR_CIRCUIT(circ);
    queue = &orcirc->p_chan_cells;
    streams_blocked = circ->streams_blocked_on_p_chan;
  }

  /*
   * Disabling this for now because of a possible guard discovery attack
   */
#if 0
  /* Are we a middle circuit about to exceed ORCIRC_MAX_MIDDLE_CELLS? */
  if ((circ->n_chan != NULL) && CIRCUIT_IS_ORCIRC(circ)) {
    orcirc = TO_OR_CIRCUIT(circ);
    if (orcirc->p_chan) {
      /* We are a middle circuit if we have both n_chan and p_chan */
      /* We'll need to know the current preferred maximum */
      tgt_max_middle_cells = get_max_middle_cells();
      if (tgt_max_middle_cells > 0) {
        /* Do we need to initialize middle_max_cells? */
        if (orcirc->max_middle_cells == 0) {
          orcirc->max_middle_cells = tgt_max_middle_cells;
        } else {
          if (tgt_max_middle_cells > orcirc->max_middle_cells) {
            /* If we want to increase the cap, we can do so right away */
            orcirc->max_middle_cells = tgt_max_middle_cells;
          } else if (tgt_max_middle_cells < orcirc->max_middle_cells) {
            /*
             * If we're shrinking the cap, we can't shrink past either queue;
             * compare tgt_max_middle_cells rather than tgt_max_middle_cells *
             * ORCIRC_MAX_MIDDLE_KILL_THRESH so the queues don't shrink enough
             * to generate spurious warnings, either.
             */
            n_len = circ->n_chan_cells.n;
            p_len = orcirc->p_chan_cells.n;
            tmp = tgt_max_middle_cells;
            if (tmp < n_len) tmp = n_len;
            if (tmp < p_len) tmp = p_len;
            orcirc->max_middle_cells = tmp;
          }
          /* else no change */
        }
      } else {
        /* tgt_max_middle_cells == 0 indicates we should disable the cap */
        orcirc->max_middle_cells = 0;
      }

      /* Now we know orcirc->max_middle_cells is set correctly */
      if (orcirc->max_middle_cells > 0) {
        hard_max_middle_cells =
          (uint32_t)(((double)orcirc->max_middle_cells) *
                     ORCIRC_MAX_MIDDLE_KILL_THRESH);

        if ((unsigned)queue->n + 1 >= hard_max_middle_cells) {
          /* Queueing this cell would put the queue over the kill threshold */
          log_warn(LD_CIRC,
                   "Got a cell exceeding the hard cap of %u in the "
                   "%s direction on middle circ ID %u on chan ID "
                   U64_FORMAT "; killing the circuit.",
                   hard_max_middle_cells,
                   (direction == CELL_DIRECTION_OUT) ? "n" : "p",
                   (direction == CELL_DIRECTION_OUT) ?
                     circ->n_circ_id : orcirc->p_circ_id,
                   U64_PRINTF_ARG(
                     (direction == CELL_DIRECTION_OUT) ?
                       circ->n_chan->global_identifier :
                       orcirc->p_chan->global_identifier));
          circuit_mark_for_close(circ, END_CIRC_REASON_RESOURCELIMIT);
          return;
        } else if ((unsigned)queue->n + 1 == orcirc->max_middle_cells) {
          /* Only use ==, not >= for this test so we don't spam the log */
          log_warn(LD_CIRC,
                   "While trying to queue a cell, reached the soft cap of %u "
                   "in the %s direction on middle circ ID %u "
                   "on chan ID " U64_FORMAT ".",
                   orcirc->max_middle_cells,
                   (direction == CELL_DIRECTION_OUT) ? "n" : "p",
                   (direction == CELL_DIRECTION_OUT) ?
                     circ->n_circ_id : orcirc->p_circ_id,
                   U64_PRINTF_ARG(
                     (direction == CELL_DIRECTION_OUT) ?
                       circ->n_chan->global_identifier :
                       orcirc->p_chan->global_identifier));
        }
      }
    }
  }
#endif

  cell_queue_append_packed_copy(circ, queue, exitward, cell,
                                chan->wide_circ_ids, 1);

  if (PREDICT_UNLIKELY(cell_queues_check_size())) {
    /* We ran the OOM handler */
    if (circ->marked_for_close)
      return;
  }

  /* If we have too many cells on the circuit, we should stop reading from
   * the edge streams for a while. */
  if (!streams_blocked && queue->n >= CELL_QUEUE_HIGHWATER_SIZE)
    set_streams_blocked_on_circ(circ, chan, 1, 0); /* block streams */

  if (streams_blocked && fromstream) {
    /* This edge connection is apparently not blocked; block it. */
    set_streams_blocked_on_circ(circ, chan, 1, fromstream);
  }

  update_circuit_on_cmux(circ, direction);
  if (queue->n == 1) {
    /* This was the first cell added to the queue.  We just made this
     * circuit active. */
    log_debug(LD_GENERAL, "Made a circuit active.");
  }

  /* New way: mark this as having waiting cells for the scheduler */
  scheduler_channel_has_waiting_cells(chan);
}
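
/* Illustrative usage (a sketch, not a call site in this file): queueing a
 * freshly packaged relay cell in the exit-ward direction.  `fromstream` is 0
 * here because the cell is not attributed to a particular edge stream:
 *
 *   append_cell_to_circuit_queue(circ, circ->n_chan, &cell,
 *                                CELL_DIRECTION_OUT, 0);
 *
 * Once the queue reaches CELL_QUEUE_HIGHWATER_SIZE the circuit's edge
 * streams are blocked, and they stay blocked until the flush path above
 * drains the queue back down to CELL_QUEUE_LOWWATER_SIZE. */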

/** Append an encoded value of <b>addr</b> to <b>payload_out</b>, which must
 * have at least 18 bytes of free space.  The encoding is, as specified in
 * tor-spec.txt:
 *   RESOLVED_TYPE_IPV4 or RESOLVED_TYPE_IPV6  [1 byte]
 *   LENGTH                                    [1 byte]
 *   ADDRESS                                   [length bytes]
 * Return the number of bytes added, or -1 on error */
int
append_address_to_payload(uint8_t *payload_out, const tor_addr_t *addr)
{
  uint32_t a;
  switch (tor_addr_family(addr)) {
  case AF_INET:
    payload_out[0] = RESOLVED_TYPE_IPV4;
    payload_out[1] = 4;
    a = tor_addr_to_ipv4n(addr);
    memcpy(payload_out+2, &a, 4);
    return 6;
  case AF_INET6:
    payload_out[0] = RESOLVED_TYPE_IPV6;
    payload_out[1] = 16;
    memcpy(payload_out+2, tor_addr_to_in6_addr8(addr), 16);
    return 18;
  case AF_UNSPEC:
  default:
    return -1;
  }
}
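
/* Worked example (a sketch, not part of the original file): encoding an IPv4
 * address into a RESOLVED-style payload and checking the space consumed.
 *
 *   uint8_t buf[18];
 *   tor_addr_t a;
 *   tor_addr_from_ipv4h(&a, 0x7f000001);      // 127.0.0.1, host order
 *   int len = append_address_to_payload(buf, &a);
 *   // len == 6: one type byte, one length byte (4), then the four
 *   // address bytes in network order.
 */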

/** Given <b>payload_len</b> bytes at <b>payload</b>, starting with an address
 * encoded as by append_address_to_payload(), try to decode the address into
 * *<b>addr_out</b>.  Return the next byte in the payload after the address on
 * success, or NULL on failure. */
const uint8_t *
decode_address_from_payload(tor_addr_t *addr_out, const uint8_t *payload,
                            int payload_len)
{
  if (payload_len < 2)
    return NULL;
  if (payload_len < 2+payload[1])
    return NULL;

  switch (payload[0]) {
  case RESOLVED_TYPE_IPV4:
    if (payload[1] != 4)
      return NULL;
    tor_addr_from_ipv4n(addr_out, get_uint32(payload+2));
    break;
  case RESOLVED_TYPE_IPV6:
    if (payload[1] != 16)
      return NULL;
    tor_addr_from_ipv6_bytes(addr_out, (char*)(payload+2));
    break;
  default:
    tor_addr_make_unspec(addr_out);
    break;
  }
  return payload + 2 + payload[1];
}
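
/* Illustrative usage (a sketch, not a call site in this file): walking a
 * payload that may contain several encoded addresses back to back.
 *
 *   const uint8_t *cp = payload;
 *   tor_addr_t addr;
 *   while (cp && cp < payload + payload_len) {
 *     int remaining = (int)(payload + payload_len - cp);
 *     cp = decode_address_from_payload(&addr, cp, remaining);
 *     // On success `addr` holds the decoded address (or AF_UNSPEC for an
 *     // unrecognized type) and `cp` points just past it; NULL means the
 *     // remaining bytes were malformed or truncated.
 *   }
 */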

/** Remove all the cells queued on <b>circ</b> for <b>chan</b>. */
void
circuit_clear_cell_queue(circuit_t *circ, channel_t *chan)
{
  cell_queue_t *queue;
  cell_direction_t direction;

  if (circ->n_chan == chan) {
    queue = &circ->n_chan_cells;
    direction = CELL_DIRECTION_OUT;
  } else {
    or_circuit_t *orcirc = TO_OR_CIRCUIT(circ);
    tor_assert(orcirc->p_chan == chan);
    queue = &orcirc->p_chan_cells;
    direction = CELL_DIRECTION_IN;
  }

  /* Clear the queue */
  cell_queue_clear(queue);

  /* Update the cell counter in the cmux */
  if (chan->cmux && circuitmux_is_circuit_attached(chan->cmux, circ))
    update_circuit_on_cmux(circ, direction);
}
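
/* Illustrative usage (a sketch, not a call site in this file): before
 * detaching a circuit from one of its channels, a caller can drop any cells
 * still queued toward that channel so nothing stale gets flushed later:
 *
 *   circuit_clear_cell_queue(circ, circ->n_chan);
 */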

/** Fail with an assert if the circuit mux on chan is corrupt
 */
void
assert_circuit_mux_okay(channel_t *chan)
{
  tor_assert(chan);
  tor_assert(chan->cmux);

  circuitmux_assert_okay(chan->cmux);
}

/** Return 1 if we shouldn't restart reading on this circuit, even if
 * we get a SENDME.  Else return 0.
 */
static int
circuit_queue_streams_are_blocked(circuit_t *circ)
{
  if (CIRCUIT_IS_ORIGIN(circ)) {
    return circ->streams_blocked_on_n_chan;
  } else {
    return circ->streams_blocked_on_p_chan;
  }
}