channelpadding.c 29 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797
  1. /* Copyright (c) 2001 Matej Pfajfar.
  2. * Copyright (c) 2001-2004, Roger Dingledine.
  3. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
  4. * Copyright (c) 2007-2015, The Tor Project, Inc. */
  5. /* See LICENSE for licensing information */
  6. /* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
  7. * channelpadding_channel_to_channelinfo() */
  8. #define TOR_CHANNEL_INTERNAL_
  9. #include "or.h"
  10. #include "channel.h"
  11. #include "channelpadding.h"
  12. #include "channeltls.h"
  13. #include "config.h"
  14. #include "networkstatus.h"
  15. #include "connection.h"
  16. #include "connection_or.h"
  17. #include "main.h"
  18. #include "rephist.h"
  19. #include "router.h"
  20. #include "compat_time.h"
  21. #include <event2/event.h>
  22. #include "rendservice.h"
STATIC int32_t channelpadding_get_netflow_inactive_timeout_ms(
                                           const channel_t *);
STATIC int channelpadding_send_disable_command(channel_t *);
STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);

/** The total number of pending channelpadding timers */
static uint64_t total_timers_pending;

/** These are cached consensus parameters for netflow, refreshed by
 * channelpadding_new_consensus_params() on every consensus update so we
 * never do a consensus-parameter lookup on the per-connection fast path. */

/** The timeout lower bound that is allowed before sending padding */
static int consensus_nf_ito_low;
/** The timeout upper bound that is allowed before sending padding */
static int consensus_nf_ito_high;
/** The timeout lower bound that is allowed before sending reduced padding */
static int consensus_nf_ito_low_reduced;
/** The timeout upper bound that is allowed before sending reduced padding */
static int consensus_nf_ito_high_reduced;
/** The connection timeout between relays */
static int consensus_nf_conntimeout_relays;
/** The connection timeout for client connections */
static int consensus_nf_conntimeout_clients;
/** Should we pad before circuits are actually used for client data? */
static int consensus_nf_pad_before_usage;
/** Should we pad relay-to-relay connections? */
static int consensus_nf_pad_relays;
/** Should we pad tor2web connections? */
static int consensus_nf_pad_tor2web;
/** Should we pad rosos (single onion service, non-anonymous) connections? */
static int consensus_nf_pad_single_onion;

/* Unit-conversion constants for building struct timeval values. */
#define TOR_MSEC_PER_SEC 1000
#define TOR_USEC_PER_MSEC 1000

/**
 * How often do we get called by the connection housekeeping (ie: once
 * per second) */
#define TOR_HOUSEKEEPING_CALLBACK_MSEC 1000

/**
 * Additional extra time buffer on the housekeeping callback, since
 * it can be delayed. This extra slack is used to decide if we should
 * schedule a timer or wait for the next callback. */
#define TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC 100

/**
 * This macro tells us if either end of the channel is connected to a client.
 * (If we're not a server, we're definitely a client. If the channel thinks
 * its a client, use that. Then finally verify in the consensus).
 */
#define CHANNEL_IS_CLIENT(chan, options) \
  (!public_server_mode((options)) || channel_is_client(chan) || \
   !connection_or_digest_is_known_relay((chan)->identity_digest))
  69. /**
  70. * This function is called to update cached consensus parameters every time
  71. * there is a consensus update. This allows us to move the consensus param
  72. * search off of the critical path, so it does not need to be evaluated
  73. * for every single connection, every second.
  74. */
/**
 * This function is called to update cached consensus parameters every time
 * there is a consensus update. This allows us to move the consensus param
 * search off of the critical path, so it does not need to be evaluated
 * for every single connection, every second.
 *
 * <b>ns</b> may be NULL, in which case networkstatus_get_param() falls back
 * to the defaults given here.
 */
void
channelpadding_new_consensus_params(networkstatus_t *ns)
{
  /* Inactivity-timeout bounds (in ms) for normal padding. */
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
  consensus_nf_ito_low = networkstatus_get_param(ns, "nf_ito_low",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
  /* Note: the high bound is clamped so it can never go below the low
   * bound we just fetched. */
  consensus_nf_ito_high = networkstatus_get_param(ns, "nf_ito_high",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
      consensus_nf_ito_low,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);

  /* Inactivity-timeout bounds (in ms) for "reduced" padding mode. */
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000
  consensus_nf_ito_low_reduced =
      networkstatus_get_param(ns, "nf_ito_low_reduced",
          DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
          DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
          DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
  /* As above, the reduced high bound is clamped to the reduced low bound. */
  consensus_nf_ito_high_reduced =
      networkstatus_get_param(ns, "nf_ito_high_reduced",
          DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
          consensus_nf_ito_low_reduced,
          DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);

  /* Idle-connection timeouts (in seconds) between relays. */
#define CONNTIMEOUT_RELAYS_DFLT (60*60) // 1 hour
#define CONNTIMEOUT_RELAYS_MIN 60
#define CONNTIMEOUT_RELAYS_MAX (7*24*60*60) // 1 week
  consensus_nf_conntimeout_relays =
      networkstatus_get_param(ns, "nf_conntimeout_relays",
          CONNTIMEOUT_RELAYS_DFLT,
          CONNTIMEOUT_RELAYS_MIN,
          CONNTIMEOUT_RELAYS_MAX);

  /* Circuit-availability timeout (in seconds) for client connections. */
#define CIRCTIMEOUT_CLIENTS_DFLT (30*60) // 30 minutes
#define CIRCTIMEOUT_CLIENTS_MIN 60
#define CIRCTIMEOUT_CLIENTS_MAX (24*60*60) // 24 hours
  consensus_nf_conntimeout_clients =
      networkstatus_get_param(ns, "nf_conntimeout_clients",
          CIRCTIMEOUT_CLIENTS_DFLT,
          CIRCTIMEOUT_CLIENTS_MIN,
          CIRCTIMEOUT_CLIENTS_MAX);

  /* Boolean knobs: each is 0 or 1. */
  consensus_nf_pad_before_usage =
      networkstatus_get_param(ns, "nf_pad_before_usage", 1, 0, 1);
  consensus_nf_pad_relays =
      networkstatus_get_param(ns, "nf_pad_relays", 0, 0, 1);
  consensus_nf_pad_tor2web =
      networkstatus_get_param(ns,
          CHANNELPADDING_TOR2WEB_PARAM,
          CHANNELPADDING_TOR2WEB_DEFAULT, 0, 1);
  consensus_nf_pad_single_onion =
      networkstatus_get_param(ns,
          CHANNELPADDING_SOS_PARAM,
          CHANNELPADDING_SOS_DEFAULT, 0, 1);
}
  133. /**
  134. * Get a random netflow inactive timeout keepalive period in milliseconds,
  135. * the range for which is determined by consensus parameters, negotiation,
  136. * configuration, or default values. The consensus parameters enforce the
  137. * minimum possible value, to avoid excessively frequent padding.
  138. *
  139. * The ranges for this value were chosen to be low enough to ensure that
  140. * routers do not emit a new netflow record for a connection due to it
  141. * being idle.
  142. *
  143. * Specific timeout values for major routers are listed in Proposal 251.
  144. * No major router appeared capable of setting an inactive timeout below 10
  145. * seconds, so we set the defaults below that value, since we can always
  146. * scale back if it ends up being too much padding.
  147. *
  148. * Returns the next timeout period (in milliseconds) after which we should
  149. * send a padding packet, or 0 if padding is disabled.
  150. */
  151. STATIC int32_t
  152. channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
  153. {
  154. int low_timeout = consensus_nf_ito_low;
  155. int high_timeout = consensus_nf_ito_high;
  156. int X1, X2;
  157. if (low_timeout == 0 && low_timeout == high_timeout)
  158. return 0; // No padding
  159. /* If we have negotiated different timeout values, use those, but
  160. * don't allow them to be lower than the consensus ones */
  161. if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
  162. low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
  163. high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
  164. }
  165. if (low_timeout == high_timeout)
  166. return low_timeout; // No randomization
  167. /*
  168. * This MAX() hack is here because we apply the timeout on both the client
  169. * and the server. This creates the situation where the total time before
  170. * sending a packet in either direction is actually
  171. * min(client_timeout,server_timeout).
  172. *
  173. * If X is a random variable uniform from 0..R-1 (where R=high-low),
  174. * then Y=max(X,X) has Prob(Y == i) = (2.0*i + 1)/(R*R).
  175. *
  176. * If we create a third random variable Z=min(Y,Y), then it turns out that
  177. * Exp[Z] ~= Exp[X]. Here's a table:
  178. *
  179. * R Exp[X] Exp[Z] Exp[min(X,X)] Exp[max(X,X)]
  180. * 2000 999.5 1066 666.2 1332.8
  181. * 3000 1499.5 1599.5 999.5 1999.5
  182. * 5000 2499.5 2666 1666.2 3332.8
  183. * 6000 2999.5 3199.5 1999.5 3999.5
  184. * 7000 3499.5 3732.8 2332.8 4666.2
  185. * 8000 3999.5 4266.2 2666.2 5332.8
  186. * 10000 4999.5 5328 3332.8 6666.2
  187. * 15000 7499.5 7995 4999.5 9999.5
  188. * 20000 9900.5 10661 6666.2 13332.8
  189. *
  190. * In other words, this hack makes it so that when both the client and
  191. * the guard are sending this padding, then the averages work out closer
  192. * to the midpoint of the range, making the overhead easier to tune.
  193. * If only one endpoint is padding (for example: if the relay does not
  194. * support padding, but the client has set ConnectionPadding 1; or
  195. * if the relay does support padding, but the client has set
  196. * ReducedConnectionPadding 1), then the defense will still prevent
  197. * record splitting, but with less overhead than the midpoint
  198. * (as seen by the Exp[max(X,X)] column).
  199. *
  200. * To calculate average padding packet frequency (and thus overhead),
  201. * index into the table by picking a row based on R = high-low. Then,
  202. * use the appropriate column (Exp[Z] for two-sided padding, and
  203. * Exp[max(X,X)] for one-sided padding). Finally, take this value
  204. * and add it to the low timeout value. This value is the average
  205. * frequency which padding packets will be sent.
  206. */
  207. X1 = crypto_rand_int(high_timeout - low_timeout);
  208. X2 = crypto_rand_int(high_timeout - low_timeout);
  209. return low_timeout + MAX(X1, X2);
  210. }
  211. /**
  212. * Update this channel's padding settings based on the PADDING_NEGOTIATE
  213. * contents.
  214. *
  215. * Returns -1 on error; 1 on success.
  216. */
/**
 * Update this channel's padding settings based on the PADDING_NEGOTIATE
 * contents.
 *
 * Returns -1 on error; 1 on success.
 */
int
channelpadding_update_padding_for_channel(channel_t *chan,
                                          const channelpadding_negotiate_t *pad_vars)
{
  /* We only understand version 0 of the negotiation cell; reject anything
   * else (rate-limited to avoid log flooding from misbehaving peers). */
  if (pad_vars->version != 0) {
    static ratelim_t version_limit = RATELIM_INIT(600);

    log_fn_ratelim(&version_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
                   "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
    return -1;
  }

  // We should not allow malicious relays to disable or reduce padding for
  // us as clients. In fact, we should only accept this cell at all if we're
  // operating as a relay. Bridges should not accept it from relays, either
  // (only from their clients).
  if ((get_options()->BridgeRelay &&
       connection_or_digest_is_known_relay(chan->identity_digest)) ||
      !get_options()->ORPort_set) {
    static ratelim_t relay_limit = RATELIM_INIT(600);

    log_fn_ratelim(&relay_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
                   "Got a PADDING_NEGOTIATE from relay at %s (%s). "
                   "This should not happen.",
                   chan->get_remote_descr(chan, 0),
                   hex_str(chan->identity_digest, DIGEST_LEN));
    return -1;
  }

  /* START enables padding; any other command (i.e. STOP) disables it. */
  chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);

  /* Min must not be lower than the current consensus parameter
     nf_ito_low. */
  chan->padding_timeout_low_ms = MAX(consensus_nf_ito_low,
                                     pad_vars->ito_low_ms);

  /* Max must not be lower than ito_low_ms */
  chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
                                      pad_vars->ito_high_ms);

  log_fn(LOG_INFO,LD_OR,
         "Negotiated padding=%d, lo=%d, hi=%d on "U64_FORMAT,
         chan->padding_enabled, chan->padding_timeout_low_ms,
         chan->padding_timeout_high_ms,
         U64_PRINTF_ARG(chan->global_identifier));

  return 1;
}
  257. /**
  258. * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
  259. * to send padding.
  260. *
  261. * Returns -1 on error, 0 on success.
  262. */
  263. STATIC int
  264. channelpadding_send_disable_command(channel_t *chan)
  265. {
  266. channelpadding_negotiate_t disable;
  267. cell_t cell;
  268. tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
  269. MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
  270. memset(&cell, 0, sizeof(cell_t));
  271. memset(&disable, 0, sizeof(channelpadding_negotiate_t));
  272. cell.command = CELL_PADDING_NEGOTIATE;
  273. channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);
  274. if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
  275. &disable) < 0)
  276. return -1;
  277. if (chan->write_cell(chan, &cell) == 1)
  278. return 0;
  279. else
  280. return -1;
  281. }
  282. /**
  283. * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
  284. * resume sending padding at some rate.
  285. *
  286. * Returns -1 on error, 0 on success.
  287. */
  288. int
  289. channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
  290. uint16_t high_timeout)
  291. {
  292. channelpadding_negotiate_t enable;
  293. cell_t cell;
  294. tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
  295. MIN_LINK_PROTO_FOR_CHANNEL_PADDING);
  296. memset(&cell, 0, sizeof(cell_t));
  297. memset(&enable, 0, sizeof(channelpadding_negotiate_t));
  298. cell.command = CELL_PADDING_NEGOTIATE;
  299. channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
  300. channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
  301. channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);
  302. if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
  303. &enable) < 0)
  304. return -1;
  305. if (chan->write_cell(chan, &cell) == 1)
  306. return 0;
  307. else
  308. return -1;
  309. }
  310. /**
  311. * Sends a CELL_PADDING cell on a channel if it has been idle since
  312. * our callback was scheduled.
  313. *
  314. * This function also clears the pending padding timer and the callback
  315. * flags.
  316. */
/**
 * Sends a CELL_PADDING cell on a channel if it has been idle since
 * our callback was scheduled.
 *
 * This function also clears the pending padding timer and the callback
 * flags.
 */
static void
channelpadding_send_padding_cell_for_callback(channel_t *chan)
{
  cell_t cell;

  /* Check that the channel is still valid and open */
  if (!chan || chan->state != CHANNEL_STATE_OPEN) {
    if (chan) chan->pending_padding_callback = 0;

    log_fn(LOG_INFO,LD_OR,
           "Scheduled a netflow padding cell, but connection already closed.");
    return;
  }

  /* We should have a pending callback flag set. */
  if (BUG(chan->pending_padding_callback == 0))
    return;

  chan->pending_padding_callback = 0;

  /* If the padding deadline was cleared, or there is already queued
   * traffic, the channel was active after we scheduled this callback,
   * so no padding is needed: just reset the deadline and bail. */
  if (monotime_coarse_is_zero(&chan->next_padding_time) ||
      chan->has_queued_writes(chan)) {
    /* We must have been active before the timer fired */
    monotime_coarse_zero(&chan->next_padding_time);
    return;
  }

  {
    monotime_coarse_t now;
    monotime_coarse_get(&now);

    /* Log how long the channel was idle and how far from the scheduled
     * padding deadline the timer actually fired. */
    log_fn(LOG_INFO,LD_OR,
           "Sending netflow keepalive on "U64_FORMAT" to %s (%s) after "
           I64_FORMAT" ms. Delta "I64_FORMAT"ms",
           U64_PRINTF_ARG(chan->global_identifier),
           safe_str_client(chan->get_remote_descr(chan, 0)),
           safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
           I64_PRINTF_ARG(monotime_coarse_diff_msec(&chan->timestamp_xfer,&now)),
           I64_PRINTF_ARG(
               monotime_coarse_diff_msec(&chan->next_padding_time,&now)));
  }

  /* Clear the timer */
  monotime_coarse_zero(&chan->next_padding_time);

  /* Send the padding cell. This will cause the channel to get a
   * fresh timestamp_active */
  memset(&cell, 0, sizeof(cell));
  cell.command = CELL_PADDING;
  chan->write_cell(chan, &cell);
}
  359. /**
  360. * tor_timer callback function for us to send padding on an idle channel.
  361. *
  362. * This function just obtains the channel from the callback handle, ensures
  363. * it is still valid, and then hands it off to
  364. * channelpadding_send_padding_cell_for_callback(), which checks if
  365. * the channel is still idle before sending padding.
  366. */
/**
 * tor_timer callback function for us to send padding on an idle channel.
 *
 * This function just obtains the channel from the callback handle, ensures
 * it is still valid, and then hands it off to
 * channelpadding_send_padding_cell_for_callback(), which checks if
 * the channel is still idle before sending padding.
 */
static void
channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
                                     const struct monotime_t *when)
{
  /* The handle yields NULL if the channel was freed since scheduling. */
  channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
  (void)timer; (void)when;

  if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
    /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
     * for channels. Then we could get rid of the channeltls dependency */
    tor_assert(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn)->magic ==
               OR_CONNECTION_MAGIC);
    assert_connection_ok(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), approx_time());

    channelpadding_send_padding_cell_for_callback(chan);
  } else {
    log_fn(LOG_INFO,LD_OR,
           "Channel closed while waiting for timer.");
  }

  /* This timer has fired; it is no longer pending. */
  total_timers_pending--;
}
  386. /**
  387. * Schedules a callback to send padding on a channel in_ms milliseconds from
  388. * now.
  389. *
  390. * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
  391. * sent the packet immediately without a timer, and
  392. * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
  393. */
  394. static channelpadding_decision_t
  395. channelpadding_schedule_padding(channel_t *chan, int in_ms)
  396. {
  397. struct timeval timeout;
  398. tor_assert(!chan->pending_padding_callback);
  399. if (in_ms <= 0) {
  400. chan->pending_padding_callback = 1;
  401. channelpadding_send_padding_cell_for_callback(chan);
  402. return CHANNELPADDING_PADDING_SENT;
  403. }
  404. timeout.tv_sec = in_ms/TOR_MSEC_PER_SEC;
  405. timeout.tv_usec = (in_ms%TOR_USEC_PER_MSEC)*TOR_USEC_PER_MSEC;
  406. if (!chan->timer_handle) {
  407. chan->timer_handle = channel_handle_new(chan);
  408. }
  409. if (chan->padding_timer) {
  410. timer_set_cb(chan->padding_timer,
  411. channelpadding_send_padding_callback,
  412. chan->timer_handle);
  413. } else {
  414. chan->padding_timer = timer_new(channelpadding_send_padding_callback,
  415. chan->timer_handle);
  416. }
  417. timer_schedule(chan->padding_timer, &timeout);
  418. rep_hist_padding_count_timers(++total_timers_pending);
  419. chan->pending_padding_callback = 1;
  420. return CHANNELPADDING_PADDING_SCHEDULED;
  421. }
  422. /**
  423. * Calculates the number of milliseconds from now to schedule a padding cell.
  424. *
  425. * Returns the number of milliseconds from now (relative) to schedule the
  426. * padding callback. If the padding timer is more than 1.1 seconds in the
  427. * future, we return -1, to avoid scheduling excessive callbacks. If padding
  428. * is disabled in the consensus, we return -2.
  429. *
  430. * Side-effects: Updates chan->next_padding_time_ms, storing an (absolute, not
  431. * relative) millisecond representation of when we should send padding, unless
  432. * other activity happens first. This side-effect allows us to avoid
  433. * scheduling a libevent callback until we're within 1.1 seconds of the padding
  434. * time.
  435. */
  436. #define CHANNELPADDING_TIME_LATER -1
  437. #define CHANNELPADDING_TIME_DISABLED -2
/**
 * Calculates the number of milliseconds from now to schedule a padding cell.
 *
 * Returns the number of milliseconds from now (relative) to schedule the
 * padding callback. If the padding timer is more than 1.1 seconds in the
 * future, we return -1, to avoid scheduling excessive callbacks. If padding
 * is disabled in the consensus, we return -2.
 *
 * Side-effects: Updates chan->next_padding_time, storing an (absolute, not
 * relative) representation of when we should send padding, unless other
 * activity happens first. This side-effect allows us to avoid scheduling a
 * libevent callback until we're within 1.1 seconds of the padding time.
 */
STATIC int64_t
channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
{
  monotime_coarse_t now;
  monotime_coarse_get(&now);

  /* A zeroed next_padding_time means no padding deadline is currently set;
   * pick a fresh randomized timeout relative to the last transfer time. */
  if (monotime_coarse_is_zero(&chan->next_padding_time)) {
    /* If the below line or crypto_rand_int() shows up on a profile,
     * we can avoid getting a timeout until we're at least nf_ito_lo
     * from a timeout window. That will prevent us from setting timers
     * on connections that were active up to 1.5 seconds ago.
     * Idle connections should only call this once every 5.5s on average
     * though, so that might be a micro-optimization for little gain. */
    int32_t padding_timeout =
        channelpadding_get_netflow_inactive_timeout_ms(chan);

    if (!padding_timeout)
      return CHANNELPADDING_TIME_DISABLED;

    monotime_coarse_add_msec(&chan->next_padding_time,
                             &chan->timestamp_xfer,
                             padding_timeout);
  }

  const int64_t ms_till_pad =
      monotime_coarse_diff_msec(&now, &chan->next_padding_time);

  /* If the next padding time is beyond the maximum possible consensus value,
   * then this indicates a clock jump, so just send padding now. This is
   * better than using monotonic time because we want to avoid the situation
   * where we wait around forever for monotonic time to move forward after
   * a clock jump far into the past.
   */
  if (ms_till_pad > DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
    tor_fragile_assert();
    log_warn(LD_BUG,
             "Channel padding timeout scheduled "I64_FORMAT"ms in the future. "
             "Did the monotonic clock just jump?",
             I64_PRINTF_ARG(ms_till_pad));
    return 0; /* Clock jumped: Send padding now */
  }

  /* If the timeout will expire before the next time we're called (1000ms
     from now, plus some slack), then calculate the number of milliseconds
     from now which we should send padding, so we can schedule a callback
     then.
   */
  if (ms_till_pad < (TOR_HOUSEKEEPING_CALLBACK_MSEC +
                     TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC)) {
    /* If the padding time is in the past, that means that libevent delayed
     * calling the once-per-second callback due to other work taking too long.
     * See https://bugs.torproject.org/22212 and
     * https://bugs.torproject.org/16585. This is a systemic problem
     * with being single-threaded, but let's emit a notice if this
     * is long enough in the past that we might have missed a netflow window,
     * and allowed a router to emit a netflow frame, just so we don't forget
     * about it entirely.. */
#define NETFLOW_MISSED_WINDOW (150000 - DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH)
    if (ms_till_pad < 0) {
      int severity = (ms_till_pad < -NETFLOW_MISSED_WINDOW)
                      ? LOG_NOTICE : LOG_INFO;
      log_fn(severity, LD_OR,
             "Channel padding timeout scheduled "I64_FORMAT"ms in the past. ",
             I64_PRINTF_ARG(-ms_till_pad));
      return 0; /* Clock jumped: Send padding now */
    }

    return ms_till_pad;
  }

  /* Deadline is comfortably beyond the next housekeeping pass; no timer
   * needed yet. */
  return CHANNELPADDING_TIME_LATER;
}
  502. /**
  503. * Returns a randomized value for channel idle timeout in seconds.
  504. * The channel idle timeout governs how quickly we close a channel
  505. * after its last circuit has disappeared.
  506. *
  507. * There are three classes of channels:
  508. * 1. Client+non-canonical. These live for 3-4.5 minutes
  509. * 2. relay to relay. These live for 45-75 min by default
  510. * 3. Reduced padding clients. These live for 1.5-2.25 minutes.
  511. *
  512. * Also allows the default relay-to-relay value to be controlled by the
  513. * consensus.
  514. */
  515. unsigned int
  516. channelpadding_get_channel_idle_timeout(const channel_t *chan,
  517. int is_canonical)
  518. {
  519. const or_options_t *options = get_options();
  520. unsigned int timeout;
  521. /* Non-canonical and client channels only last for 3-4.5 min when idle */
  522. if (!is_canonical || CHANNEL_IS_CLIENT(chan, options)) {
  523. #define CONNTIMEOUT_CLIENTS_BASE 180 // 3 to 4.5 min
  524. timeout = CONNTIMEOUT_CLIENTS_BASE
  525. + crypto_rand_int(CONNTIMEOUT_CLIENTS_BASE/2);
  526. } else { // Canonical relay-to-relay channels
  527. // 45..75min or consensus +/- 25%
  528. timeout = consensus_nf_conntimeout_relays;
  529. timeout = 3*timeout/4 + crypto_rand_int(timeout/2);
  530. }
  531. /* If ReducedConnectionPadding is set, we want to halve the duration of
  532. * the channel idle timeout, since reducing the additional time that
  533. * a channel stays open will reduce the total overhead for making
  534. * new channels. This reduction in overhead/channel expense
  535. * is important for mobile users. The option cannot be set by relays.
  536. *
  537. * We also don't reduce any values for timeout that the user explicitly
  538. * set.
  539. */
  540. if (options->ReducedConnectionPadding
  541. && !options->CircuitsAvailableTimeout) {
  542. timeout /= 2;
  543. }
  544. return timeout;
  545. }
  546. /**
  547. * This function controls how long we keep idle circuits open,
  548. * and how long we build predicted circuits. This behavior is under
  549. * the control of channelpadding because circuit availability is the
  550. * dominant factor in channel lifespan, which influences total padding
  551. * overhead.
  552. *
  553. * Returns a randomized number of seconds in a range from
  554. * CircuitsAvailableTimeout to 2*CircuitsAvailableTimeout. This value is halved
  555. * if ReducedConnectionPadding is set. The default value of
  556. * CircuitsAvailableTimeout can be controlled by the consensus.
  557. */
  558. int
  559. channelpadding_get_circuits_available_timeout(void)
  560. {
  561. const or_options_t *options = get_options();
  562. int timeout = options->CircuitsAvailableTimeout;
  563. if (!timeout) {
  564. timeout = consensus_nf_conntimeout_clients;
  565. /* If ReducedConnectionPadding is set, we want to halve the duration of
  566. * the channel idle timeout, since reducing the additional time that
  567. * a channel stays open will reduce the total overhead for making
  568. * new connections. This reduction in overhead/connection expense
  569. * is important for mobile users. The option cannot be set by relays.
  570. *
  571. * We also don't reduce any values for timeout that the user explicitly
  572. * set.
  573. */
  574. if (options->ReducedConnectionPadding) {
  575. // half the value to 15..30min by default
  576. timeout /= 2;
  577. }
  578. }
  579. // 30..60min by default
  580. timeout = timeout + crypto_rand_int(timeout);
  581. return timeout;
  582. }
  583. /**
  584. * Calling this function on a channel causes it to tell the other side
  585. * not to send padding, and disables sending padding from this side as well.
  586. */
  587. void
  588. channelpadding_disable_padding_on_channel(channel_t *chan)
  589. {
  590. chan->padding_enabled = 0;
  591. // Send cell to disable padding on the other end
  592. channelpadding_send_disable_command(chan);
  593. }
  594. /**
  595. * Calling this function on a channel causes it to tell the other side
  596. * not to send padding, and reduces the rate that padding is sent from
  597. * this side.
  598. */
  599. void
  600. channelpadding_reduce_padding_on_channel(channel_t *chan)
  601. {
  602. /* Padding can be forced and reduced by clients, regardless of if
  603. * the channel supports it. So we check for support here before
  604. * sending any commands. */
  605. if (chan->padding_enabled) {
  606. channelpadding_send_disable_command(chan);
  607. }
  608. chan->padding_timeout_low_ms = consensus_nf_ito_low_reduced;
  609. chan->padding_timeout_high_ms = consensus_nf_ito_high_reduced;
  610. log_fn(LOG_INFO,LD_OR,
  611. "Reduced padding on channel "U64_FORMAT": lo=%d, hi=%d",
  612. U64_PRINTF_ARG(chan->global_identifier),
  613. chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
  614. }
  615. /**
  616. * This function is called once per second by run_connection_housekeeping(),
  617. * but only if the channel is still open, valid, and non-wedged.
  618. *
  619. * It decides if and when we should send a padding cell, and if needed,
  620. * schedules a callback to send that cell at the appropriate time.
  621. *
  622. * Returns an enum that represents the current padding decision state.
  623. * Return value is currently used only by unit tests.
  624. */
/**
 * This function is called once per second by run_connection_housekeeping(),
 * but only if the channel is still open, valid, and non-wedged.
 *
 * It decides if and when we should send a padding cell, and if needed,
 * schedules a callback to send that cell at the appropriate time.
 *
 * Returns an enum that represents the current padding decision state.
 * Return value is currently used only by unit tests.
 */
channelpadding_decision_t
channelpadding_decide_to_pad_channel(channel_t *chan)
{
  const or_options_t *options = get_options();

  /* Only pad open channels */
  if (chan->state != CHANNEL_STATE_OPEN)
    return CHANNELPADDING_WONTPAD;

  /* Pad channels carrying user traffic; pad pre-usage (full-circuit)
   * channels only when the consensus allows it. */
  if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
    if (!consensus_nf_pad_before_usage)
      return CHANNELPADDING_WONTPAD;
  } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
    return CHANNELPADDING_WONTPAD;
  }

  /* A padding timer is already armed for this channel; nothing to do. */
  if (chan->pending_padding_callback)
    return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;

  /* Don't pad the channel if we didn't negotiate it, but still
   * allow clients to force padding if options->ConnectionPadding is
   * explicitly set to 1.
   */
  if (!chan->padding_enabled && options->ConnectionPadding != 1) {
    return CHANNELPADDING_WONTPAD;
  }

  if (options->Tor2webMode && !consensus_nf_pad_tor2web) {
    /* If the consensus just changed values, this channel may still
     * think padding is enabled. Negotiate it off. */
    if (chan->padding_enabled)
      channelpadding_disable_padding_on_channel(chan);

    return CHANNELPADDING_WONTPAD;
  }

  if (rend_service_allow_non_anonymous_connection(options) &&
      !consensus_nf_pad_single_onion) {
    /* If the consensus just changed values, this channel may still
     * think padding is enabled. Negotiate it off. */
    if (chan->padding_enabled)
      channelpadding_disable_padding_on_channel(chan);

    return CHANNELPADDING_WONTPAD;
  }

  if (!chan->has_queued_writes(chan)) {
    int is_client_channel = 0;

    if (CHANNEL_IS_CLIENT(chan, options)) {
      is_client_channel = 1;
    }

    /* If nf_pad_relays=1 is set in the consensus, we pad
     * on *all* idle connections, relay-relay or relay-client.
     * Otherwise pad only for client+bridge cons */
    if (is_client_channel || consensus_nf_pad_relays) {
      int64_t pad_time_ms =
          channelpadding_compute_time_until_pad_for_netflow(chan);

      if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
        return CHANNELPADDING_WONTPAD;
      } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
        /* Deadline is too far out to bother with a timer yet. */
        chan->currently_padding = 1;
        return CHANNELPADDING_PADLATER;
      } else {
        /* Defensive clamp before the int cast below. */
        if (BUG(pad_time_ms > INT_MAX)) {
          pad_time_ms = INT_MAX;
        }
        /* We have to schedule a callback because we're called exactly once per
         * second, but we don't want padding packets to go out exactly on an
         * integer multiple of seconds. This callback will only be scheduled
         * if we're within 1.1 seconds of the padding time.
         */
        chan->currently_padding = 1;
        return channelpadding_schedule_padding(chan, (int)pad_time_ms);
      }
    } else {
      chan->currently_padding = 0;
      return CHANNELPADDING_WONTPAD;
    }
  } else {
    /* Channel has queued writes, so it is not idle: check again later. */
    return CHANNELPADDING_PADLATER;
  }
}