/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
 * channelpadding_channel_to_channelinfo() */
#define TOR_CHANNEL_INTERNAL_

#include "or.h"
#include "channel.h"
#include "channelpadding.h"
#include "channeltls.h"
#include "config.h"
#include "networkstatus.h"
#include "connection.h"
#include "connection_or.h"
#include "main.h"
#include "rephist.h"
#include "router.h"
#include "compat_time.h"
#include <event.h>

STATIC int channelpadding_get_netflow_inactive_timeout_ms(const channel_t *);
STATIC int channelpadding_send_disable_command(channel_t *);
STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);

/** The total number of pending channelpadding timers */
static uint64_t total_timers_pending;

/** These are cached consensus parameters for netflow */
/** The timeout lower bound that is allowed before sending padding */
static int consensus_nf_ito_low;
/** The timeout upper bound that is allowed before sending padding */
static int consensus_nf_ito_high;
/** The timeout lower bound that is allowed before sending reduced padding */
static int consensus_nf_ito_low_reduced;
/** The timeout upper bound that is allowed before sending reduced padding */
static int consensus_nf_ito_high_reduced;
/** The connection timeout between relays */
static int consensus_nf_conntimeout_relays;
/** The connection timeout for client connections */
static int consensus_nf_conntimeout_clients;
/** Should we pad before circuits are actually used for client data? */
static int consensus_nf_pad_before_usage;
/** Should we pad relay-to-relay connections? */
static int consensus_nf_pad_relays;

/**
 * This function is called to update cached consensus parameters every time
 * there is a consensus update. This allows us to move the consensus param
 * search off of the critical path, so it does not need to be evaluated
 * for every single connection, every second.
 */
void
channelpadding_new_consensus_params(networkstatus_t *ns)
{
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
  consensus_nf_ito_low = networkstatus_get_param(ns, "nf_ito_low",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
  consensus_nf_ito_high = networkstatus_get_param(NULL, "nf_ito_high",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
      consensus_nf_ito_low,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);

#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000
  consensus_nf_ito_low_reduced =
    networkstatus_get_param(NULL, "nf_ito_low_reduced",
        DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
  consensus_nf_ito_high_reduced =
    networkstatus_get_param(NULL, "nf_ito_high_reduced",
        DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
        consensus_nf_ito_low_reduced,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);

#define CONNTIMEOUT_RELAYS_DFLT (60*60) // 1 hour
#define CONNTIMEOUT_RELAYS_MIN 60
#define CONNTIMEOUT_RELAYS_MAX (7*24*60*60) // 1 week
  consensus_nf_conntimeout_relays =
    networkstatus_get_param(NULL, "nf_conntimeout_relays",
        CONNTIMEOUT_RELAYS_DFLT,
        CONNTIMEOUT_RELAYS_MIN,
        CONNTIMEOUT_RELAYS_MAX);

#define CIRCTIMEOUT_CLIENTS_DFLT (30*60) // 30 minutes
#define CIRCTIMEOUT_CLIENTS_MIN 60
#define CIRCTIMEOUT_CLIENTS_MAX (24*60*60) // 24 hours
  consensus_nf_conntimeout_clients =
    networkstatus_get_param(NULL, "nf_conntimeout_clients",
        CIRCTIMEOUT_CLIENTS_DFLT,
        CIRCTIMEOUT_CLIENTS_MIN,
        CIRCTIMEOUT_CLIENTS_MAX);

  consensus_nf_pad_before_usage =
    networkstatus_get_param(NULL, "nf_pad_before_usage", 1, 0, 1);
  consensus_nf_pad_relays =
    networkstatus_get_param(NULL, "nf_pad_relays", 0, 0, 1);
}
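
/* Illustrative note (added in this annotated copy, not in the original
 * source): the cached values above are what the per-second padding decision
 * below reads instead of doing a consensus-parameter lookup on every call.
 * Without the cache, the hot path would have to repeat something like the
 * following for every connection, every second (sketch only; names match the
 * calls above):
 *
 *   int ito_low = networkstatus_get_param(NULL, "nf_ito_low",
 *                     DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
 *                     DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
 *                     DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
 *
 * Note also that nf_ito_high is looked up with consensus_nf_ito_low as its
 * minimum, so (assuming networkstatus_get_param() bounds out-of-range
 * values) the cached high bound should never end up below the low bound. */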

/**
 * Get a random netflow inactive timeout keepalive period in milliseconds,
 * the range for which is determined by consensus parameters, negotiation,
 * configuration, or default values. The consensus parameters enforce the
 * minimum possible value, to avoid excessively frequent padding.
 *
 * The ranges for this value were chosen to be low enough to ensure that
 * routers do not emit a new netflow record for a connection due to it
 * being idle.
 *
 * Specific timeout values for major routers are listed in Proposal 251.
 * No major router appeared capable of setting an inactive timeout below 10
 * seconds, so we set the defaults below that value, since we can always
 * scale back if it ends up being too much padding.
 *
 * Returns the next timeout period (in milliseconds) after which we should
 * send a padding packet, or 0 if padding is disabled.
 */
STATIC int
channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
{
  int low_timeout = consensus_nf_ito_low;
  int high_timeout = consensus_nf_ito_high;
  int X1, X2;

  if (low_timeout == 0 && low_timeout == high_timeout)
    return 0; // No padding

  /* If we have negotiated different timeout values, use those, but
   * don't allow them to be lower than the consensus ones */
  if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
    low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
    high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
  }

  if (low_timeout == high_timeout)
    return low_timeout; // No randomization

  /*
   * This MAX() hack is here because we apply the timeout on both the client
   * and the server. This creates the situation where the total time before
   * sending a packet in either direction is actually
   * min(client_timeout,server_timeout).
   *
   * If X is a random variable uniform from 0..R-1 (where R=high-low),
   * then Y=max(X,X) has Prob(Y == i) = (2.0*i + 1)/(R*R).
   *
   * If we create a third random variable Z=min(Y,Y), then it turns out that
   * Exp[Z] ~= Exp[X]. Here's a table:
   *
   *      R      Exp[X]    Exp[Z]   Exp[min(X,X)]  Exp[max(X,X)]
   *    2000      999.5    1066         666.2         1332.8
   *    3000     1499.5    1599.5       999.5         1999.5
   *    5000     2499.5    2666        1666.2         3332.8
   *    6000     2999.5    3199.5      1999.5         3999.5
   *    7000     3499.5    3732.8      2332.8         4666.2
   *    8000     3999.5    4266.2      2666.2         5332.8
   *   10000     4999.5    5328        3332.8         6666.2
   *   15000     7499.5    7995        4999.5         9999.5
   *   20000     9999.5   10661        6666.2        13332.8
   *
   * In other words, this hack makes it so that when both the client and
   * the guard are sending this padding, then the averages work out closer
   * to the midpoint of the range, making the overhead easier to tune.
   * If only one endpoint is padding (for example: if the relay does not
   * support padding, but the client has set ConnectionPadding 1; or
   * if the relay does support padding, but the client has set
   * ReducedConnectionPadding 1), then the defense will still prevent
   * record splitting, but with less overhead than the midpoint
   * (as seen by the Exp[max(X,X)] column).
   *
   * To calculate average padding packet frequency (and thus overhead),
   * index into the table by picking a row based on R = high-low. Then,
   * use the appropriate column (Exp[Z] for two-sided padding, and
   * Exp[max(X,X)] for one-sided padding). Finally, take this value
   * and add it to the low timeout value. This value is the average
   * frequency which padding packets will be sent.
   */
  X1 = crypto_rand_int(high_timeout - low_timeout);
  X2 = crypto_rand_int(high_timeout - low_timeout);
  return low_timeout + MAX(X1, X2);
}
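
/* Worked example (added for illustration, not in the original source): with
 * the default consensus values, low=1500ms and high=9500ms, so
 * R = high - low = 8000. From the R=8000 row of the table above, two-sided
 * padding (client and guard both padding) gives Exp[Z] ~= 4266.2, i.e. an
 * average padding interval of roughly 1500 + 4266 ~= 5.8 seconds. One-sided
 * padding uses Exp[max(X,X)] ~= 5332.8, i.e. roughly 1500 + 5333 ~= 6.8
 * seconds between padding cells on an otherwise idle connection. */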

/**
 * Update this channel's padding settings based on the PADDING_NEGOTIATE
 * contents.
 *
 * Returns -1 on error; 1 on success.
 */
int
channelpadding_update_padding_for_channel(channel_t *chan,
                                  const channelpadding_negotiate_t *pad_vars)
{
  if (pad_vars->version != 0) {
    static ratelim_t version_limit = RATELIM_INIT(600);

    log_fn_ratelim(&version_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
        "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
    return -1;
  }

  // We should not allow malicious relays to disable or reduce padding for
  // us as clients. In fact, we should only accept this cell at all if we're
  // operating as a relay. Bridges should not accept it from relays, either
  // (only from their clients).
  if ((get_options()->BridgeRelay &&
       connection_or_digest_is_known_relay(chan->identity_digest)) ||
      !get_options()->ORPort_set) {
    static ratelim_t relay_limit = RATELIM_INIT(600);

    log_fn_ratelim(&relay_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
        "Got a PADDING_NEGOTIATE from relay at %s (%s). "
        "This should not happen.",
        chan->get_remote_descr(chan, 0),
        hex_str(chan->identity_digest, DIGEST_LEN));
    return -1;
  }

  chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);

  /* Min must not be lower than the current consensus parameter
     nf_ito_low. */
  chan->padding_timeout_low_ms = MAX(consensus_nf_ito_low,
                                     pad_vars->ito_low_ms);

  /* Max must not be lower than ito_low_ms */
  chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
                                      pad_vars->ito_high_ms);

  log_fn(LOG_INFO,LD_OR,
         "Negotiated padding=%d, lo=%d, hi=%d on "U64_FORMAT,
         chan->padding_enabled, chan->padding_timeout_low_ms,
         chan->padding_timeout_high_ms,
         U64_PRINTF_ARG(chan->global_identifier));

  return 1;
}
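
/* Worked example (illustrative, added in this annotated copy): suppose a
 * peer sends a PADDING_NEGOTIATE with command=START, ito_low_ms=100 and
 * ito_high_ms=200 while the consensus has nf_ito_low=1500. The two MAX()
 * clamps above yield padding_timeout_low_ms=1500 and
 * padding_timeout_high_ms=1500, so a malicious or misconfigured peer cannot
 * negotiate padding more frequent than the consensus floor. */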

/**
 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
 * to send padding.
 *
 * Returns -1 on error, 0 on success.
 */
STATIC int
channelpadding_send_disable_command(channel_t *chan)
{
  channelpadding_negotiate_t disable;
  cell_t cell;

  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);

  memset(&cell, 0, sizeof(cell_t));
  memset(&disable, 0, sizeof(channelpadding_negotiate_t));
  cell.command = CELL_PADDING_NEGOTIATE;
  channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);

  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &disable) < 0)
    return -1;

  if (chan->write_cell(chan, &cell) == 1)
    return 0;
  else
    return -1;
}

/**
 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
 * resume sending padding at some rate.
 *
 * Returns -1 on error, 0 on success.
 */
int
channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
                                   uint16_t high_timeout)
{
  channelpadding_negotiate_t enable;
  cell_t cell;

  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);

  memset(&cell, 0, sizeof(cell_t));
  memset(&enable, 0, sizeof(channelpadding_negotiate_t));
  cell.command = CELL_PADDING_NEGOTIATE;
  channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
  channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
  channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);

  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &enable) < 0)
    return -1;

  if (chan->write_cell(chan, &cell) == 1)
    return 0;
  else
    return -1;
}
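
/* Usage sketch (illustrative only; nothing in this file calls it this way):
 * a caller that wants the peer to pad with the default consensus range could
 * issue something like
 *
 *   if (channelpadding_send_enable_command(chan,
 *                              DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
 *                              DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH) < 0)
 *     log_fn(LOG_INFO, LD_OR, "Failed to send padding negotiation cell.");
 *
 * The receiving side runs the values through
 * channelpadding_update_padding_for_channel(), which re-clamps them against
 * nf_ito_low, so sending values below the consensus floor has no effect. */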

/**
 * Sends a CELL_PADDING cell on a channel if it has been idle since
 * our callback was scheduled.
 *
 * This function also clears the pending padding timer and the callback
 * flags.
 */
static void
channelpadding_send_padding_cell_for_callback(channel_t *chan)
{
  cell_t cell;

  /* Check that the channel is still valid and open */
  if (!chan || chan->state != CHANNEL_STATE_OPEN) {
    if (chan) chan->pending_padding_callback = 0;
    log_fn(LOG_INFO,LD_OR,
           "Scheduled a netflow padding cell, but connection already closed.");
    return;
  }

  /* We should have a pending callback flag set. */
  if (BUG(chan->pending_padding_callback == 0))
    return;

  chan->pending_padding_callback = 0;

  if (!chan->next_padding_time_ms ||
      chan->has_queued_writes(chan)) {
    /* We must have been active before the timer fired */
    chan->next_padding_time_ms = 0;
    return;
  }

  {
    uint64_t now = monotime_coarse_absolute_msec();

    log_fn(LOG_INFO,LD_OR,
           "Sending netflow keepalive on "U64_FORMAT" to %s (%s) after "
           I64_FORMAT" ms. Delta "I64_FORMAT"ms",
           U64_PRINTF_ARG(chan->global_identifier),
           safe_str_client(chan->get_remote_descr(chan, 0)),
           safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
           U64_PRINTF_ARG(now - chan->timestamp_xfer_ms),
           U64_PRINTF_ARG(now - chan->next_padding_time_ms));
  }

  /* Clear the timer */
  chan->next_padding_time_ms = 0;

  /* Send the padding cell. This will cause the channel to get a
   * fresh timestamp_active */
  memset(&cell, 0, sizeof(cell));
  cell.command = CELL_PADDING;
  chan->write_cell(chan, &cell);
}

/**
 * tor_timer callback function for us to send padding on an idle channel.
 *
 * This function just obtains the channel from the callback handle, ensures
 * it is still valid, and then hands it off to
 * channelpadding_send_padding_cell_for_callback(), which checks if
 * the channel is still idle before sending padding.
 */
static void
channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
                                     const struct monotime_t *time)
{
  channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
  (void)timer; (void)time;

  if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
    /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
     * for channels. Then we could get rid of the channeltls dependency */
    tor_assert(BASE_CHAN_TO_TLS(chan)->conn->base_.magic ==
               OR_CONNECTION_MAGIC);
    assert_connection_ok(&BASE_CHAN_TO_TLS(chan)->conn->base_, approx_time());

    channelpadding_send_padding_cell_for_callback(chan);
  } else {
    log_fn(LOG_INFO,LD_OR,
           "Channel closed while waiting for timer.");
  }

  total_timers_pending--;
}

/**
 * Schedules a callback to send padding on a channel in_ms milliseconds from
 * now.
 *
 * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
 * sent the packet immediately without a timer, and
 * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
 */
static channelpadding_decision_t
channelpadding_schedule_padding(channel_t *chan, int in_ms)
{
  struct timeval timeout;
  tor_assert(!chan->pending_padding_callback);

  if (in_ms <= 0) {
    chan->pending_padding_callback = 1;
    channelpadding_send_padding_cell_for_callback(chan);
    return CHANNELPADDING_PADDING_SENT;
  }

  timeout.tv_sec = in_ms/1000;
  timeout.tv_usec = (in_ms%1000)*1000;

  if (!chan->timer_handle) {
    chan->timer_handle = channel_handle_new(chan);
  }

  if (chan->padding_timer) {
    timer_set_cb(chan->padding_timer,
                 channelpadding_send_padding_callback,
                 chan->timer_handle);
  } else {
    chan->padding_timer = timer_new(channelpadding_send_padding_callback,
                                    chan->timer_handle);
  }
  timer_schedule(chan->padding_timer, &timeout);

  rep_hist_padding_count_timers(++total_timers_pending);

  chan->pending_padding_callback = 1;
  return CHANNELPADDING_PADDING_SCHEDULED;
}
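
/* Worked example (illustrative, not from the original source): if padding is
 * due in in_ms = 1375 milliseconds, the conversion above yields
 * timeout.tv_sec = 1 and timeout.tv_usec = 375000. The timer object and the
 * channel handle are allocated once per channel and then reused; only the
 * callback and the expiry are rearmed on each scheduling pass, which keeps
 * the per-second housekeeping cheap. */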

/**
 * Calculates the number of milliseconds from now to schedule a padding cell.
 *
 * Returns the number of milliseconds from now (relative) to schedule the
 * padding callback. If the padding timer is more than 1.1 seconds in the
 * future, we return -1, to avoid scheduling excessive callbacks. If padding
 * is disabled in the consensus, we return -2.
 *
 * Side-effects: Updates chan->next_padding_time_ms, storing an (absolute, not
 * relative) millisecond representation of when we should send padding, unless
 * other activity happens first. This side-effect allows us to avoid
 * scheduling a libevent callback until we're within 1.1 seconds of the
 * padding time.
 */
#define CHANNELPADDING_TIME_LATER -1
#define CHANNELPADDING_TIME_DISABLED -2
STATIC int64_t
channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
{
  uint64_t long_now = monotime_coarse_absolute_msec();

  if (!chan->next_padding_time_ms) {
    /* If the below line or crypto_rand_int() shows up on a profile,
     * we can avoid getting a timeout until we're at least nf_ito_lo
     * from a timeout window. That will prevent us from setting timers
     * on connections that were active up to 1.5 seconds ago.
     * Idle connections should only call this once every 5.5s on average
     * though, so that might be a micro-optimization for little gain. */
    int64_t padding_timeout =
        channelpadding_get_netflow_inactive_timeout_ms(chan);

    if (!padding_timeout)
      return CHANNELPADDING_TIME_DISABLED;

    chan->next_padding_time_ms = padding_timeout
        + chan->timestamp_xfer_ms;
  }

  /* If the next padding time is beyond the maximum possible consensus value,
   * then this indicates a clock jump, so just send padding now. This is
   * better than using monotonic time because we want to avoid the situation
   * where we wait around forever for monotonic time to move forward after
   * a clock jump far into the past.
   */
  if (chan->next_padding_time_ms > long_now +
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
    tor_fragile_assert();
    log_warn(LD_BUG,
        "Channel padding timeout scheduled "I64_FORMAT"ms in the future. "
        "Did the monotonic clock just jump?",
        I64_PRINTF_ARG(chan->next_padding_time_ms - long_now));
    return 0; /* Clock jumped: Send padding now */
  }

  /* If the timeout will expire before the next time we're called (1000ms
     from now, plus some slack), then calculate the number of milliseconds
     from now at which we should send padding, so we can schedule a callback
     then.
   */
  if (long_now + 1100 >= chan->next_padding_time_ms) {
    int64_t ms_until_pad_for_netflow = chan->next_padding_time_ms -
        long_now;

    if (ms_until_pad_for_netflow < 0) {
      log_warn(LD_BUG,
          "Channel padding timeout scheduled "I64_FORMAT"ms in the past. "
          "Did the monotonic clock just jump?",
          I64_PRINTF_ARG(-ms_until_pad_for_netflow));
      return 0; /* Clock jumped: Send padding now */
    }

    return ms_until_pad_for_netflow;
  }

  return CHANNELPADDING_TIME_LATER;
}
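
/* Worked example (illustrative, added in this annotated copy): suppose the
 * last transfer on the channel was at timestamp_xfer_ms = 100000 and the
 * randomized timeout comes out as 5800ms, so next_padding_time_ms = 105800.
 * On a housekeeping pass where long_now = 104000, 104000 + 1100 < 105800,
 * so the function returns CHANNELPADDING_TIME_LATER and no timer is armed.
 * On the next pass, long_now = 105000 and 105000 + 1100 >= 105800, so it
 * returns 800 and channelpadding_schedule_padding() arms a timer to fire
 * roughly 800ms later, off the whole-second housekeeping boundary. */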

/**
 * Returns a randomized value for channel idle timeout in seconds.
 * The channel idle timeout governs how quickly we close a channel
 * after its last circuit has disappeared.
 *
 * There are three classes of channels:
 *  1. Client+non-canonical. These live for 3-4.5 minutes
 *  2. relay to relay. These live for 45-75 min by default
 *  3. Reduced padding clients. These live for 1.5-2.25 minutes.
 *
 * Also allows the default relay-to-relay value to be controlled by the
 * consensus.
 */
unsigned int
channelpadding_get_channel_idle_timeout(const channel_t *chan,
                                        int is_canonical)
{
  const or_options_t *options = get_options();
  unsigned int timeout;

  /* Non-canonical and client channels only last for 3-4.5 min when idle */
  if (!is_canonical || !public_server_mode(options) ||
      chan->is_client ||
      !connection_or_digest_is_known_relay(chan->identity_digest)) {
#define CONNTIMEOUT_CLIENTS_BASE 180 // 3 to 4.5 min
    timeout = CONNTIMEOUT_CLIENTS_BASE
        + crypto_rand_int(CONNTIMEOUT_CLIENTS_BASE/2);
  } else { // Canonical relay-to-relay channels
    // 45..75min or consensus +/- 25%
    timeout = consensus_nf_conntimeout_relays;
    timeout = 3*timeout/4 + crypto_rand_int(timeout/2);
  }

  /* If ReducedConnectionPadding is set, we want to halve the duration of
   * the channel idle timeout, since reducing the additional time that
   * a channel stays open will reduce the total overhead for making
   * new channels. This reduction in overhead/channel expense
   * is important for mobile users. The option cannot be set by relays.
   *
   * We also don't reduce any values for timeout that the user explicitly
   * set.
   */
  if (options->ReducedConnectionPadding
      && !options->CircuitsAvailableTimeout) {
    timeout /= 2;
  }

  return timeout;
}
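
/* Worked arithmetic (illustrative): for a client or non-canonical channel,
 * timeout = 180 + rand(90) seconds, i.e. 3 to 4.5 minutes, and 1.5 to 2.25
 * minutes once ReducedConnectionPadding halves it. For a canonical
 * relay-to-relay channel with the default nf_conntimeout_relays of 3600
 * seconds, timeout = 2700 + rand(1800), i.e. 45 to 75 minutes, matching the
 * "consensus +/- 25%" note above. */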

/**
 * This function controls how long we keep idle circuits open,
 * and how long we build predicted circuits. This behavior is under
 * the control of channelpadding because circuit availability is the
 * dominant factor in channel lifespan, which influences total padding
 * overhead.
 *
 * Returns a randomized number of seconds in a range from
 * CircuitsAvailableTimeout to 2*CircuitsAvailableTimeout. This value is
 * halved if ReducedConnectionPadding is set. The default value of
 * CircuitsAvailableTimeout can be controlled by the consensus.
 */
int
channelpadding_get_circuits_available_timeout(void)
{
  const or_options_t *options = get_options();
  int timeout = options->CircuitsAvailableTimeout;

  if (!timeout) {
    timeout = consensus_nf_conntimeout_clients;

    /* If ReducedConnectionPadding is set, we want to halve the duration of
     * the channel idle timeout, since reducing the additional time that
     * a channel stays open will reduce the total overhead for making
     * new connections. This reduction in overhead/connection expense
     * is important for mobile users. The option cannot be set by relays.
     *
     * We also don't reduce any values for timeout that the user explicitly
     * set.
     */
    if (options->ReducedConnectionPadding) {
      // half the value to 15..30min by default
      timeout /= 2;
    }
  }

  // 30..60min by default
  timeout = timeout + crypto_rand_int(timeout);

  return timeout;
}
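
/* Worked arithmetic (illustrative): with the default nf_conntimeout_clients
 * of 30 minutes (1800 seconds) and no CircuitsAvailableTimeout override, the
 * result is 1800 + rand(1800), i.e. 30 to 60 minutes of predicted circuit
 * building. With ReducedConnectionPadding set, the base is halved to 900
 * first, giving 15 to 30 minutes, as the inline comments note. */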

/**
 * Calling this function on a channel causes it to tell the other side
 * not to send padding, and disables sending padding from this side as well.
 */
void
channelpadding_disable_padding_on_channel(channel_t *chan)
{
  chan->padding_enabled = 0;

  // Send cell to disable padding on the other end
  channelpadding_send_disable_command(chan);
}

/**
 * Calling this function on a channel causes it to tell the other side
 * not to send padding, and reduces the rate that padding is sent from
 * this side.
 */
void
channelpadding_reduce_padding_on_channel(channel_t *chan)
{
  /* Padding can be forced and reduced by clients, regardless of if
   * the channel supports it. So we check for support here before
   * sending any commands. */
  if (chan->padding_enabled) {
    channelpadding_send_disable_command(chan);
  }

  chan->padding_timeout_low_ms = consensus_nf_ito_low_reduced;
  chan->padding_timeout_high_ms = consensus_nf_ito_high_reduced;

  log_fn(LOG_INFO,LD_OR,
         "Reduced padding on channel "U64_FORMAT": lo=%d, hi=%d",
         U64_PRINTF_ARG(chan->global_identifier),
         chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
}

/**
 * This function is called once per second by run_connection_housekeeping(),
 * but only if the channel is still open, valid, and non-wedged.
 *
 * It decides if and when we should send a padding cell, and if needed,
 * schedules a callback to send that cell at the appropriate time.
 *
 * Returns an enum that represents the current padding decision state.
 * Return value is currently used only by unit tests.
 */
channelpadding_decision_t
channelpadding_decide_to_pad_channel(channel_t *chan)
{
  const or_options_t *options = get_options();

  /* Only pad open channels */
  if (chan->state != CHANNEL_STATE_OPEN)
    return CHANNELPADDING_WONTPAD;

  if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
    if (!consensus_nf_pad_before_usage)
      return CHANNELPADDING_WONTPAD;
  } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
    return CHANNELPADDING_WONTPAD;
  }

  if (chan->pending_padding_callback)
    return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;

  /* Don't pad the channel if we didn't negotiate it, but still
   * allow clients to force padding if options->ConnectionPadding is
   * explicitly set to 1.
   */
  if (!chan->padding_enabled && options->ConnectionPadding != 1) {
    return CHANNELPADDING_WONTPAD;
  }

  if (!chan->has_queued_writes(chan)) {
    int is_client_channel = 0;

    if (!public_server_mode(options) || chan->is_client ||
        !connection_or_digest_is_known_relay(chan->identity_digest)) {
      is_client_channel = 1;
    }

    /* If nf_pad_relays=1 is set in the consensus, we pad
     * on *all* idle connections, relay-relay or relay-client.
     * Otherwise pad only for client+bridge cons */
    if (is_client_channel || consensus_nf_pad_relays) {
      int64_t pad_time_ms =
          channelpadding_compute_time_until_pad_for_netflow(chan);

      if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
        return CHANNELPADDING_WONTPAD;
      } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
        chan->currently_padding = 1;
        return CHANNELPADDING_PADLATER;
      } else {
        /* We have to schedule a callback because we're called exactly once
         * per second, but we don't want padding packets to go out exactly
         * on an integer multiple of seconds. This callback will only be
         * scheduled if we're within 1.1 seconds of the padding time.
         */
        chan->currently_padding = 1;
        return channelpadding_schedule_padding(chan, pad_time_ms);
      }
    } else {
      chan->currently_padding = 0;
      return CHANNELPADDING_WONTPAD;
    }
  } else {
    return CHANNELPADDING_PADLATER;
  }
}
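
/* Caller sketch (illustrative; the real logic lives in
 * run_connection_housekeeping() in main.c and performs additional checks):
 * each per-second housekeeping pass over an open, valid, non-wedged OR
 * connection ends with something like
 *
 *   channelpadding_decide_to_pad_channel(chan);
 *
 * Because channelpadding_compute_time_until_pad_for_netflow() only returns a
 * relative delay inside the final 1.1-second window, at most one of those
 * per-second passes arms a timer for any given padding cell. */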