channelpadding.c

/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2018, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
 * channelpadding_channel_to_channelinfo() */
#define TOR_CHANNEL_INTERNAL_

#include "or/or.h"
#include "or/channel.h"
#include "or/channelpadding.h"
#include "or/channeltls.h"
#include "or/config.h"
#include "or/networkstatus.h"
#include "or/connection.h"
#include "or/connection_or.h"
#include "lib/crypt_ops/crypto_rand.h"
#include "or/main.h"
#include "or/rephist.h"
#include "or/router.h"
#include "lib/time/compat_time.h"
#include "or/rendservice.h"
#include "common/timers.h"
#include "or/cell_st.h"
#include "or/or_connection_st.h"
STATIC int32_t channelpadding_get_netflow_inactive_timeout_ms(
                                                        const channel_t *);
STATIC int channelpadding_send_disable_command(channel_t *);
STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);

/** The total number of pending channelpadding timers */
static uint64_t total_timers_pending;

/** These are cached consensus parameters for netflow */
/** The timeout lower bound that is allowed before sending padding */
static int consensus_nf_ito_low;
/** The timeout upper bound that is allowed before sending padding */
static int consensus_nf_ito_high;
/** The timeout lower bound that is allowed before sending reduced padding */
static int consensus_nf_ito_low_reduced;
/** The timeout upper bound that is allowed before sending reduced padding */
static int consensus_nf_ito_high_reduced;
/** The connection timeout between relays */
static int consensus_nf_conntimeout_relays;
/** The connection timeout for client connections */
static int consensus_nf_conntimeout_clients;
/** Should we pad before circuits are actually used for client data? */
static int consensus_nf_pad_before_usage;
/** Should we pad relay-to-relay connections? */
static int consensus_nf_pad_relays;
/** Should we pad tor2web connections? */
static int consensus_nf_pad_tor2web;
/** Should we pad single onion service (rosos) connections? */
static int consensus_nf_pad_single_onion;
#define TOR_MSEC_PER_SEC 1000
#define TOR_USEC_PER_MSEC 1000

/**
 * How often do we get called by the connection housekeeping (ie: once
 * per second) */
#define TOR_HOUSEKEEPING_CALLBACK_MSEC 1000

/**
 * Additional extra time buffer on the housekeeping callback, since
 * it can be delayed. This extra slack is used to decide if we should
 * schedule a timer or wait for the next callback. */
#define TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC 100
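/* In effect, the two values above mean that the housekeeping pass only
 * schedules a padding timer when the padding deadline falls within the next
 * 1000 + 100 = 1100 ms; deadlines further out are left for a later pass
 * (see channelpadding_compute_time_until_pad_for_netflow() below). */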
/**
 * This macro tells us if either end of the channel is connected to a client.
 * (If we're not a server, we're definitely a client. If the channel thinks
 * it's a client, use that. Then finally verify in the consensus).
 */
#define CHANNEL_IS_CLIENT(chan, options) \
  (!public_server_mode((options)) || channel_is_client(chan) || \
      !connection_or_digest_is_known_relay((chan)->identity_digest))
/**
 * This function is called to update cached consensus parameters every time
 * there is a consensus update. This allows us to move the consensus param
 * search off of the critical path, so it does not need to be evaluated
 * for every single connection, every second.
 */
void
channelpadding_new_consensus_params(networkstatus_t *ns)
{
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
  consensus_nf_ito_low = networkstatus_get_param(ns, "nf_ito_low",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
  consensus_nf_ito_high = networkstatus_get_param(ns, "nf_ito_high",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
      consensus_nf_ito_low,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);

#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000
  consensus_nf_ito_low_reduced =
    networkstatus_get_param(ns, "nf_ito_low_reduced",
        DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
  consensus_nf_ito_high_reduced =
    networkstatus_get_param(ns, "nf_ito_high_reduced",
        DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
        consensus_nf_ito_low_reduced,
        DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);

#define CONNTIMEOUT_RELAYS_DFLT (60*60) // 1 hour
#define CONNTIMEOUT_RELAYS_MIN 60
#define CONNTIMEOUT_RELAYS_MAX (7*24*60*60) // 1 week
  consensus_nf_conntimeout_relays =
    networkstatus_get_param(ns, "nf_conntimeout_relays",
        CONNTIMEOUT_RELAYS_DFLT,
        CONNTIMEOUT_RELAYS_MIN,
        CONNTIMEOUT_RELAYS_MAX);

#define CIRCTIMEOUT_CLIENTS_DFLT (30*60) // 30 minutes
#define CIRCTIMEOUT_CLIENTS_MIN 60
#define CIRCTIMEOUT_CLIENTS_MAX (24*60*60) // 24 hours
  consensus_nf_conntimeout_clients =
    networkstatus_get_param(ns, "nf_conntimeout_clients",
        CIRCTIMEOUT_CLIENTS_DFLT,
        CIRCTIMEOUT_CLIENTS_MIN,
        CIRCTIMEOUT_CLIENTS_MAX);

  consensus_nf_pad_before_usage =
    networkstatus_get_param(ns, "nf_pad_before_usage", 1, 0, 1);

  consensus_nf_pad_relays =
    networkstatus_get_param(ns, "nf_pad_relays", 0, 0, 1);

  consensus_nf_pad_tor2web =
    networkstatus_get_param(ns,
                            CHANNELPADDING_TOR2WEB_PARAM,
                            CHANNELPADDING_TOR2WEB_DEFAULT, 0, 1);

  consensus_nf_pad_single_onion =
    networkstatus_get_param(ns,
                            CHANNELPADDING_SOS_PARAM,
                            CHANNELPADDING_SOS_DEFAULT, 0, 1);
}
/**
 * Get a random netflow inactive timeout keepalive period in milliseconds,
 * the range for which is determined by consensus parameters, negotiation,
 * configuration, or default values. The consensus parameters enforce the
 * minimum possible value, to avoid excessively frequent padding.
 *
 * The ranges for this value were chosen to be low enough to ensure that
 * routers do not emit a new netflow record for a connection due to it
 * being idle.
 *
 * Specific timeout values for major routers are listed in Proposal 251.
 * No major router appeared capable of setting an inactive timeout below 10
 * seconds, so we set the defaults below that value, since we can always
 * scale back if it ends up being too much padding.
 *
 * Returns the next timeout period (in milliseconds) after which we should
 * send a padding packet, or 0 if padding is disabled.
 */
STATIC int32_t
channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
{
  int low_timeout = consensus_nf_ito_low;
  int high_timeout = consensus_nf_ito_high;
  int X1, X2;

  if (low_timeout == 0 && low_timeout == high_timeout)
    return 0; // No padding

  /* If we have negotiated different timeout values, use those, but
   * don't allow them to be lower than the consensus ones */
  if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
    low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
    high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
  }

  if (low_timeout == high_timeout)
    return low_timeout; // No randomization
  /*
   * This MAX() hack is here because we apply the timeout on both the client
   * and the server. This creates the situation where the total time before
   * sending a packet in either direction is actually
   * min(client_timeout,server_timeout).
   *
   * If X is a random variable uniform from 0..R-1 (where R=high-low),
   * then Y=max(X,X) has Prob(Y == i) = (2.0*i + 1)/(R*R).
   *
   * If we create a third random variable Z=min(Y,Y), then it turns out that
   * Exp[Z] ~= Exp[X]. Here's a table:
   *
   *    R        Exp[X]    Exp[Z]    Exp[min(X,X)]   Exp[max(X,X)]
   *    2000      999.5    1066         666.2           1332.8
   *    3000     1499.5    1599.5       999.5           1999.5
   *    5000     2499.5    2666        1666.2           3332.8
   *    6000     2999.5    3199.5      1999.5           3999.5
   *    7000     3499.5    3732.8      2332.8           4666.2
   *    8000     3999.5    4266.2      2666.2           5332.8
   *    10000    4999.5    5328        3332.8           6666.2
   *    15000    7499.5    7995        4999.5           9999.5
   *    20000    9999.5   10661        6666.2          13332.8
   *
   * In other words, this hack makes it so that when both the client and
   * the guard are sending this padding, then the averages work out closer
   * to the midpoint of the range, making the overhead easier to tune.
   * If only one endpoint is padding (for example: if the relay does not
   * support padding, but the client has set ConnectionPadding 1; or
   * if the relay does support padding, but the client has set
   * ReducedConnectionPadding 1), then the defense will still prevent
   * record splitting, but with less overhead than the midpoint
   * (as seen by the Exp[max(X,X)] column).
   *
   * To calculate average padding packet frequency (and thus overhead),
   * index into the table by picking a row based on R = high-low. Then,
   * use the appropriate column (Exp[Z] for two-sided padding, and
   * Exp[max(X,X)] for one-sided padding). Finally, take this value
   * and add it to the low timeout value. This value is the average
   * frequency which padding packets will be sent.
   */
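  /* As a rough cross-check of the table, assuming two independent draws
   * X1, X2 uniform on 0..R-1, the one-sided column follows the closed form
   *   Exp[max(X1,X2)] = (R-1)*(4*R+1)/(6*R),
   * which for R = 2000 gives 1999*8001/12000 ~= 1332.8, matching the last
   * column above; Exp[min(X1,X2)] is then (R-1) - Exp[max(X1,X2)] ~= 666.2. */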
  X1 = crypto_rand_int(high_timeout - low_timeout);
  X2 = crypto_rand_int(high_timeout - low_timeout);
  return low_timeout + MAX(X1, X2);
}
/**
 * Update this channel's padding settings based on the PADDING_NEGOTIATE
 * contents.
 *
 * Returns -1 on error; 1 on success.
 */
int
channelpadding_update_padding_for_channel(channel_t *chan,
                const channelpadding_negotiate_t *pad_vars)
{
  if (pad_vars->version != 0) {
    static ratelim_t version_limit = RATELIM_INIT(600);

    log_fn_ratelim(&version_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
           "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
    return -1;
  }

  // We should not allow malicious relays to disable or reduce padding for
  // us as clients. In fact, we should only accept this cell at all if we're
  // operating as a relay. Bridges should not accept it from relays, either
  // (only from their clients).
  if ((get_options()->BridgeRelay &&
       connection_or_digest_is_known_relay(chan->identity_digest)) ||
      !get_options()->ORPort_set) {
    static ratelim_t relay_limit = RATELIM_INIT(600);

    log_fn_ratelim(&relay_limit,LOG_PROTOCOL_WARN,LD_PROTOCOL,
           "Got a PADDING_NEGOTIATE from relay at %s (%s). "
           "This should not happen.",
           chan->get_remote_descr(chan, 0),
           hex_str(chan->identity_digest, DIGEST_LEN));
    return -1;
  }

  chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);

  /* Min must not be lower than the current consensus parameter
     nf_ito_low. */
  chan->padding_timeout_low_ms = MAX(consensus_nf_ito_low,
                                     pad_vars->ito_low_ms);

  /* Max must not be lower than ito_low_ms */
  chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
                                      pad_vars->ito_high_ms);

  log_fn(LOG_INFO,LD_OR,
         "Negotiated padding=%d, lo=%d, hi=%d on %"PRIu64,
         chan->padding_enabled, chan->padding_timeout_low_ms,
         chan->padding_timeout_high_ms,
         (chan->global_identifier));

  return 1;
}
/**
 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
 * to send padding.
 *
 * Returns -1 on error, 0 on success.
 */
STATIC int
channelpadding_send_disable_command(channel_t *chan)
{
  channelpadding_negotiate_t disable;
  cell_t cell;

  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);

  memset(&cell, 0, sizeof(cell_t));
  memset(&disable, 0, sizeof(channelpadding_negotiate_t));
  cell.command = CELL_PADDING_NEGOTIATE;

  channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);

  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &disable) < 0)
    return -1;

  if (chan->write_cell(chan, &cell) == 1)
    return 0;
  else
    return -1;
}
/**
 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
 * resume sending padding at some rate.
 *
 * Returns -1 on error, 0 on success.
 */
int
channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
                                   uint16_t high_timeout)
{
  channelpadding_negotiate_t enable;
  cell_t cell;

  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);

  memset(&cell, 0, sizeof(cell_t));
  memset(&enable, 0, sizeof(channelpadding_negotiate_t));
  cell.command = CELL_PADDING_NEGOTIATE;

  channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
  channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
  channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);

  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &enable) < 0)
    return -1;

  if (chan->write_cell(chan, &cell) == 1)
    return 0;
  else
    return -1;
}
/**
 * Sends a CELL_PADDING cell on a channel if it has been idle since
 * our callback was scheduled.
 *
 * This function also clears the pending padding timer and the callback
 * flags.
 */
static void
channelpadding_send_padding_cell_for_callback(channel_t *chan)
{
  cell_t cell;

  /* Check that the channel is still valid and open */
  if (!chan || chan->state != CHANNEL_STATE_OPEN) {
    if (chan) chan->pending_padding_callback = 0;
    log_fn(LOG_INFO,LD_OR,
           "Scheduled a netflow padding cell, but connection already closed.");
    return;
  }

  /* We should have a pending callback flag set. */
  if (BUG(chan->pending_padding_callback == 0))
    return;

  chan->pending_padding_callback = 0;

  if (monotime_coarse_is_zero(&chan->next_padding_time) ||
      chan->has_queued_writes(chan)) {
    /* We must have been active before the timer fired */
    monotime_coarse_zero(&chan->next_padding_time);
    return;
  }

  {
    monotime_coarse_t now;
    monotime_coarse_get(&now);

    log_fn(LOG_INFO,LD_OR,
        "Sending netflow keepalive on %"PRIu64" to %s (%s) after "
        "%"PRId64" ms. Delta %"PRId64"ms",
        (chan->global_identifier),
        safe_str_client(chan->get_remote_descr(chan, 0)),
        safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
        (monotime_coarse_diff_msec(&chan->timestamp_xfer,&now)),
        (monotime_coarse_diff_msec(&chan->next_padding_time,&now)));
  }

  /* Clear the timer */
  monotime_coarse_zero(&chan->next_padding_time);

  /* Send the padding cell. This will cause the channel to get a
   * fresh timestamp_active */
  memset(&cell, 0, sizeof(cell));
  cell.command = CELL_PADDING;
  chan->write_cell(chan, &cell);
}
/**
 * tor_timer callback function for us to send padding on an idle channel.
 *
 * This function just obtains the channel from the callback handle, ensures
 * it is still valid, and then hands it off to
 * channelpadding_send_padding_cell_for_callback(), which checks if
 * the channel is still idle before sending padding.
 */
static void
channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
                                     const struct monotime_t *when)
{
  channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
  (void)timer; (void)when;

  if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
    /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
     * for channels. Then we could get rid of the channeltls dependency */
    tor_assert(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn)->magic ==
               OR_CONNECTION_MAGIC);
    assert_connection_ok(TO_CONN(BASE_CHAN_TO_TLS(chan)->conn), approx_time());

    channelpadding_send_padding_cell_for_callback(chan);
  } else {
    log_fn(LOG_INFO,LD_OR,
           "Channel closed while waiting for timer.");
  }

  total_timers_pending--;
}
/**
 * Schedules a callback to send padding on a channel in_ms milliseconds from
 * now.
 *
 * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
 * sent the packet immediately without a timer, and
 * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
 */
static channelpadding_decision_t
channelpadding_schedule_padding(channel_t *chan, int in_ms)
{
  struct timeval timeout;
  tor_assert(!chan->pending_padding_callback);

  if (in_ms <= 0) {
    chan->pending_padding_callback = 1;
    channelpadding_send_padding_cell_for_callback(chan);
    return CHANNELPADDING_PADDING_SENT;
  }
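  /* Convert the relative in_ms value into the struct timeval the timer
   * expects; for example, in_ms = 1500 yields tv_sec = 1 and
   * tv_usec = 500000. */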
  timeout.tv_sec = in_ms/TOR_MSEC_PER_SEC;
  timeout.tv_usec = (in_ms%TOR_USEC_PER_MSEC)*TOR_USEC_PER_MSEC;

  if (!chan->timer_handle) {
    chan->timer_handle = channel_handle_new(chan);
  }

  if (chan->padding_timer) {
    timer_set_cb(chan->padding_timer,
                 channelpadding_send_padding_callback,
                 chan->timer_handle);
  } else {
    chan->padding_timer = timer_new(channelpadding_send_padding_callback,
                                    chan->timer_handle);
  }
  timer_schedule(chan->padding_timer, &timeout);

  rep_hist_padding_count_timers(++total_timers_pending);

  chan->pending_padding_callback = 1;
  return CHANNELPADDING_PADDING_SCHEDULED;
}
/**
 * Calculates the number of milliseconds from now to schedule a padding cell.
 *
 * Returns the number of milliseconds from now (relative) to schedule the
 * padding callback. If the padding timer is more than 1.1 seconds in the
 * future, we return -1, to avoid scheduling excessive callbacks. If padding
 * is disabled in the consensus, we return -2.
 *
 * Side-effects: Updates chan->next_padding_time_ms, storing an (absolute, not
 * relative) millisecond representation of when we should send padding, unless
 * other activity happens first. This side-effect allows us to avoid
 * scheduling a libevent callback until we're within 1.1 seconds of the padding
 * time.
 */
#define CHANNELPADDING_TIME_LATER -1
#define CHANNELPADDING_TIME_DISABLED -2
STATIC int64_t
channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
{
  monotime_coarse_t now;
  monotime_coarse_get(&now);

  if (monotime_coarse_is_zero(&chan->next_padding_time)) {
    /* If the below line or crypto_rand_int() shows up on a profile,
     * we can avoid getting a timeout until we're at least nf_ito_low
     * from a timeout window. That will prevent us from setting timers
     * on connections that were active up to 1.5 seconds ago.
     * Idle connections should only call this once every 5.5s on average
     * though, so that might be a micro-optimization for little gain. */
    int32_t padding_timeout =
        channelpadding_get_netflow_inactive_timeout_ms(chan);

    if (!padding_timeout)
      return CHANNELPADDING_TIME_DISABLED;

    monotime_coarse_add_msec(&chan->next_padding_time,
                             &chan->timestamp_xfer,
                             padding_timeout);
  }
  const int64_t ms_till_pad =
    monotime_coarse_diff_msec(&now, &chan->next_padding_time);

  /* If the next padding time is beyond the maximum possible consensus value,
   * then this indicates a clock jump, so just send padding now. This is
   * better than using monotonic time because we want to avoid the situation
   * where we wait around forever for monotonic time to move forward after
   * a clock jump far into the past.
   */
  if (ms_till_pad > DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
    tor_fragile_assert();
    log_warn(LD_BUG,
        "Channel padding timeout scheduled %"PRId64"ms in the future. "
        "Did the monotonic clock just jump?",
        (ms_till_pad));
    return 0; /* Clock jumped: Send padding now */
  }

  /* If the timeout will expire before the next time we're called (1000ms
     from now, plus some slack), then calculate the number of milliseconds
     from now which we should send padding, so we can schedule a callback
     then.
   */
  if (ms_till_pad < (TOR_HOUSEKEEPING_CALLBACK_MSEC +
                     TOR_HOUSEKEEPING_CALLBACK_SLACK_MSEC)) {
    /* If the padding time is in the past, that means that libevent delayed
     * calling the once-per-second callback due to other work taking too long.
     * See https://bugs.torproject.org/22212 and
     * https://bugs.torproject.org/16585. This is a systemic problem
     * with being single-threaded, but let's emit a notice if this
     * is long enough in the past that we might have missed a netflow window,
     * and allowed a router to emit a netflow frame, just so we don't forget
     * about it entirely.. */
#define NETFLOW_MISSED_WINDOW (150000 - DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH)
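    /* With the default nf_ito_high of 9500 ms this works out to 140500 ms,
     * so we only escalate to LOG_NOTICE below when the padding deadline is
     * more than roughly 2.3 minutes in the past. */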
    if (ms_till_pad < 0) {
      int severity = (ms_till_pad < -NETFLOW_MISSED_WINDOW)
                      ? LOG_NOTICE : LOG_INFO;
      log_fn(severity, LD_OR,
             "Channel padding timeout scheduled %"PRId64"ms in the past. ",
             (-ms_till_pad));
      return 0; /* Clock jumped: Send padding now */
    }

    return ms_till_pad;
  }

  return CHANNELPADDING_TIME_LATER;
}
/**
 * Returns a randomized value for channel idle timeout in seconds.
 * The channel idle timeout governs how quickly we close a channel
 * after its last circuit has disappeared.
 *
 * There are three classes of channels:
 *  1. Client+non-canonical. These live for 3-4.5 minutes
 *  2. Relay to relay. These live for 45-75 min by default
 *  3. Reduced padding clients. These live for 1.5-2.25 minutes.
 *
 * Also allows the default relay-to-relay value to be controlled by the
 * consensus.
 */
unsigned int
channelpadding_get_channel_idle_timeout(const channel_t *chan,
                                        int is_canonical)
{
  const or_options_t *options = get_options();
  unsigned int timeout;

  /* Non-canonical and client channels only last for 3-4.5 min when idle */
  if (!is_canonical || CHANNEL_IS_CLIENT(chan, options)) {
#define CONNTIMEOUT_CLIENTS_BASE 180 // 3 to 4.5 min
    timeout = CONNTIMEOUT_CLIENTS_BASE
        + crypto_rand_int(CONNTIMEOUT_CLIENTS_BASE/2);
  } else { // Canonical relay-to-relay channels
    // 45..75min or consensus +/- 25%
    timeout = consensus_nf_conntimeout_relays;
    timeout = 3*timeout/4 + crypto_rand_int(timeout/2);
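    /* For instance, with the default nf_conntimeout_relays of 3600 s this
     * works out to 2700 + [0, 1800) seconds, i.e. the 45..75 minute range
     * noted above. */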
  }

  /* If ReducedConnectionPadding is set, we want to halve the duration of
   * the channel idle timeout, since reducing the additional time that
   * a channel stays open will reduce the total overhead for making
   * new channels. This reduction in overhead/channel expense
   * is important for mobile users. The option cannot be set by relays.
   *
   * We also don't reduce any values for timeout that the user explicitly
   * set.
   */
  if (options->ReducedConnectionPadding
      && !options->CircuitsAvailableTimeout) {
    timeout /= 2;
  }

  return timeout;
}
/**
 * This function controls how long we keep idle circuits open,
 * and how long we build predicted circuits. This behavior is under
 * the control of channelpadding because circuit availability is the
 * dominant factor in channel lifespan, which influences total padding
 * overhead.
 *
 * Returns a randomized number of seconds in a range from
 * CircuitsAvailableTimeout to 2*CircuitsAvailableTimeout. This value is
 * halved if ReducedConnectionPadding is set. The default value of
 * CircuitsAvailableTimeout can be controlled by the consensus.
 */
int
channelpadding_get_circuits_available_timeout(void)
{
  const or_options_t *options = get_options();
  int timeout = options->CircuitsAvailableTimeout;

  if (!timeout) {
    timeout = consensus_nf_conntimeout_clients;

    /* If ReducedConnectionPadding is set, we want to halve the duration of
     * the channel idle timeout, since reducing the additional time that
     * a channel stays open will reduce the total overhead for making
     * new connections. This reduction in overhead/connection expense
     * is important for mobile users. The option cannot be set by relays.
     *
     * We also don't reduce any values for timeout that the user explicitly
     * set.
     */
    if (options->ReducedConnectionPadding) {
      // half the value to 15..30min by default
      timeout /= 2;
    }
  }

  // 30..60min by default
  timeout = timeout + crypto_rand_int(timeout);

  return timeout;
}
/**
 * Calling this function on a channel causes it to tell the other side
 * not to send padding, and disables sending padding from this side as well.
 */
void
channelpadding_disable_padding_on_channel(channel_t *chan)
{
  chan->padding_enabled = 0;

  // Send cell to disable padding on the other end
  channelpadding_send_disable_command(chan);
}
/**
 * Calling this function on a channel causes it to tell the other side
 * not to send padding, and reduces the rate that padding is sent from
 * this side.
 */
void
channelpadding_reduce_padding_on_channel(channel_t *chan)
{
  /* Padding can be forced and reduced by clients, regardless of whether
   * the channel supports it. So we check for support here before
   * sending any commands. */
  if (chan->padding_enabled) {
    channelpadding_send_disable_command(chan);
  }

  chan->padding_timeout_low_ms = consensus_nf_ito_low_reduced;
  chan->padding_timeout_high_ms = consensus_nf_ito_high_reduced;

  log_fn(LOG_INFO,LD_OR,
         "Reduced padding on channel %"PRIu64": lo=%d, hi=%d",
         (chan->global_identifier),
         chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
}
/**
 * This function is called once per second by run_connection_housekeeping(),
 * but only if the channel is still open, valid, and non-wedged.
 *
 * It decides if and when we should send a padding cell, and if needed,
 * schedules a callback to send that cell at the appropriate time.
 *
 * Returns an enum that represents the current padding decision state.
 * Return value is currently used only by unit tests.
 */
channelpadding_decision_t
channelpadding_decide_to_pad_channel(channel_t *chan)
{
  const or_options_t *options = get_options();

  /* Only pad open channels */
  if (chan->state != CHANNEL_STATE_OPEN)
    return CHANNELPADDING_WONTPAD;

  if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
    if (!consensus_nf_pad_before_usage)
      return CHANNELPADDING_WONTPAD;
  } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
    return CHANNELPADDING_WONTPAD;
  }

  if (chan->pending_padding_callback)
    return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;
  /* Don't pad the channel if we didn't negotiate it, but still
   * allow clients to force padding if options->ConnectionPadding is
   * explicitly set to 1.
   */
  if (!chan->padding_enabled && options->ConnectionPadding != 1) {
    return CHANNELPADDING_WONTPAD;
  }
  if (options->Tor2webMode && !consensus_nf_pad_tor2web) {
    /* If the consensus just changed values, this channel may still
     * think padding is enabled. Negotiate it off. */
    if (chan->padding_enabled)
      channelpadding_disable_padding_on_channel(chan);

    return CHANNELPADDING_WONTPAD;
  }

  if (rend_service_allow_non_anonymous_connection(options) &&
      !consensus_nf_pad_single_onion) {
    /* If the consensus just changed values, this channel may still
     * think padding is enabled. Negotiate it off. */
    if (chan->padding_enabled)
      channelpadding_disable_padding_on_channel(chan);

    return CHANNELPADDING_WONTPAD;
  }

  if (!chan->has_queued_writes(chan)) {
    int is_client_channel = 0;

    if (CHANNEL_IS_CLIENT(chan, options)) {
      is_client_channel = 1;
    }

    /* If nf_pad_relays=1 is set in the consensus, we pad
     * on *all* idle connections, relay-relay or relay-client.
     * Otherwise pad only for client+bridge cons */
    if (is_client_channel || consensus_nf_pad_relays) {
      int64_t pad_time_ms =
          channelpadding_compute_time_until_pad_for_netflow(chan);

      if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
        return CHANNELPADDING_WONTPAD;
      } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
        chan->currently_padding = 1;
        return CHANNELPADDING_PADLATER;
      } else {
        if (BUG(pad_time_ms > INT_MAX)) {
          pad_time_ms = INT_MAX;
        }
        /* We have to schedule a callback because we're called exactly once per
         * second, but we don't want padding packets to go out exactly on an
         * integer multiple of seconds. This callback will only be scheduled
         * if we're within 1.1 seconds of the padding time.
         */
        chan->currently_padding = 1;
        return channelpadding_schedule_padding(chan, (int)pad_time_ms);
      }
    } else {
      chan->currently_padding = 0;
      return CHANNELPADDING_WONTPAD;
    }
  } else {
    return CHANNELPADDING_PADLATER;
  }
}