channelpadding.c

/* Copyright (c) 2001 Matej Pfajfar.
 * Copyright (c) 2001-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/* TOR_CHANNEL_INTERNAL_ define needed for an O(1) implementation of
 * channelpadding_channel_to_channelinfo() */
#define TOR_CHANNEL_INTERNAL_

#include "or.h"
#include "channel.h"
#include "channelpadding.h"
#include "channeltls.h"
#include "config.h"
#include "networkstatus.h"
#include "connection.h"
#include "connection_or.h"
#include "main.h"
#include "rephist.h"
#include "router.h"
#include "compat_time.h"
#include <event.h>

STATIC int channelpadding_get_netflow_inactive_timeout_ms(const channel_t *);
STATIC int channelpadding_send_disable_command(channel_t *);
STATIC int64_t channelpadding_compute_time_until_pad_for_netflow(channel_t *);

/** The total number of pending channelpadding timers */
static uint64_t total_timers_pending;

/**
 * Get a random netflow inactive timeout keepalive period in milliseconds,
 * the range for which is determined by consensus parameters, negotiation,
 * configuration, or default values. The consensus parameters enforce the
 * minimum possible value, to avoid excessively frequent padding.
 *
 * The ranges for this value were chosen to be low enough to ensure that
 * routers do not emit a new netflow record for a connection due to it
 * being idle.
 *
 * Specific timeout values for major routers are listed in Proposal 251.
 * No major router appeared capable of setting an inactive timeout below 10
 * seconds, so we set the defaults below that value, since we can always
 * scale back if it ends up being too much padding.
 *
 * Returns the next timeout period (in milliseconds) after which we should
 * send a padding packet, or 0 if padding is disabled.
 */
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW 1500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH 9500
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX 60000
STATIC int
channelpadding_get_netflow_inactive_timeout_ms(const channel_t *chan)
{
  int low_timeout = networkstatus_get_param(NULL, "nf_ito_low",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
  int high_timeout = networkstatus_get_param(NULL, "nf_ito_high",
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_HIGH,
      low_timeout,
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX);
  int X1, X2;

  if (low_timeout == 0 && low_timeout == high_timeout)
    return 0; // No padding

  /* If we have negotiated different timeout values, use those, but
   * don't allow them to be lower than the consensus ones */
  if (chan->padding_timeout_low_ms && chan->padding_timeout_high_ms) {
    low_timeout = MAX(low_timeout, chan->padding_timeout_low_ms);
    high_timeout = MAX(high_timeout, chan->padding_timeout_high_ms);
  }

  if (low_timeout == high_timeout)
    return low_timeout; // No randomization

  /*
   * This MAX() hack is here because we apply the timeout on both the client
   * and the server. This creates the situation where the total time before
   * sending a packet in either direction is actually
   * min(client_timeout,server_timeout).
   *
   * If X is a random variable uniform from 0..R-1 (where R=high-low),
   * then Y=max(X,X) has Prob(Y == i) = (2.0*i + 1)/(R*R).
   *
   * If we create a third random variable Z=min(Y,Y), then it turns out that
   * Exp[Z] ~= Exp[X]. Here's a table:
   *
   *    R        Exp[X]    Exp[Z]    Exp[min(X,X)]   Exp[max(X,X)]
   *    2000     999.5     1066      666.2           1332.8
   *    3000     1499.5    1599.5    999.5           1999.5
   *    5000     2499.5    2666      1666.2          3332.8
   *    6000     2999.5    3199.5    1999.5          3999.5
   *    7000     3499.5    3732.8    2332.8          4666.2
   *    8000     3999.5    4266.2    2666.2          5332.8
   *    10000    4999.5    5328      3332.8          6666.2
   *    15000    7499.5    7995      4999.5          9999.5
   *    20000    9999.5    10661     6666.2          13332.8
   *
   * In other words, this hack makes it so that when both the client and
   * the guard are sending this padding, then the averages work out closer
   * to the midpoint of the range, making the overhead easier to tune.
   * If only one endpoint is padding (for example: if the relay does not
   * support padding, but the client has set ConnectionPadding 1; or
   * if the relay does support padding, but the client has set
   * ReducedConnectionPadding 1), then the defense will still prevent
   * record splitting, but with less overhead than the midpoint
   * (as seen by the Exp[max(X,X)] column).
   *
   * To calculate the average padding packet interval (and thus the
   * overhead), index into the table by picking a row based on R = high-low.
   * Then, use the appropriate column (Exp[Z] for two-sided padding, and
   * Exp[max(X,X)] for one-sided padding). Finally, take this value
   * and add it to the low timeout value. The result is the average
   * interval (in milliseconds) at which padding packets will be sent.
   */
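  /* For example, with the defaults above (nf_ito_low=1500, nf_ito_high=9500),
   * R = 9500 - 1500 = 8000. The R=8000 row gives Exp[Z] ~= 4266 and
   * Exp[max(X,X)] ~= 5333, so an otherwise idle connection sees a padding
   * cell on average roughly every 1500 + 4266 ~= 5766 ms when both endpoints
   * pad, and roughly every 1500 + 5333 ~= 6833 ms when only one endpoint
   * pads. */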
  X1 = crypto_rand_int(high_timeout - low_timeout);
  X2 = crypto_rand_int(high_timeout - low_timeout);
  return low_timeout + MAX(X1, X2);
}

/**
 * Update this channel's padding settings based on the PADDING_NEGOTIATE
 * contents.
 *
 * Returns -1 on error; 1 on success.
 */
int
channelpadding_update_padding_for_channel(channel_t *chan,
                                const channelpadding_negotiate_t *pad_vars)
{
  if (pad_vars->version != 0) {
    static ratelim_t version_limit = RATELIM_INIT(600);

    log_fn_ratelim(&version_limit, LOG_PROTOCOL_WARN, LD_PROTOCOL,
        "Got a PADDING_NEGOTIATE cell with an unknown version. Ignoring.");
    return -1;
  }

  // We should not allow malicious relays to disable or reduce padding for
  // us as clients. In fact, we should only accept this cell at all if we're
  // operating as a relay. Bridges should not accept it from relays, either
  // (only from their clients).
  if ((get_options()->BridgeRelay &&
       connection_or_digest_is_known_relay(chan->identity_digest)) ||
      !get_options()->ORPort_set) {
    static ratelim_t relay_limit = RATELIM_INIT(600);

    log_fn_ratelim(&relay_limit, LOG_PROTOCOL_WARN, LD_PROTOCOL,
        "Got a PADDING_NEGOTIATE from relay at %s (%s). "
        "This should not happen.",
        chan->get_remote_descr(chan, 0),
        hex_str(chan->identity_digest, DIGEST_LEN));
    return -1;
  }

  chan->padding_enabled = (pad_vars->command == CHANNELPADDING_COMMAND_START);

  /* Min must not be lower than the current consensus parameter
     nf_ito_low. */
  chan->padding_timeout_low_ms = MAX(networkstatus_get_param(NULL,
                                         "nf_ito_low",
                                         DFLT_NETFLOW_INACTIVE_KEEPALIVE_LOW,
                                         DFLT_NETFLOW_INACTIVE_KEEPALIVE_MIN,
                                         DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX),
                                     pad_vars->ito_low_ms);

  /* Max must not be lower than ito_low_ms */
  chan->padding_timeout_high_ms = MAX(chan->padding_timeout_low_ms,
                                      pad_vars->ito_high_ms);
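  /* Illustration: if a peer asks for ito_low_ms=100 while the consensus
   * nf_ito_low is 1500, the negotiated low is clamped up to 1500, and an
   * ito_high_ms below that is raised to match the low value. */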

  log_fn(LOG_INFO, LD_OR,
         "Negotiated padding=%d, lo=%d, hi=%d on "U64_FORMAT,
         chan->padding_enabled, chan->padding_timeout_low_ms,
         chan->padding_timeout_high_ms,
         U64_PRINTF_ARG(chan->global_identifier));

  return 1;
}

/**
 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side not
 * to send padding.
 *
 * Returns -1 on error, 0 on success.
 */
STATIC int
channelpadding_send_disable_command(channel_t *chan)
{
  channelpadding_negotiate_t disable;
  cell_t cell;

  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);

  memset(&cell, 0, sizeof(cell_t));
  memset(&disable, 0, sizeof(channelpadding_negotiate_t));
  cell.command = CELL_PADDING_NEGOTIATE;

  channelpadding_negotiate_set_command(&disable, CHANNELPADDING_COMMAND_STOP);

  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &disable) < 0)
    return -1;
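
  /* write_cell() reports the number of cells it accepted for this channel,
   * so anything other than exactly one accepted cell is treated as a
   * failure here. */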
  if (chan->write_cell(chan, &cell) == 1)
    return 0;
  else
    return -1;
}

/**
 * Sends a CELL_PADDING_NEGOTIATE on the channel to tell the other side to
 * resume sending padding at some rate.
 *
 * Returns -1 on error, 0 on success.
 */
int
channelpadding_send_enable_command(channel_t *chan, uint16_t low_timeout,
                                   uint16_t high_timeout)
{
  channelpadding_negotiate_t enable;
  cell_t cell;

  tor_assert(BASE_CHAN_TO_TLS(chan)->conn->link_proto >=
             MIN_LINK_PROTO_FOR_CHANNEL_PADDING);

  memset(&cell, 0, sizeof(cell_t));
  memset(&enable, 0, sizeof(channelpadding_negotiate_t));
  cell.command = CELL_PADDING_NEGOTIATE;

  channelpadding_negotiate_set_command(&enable, CHANNELPADDING_COMMAND_START);
  channelpadding_negotiate_set_ito_low_ms(&enable, low_timeout);
  channelpadding_negotiate_set_ito_high_ms(&enable, high_timeout);

  if (channelpadding_negotiate_encode(cell.payload, CELL_PAYLOAD_SIZE,
                                      &enable) < 0)
    return -1;

  if (chan->write_cell(chan, &cell) == 1)
    return 0;
  else
    return -1;
}

/**
 * Sends a CELL_PADDING cell on a channel if it has been idle since
 * our callback was scheduled.
 *
 * This function also clears the pending padding timer and the callback
 * flags.
 */
static void
channelpadding_send_padding_cell_for_callback(channel_t *chan)
{
  cell_t cell;

  /* Check that the channel is still valid and open */
  if (!chan || chan->state != CHANNEL_STATE_OPEN) {
    if (chan) chan->pending_padding_callback = 0;
    log_fn(LOG_INFO, LD_OR,
        "Scheduled a netflow padding cell, but connection already closed.");
    return;
  }

  /* We should have a pending callback flag set. */
  if (BUG(chan->pending_padding_callback == 0))
    return;

  chan->pending_padding_callback = 0;

  if (!chan->next_padding_time_ms ||
      chan->has_queued_writes(chan)) {
    /* We must have been active before the timer fired */
    chan->next_padding_time_ms = 0;
    return;
  }

  {
    uint64_t now = monotime_coarse_absolute_msec();

    log_fn(LOG_INFO, LD_OR,
        "Sending netflow keepalive on "U64_FORMAT" to %s (%s) after "
        I64_FORMAT" ms. Delta "I64_FORMAT"ms",
        U64_PRINTF_ARG(chan->global_identifier),
        safe_str_client(chan->get_remote_descr(chan, 0)),
        safe_str_client(hex_str(chan->identity_digest, DIGEST_LEN)),
        U64_PRINTF_ARG(now - chan->timestamp_xfer_ms),
        U64_PRINTF_ARG(now - chan->next_padding_time_ms));
  }

  /* Clear the timer */
  chan->next_padding_time_ms = 0;

  /* Send the padding cell. This will cause the channel to get a
   * fresh timestamp_active */
  memset(&cell, 0, sizeof(cell));
  cell.command = CELL_PADDING;
  chan->write_cell(chan, &cell);
}

/**
 * tor_timer callback function for us to send padding on an idle channel.
 *
 * This function just obtains the channel from the callback handle, ensures
 * it is still valid, and then hands it off to
 * channelpadding_send_padding_cell_for_callback(), which checks if
 * the channel is still idle before sending padding.
 */
static void
channelpadding_send_padding_callback(tor_timer_t *timer, void *args,
                                     const struct monotime_t *time)
{
  channel_t *chan = channel_handle_get((struct channel_handle_t*)args);
  (void)timer; (void)time;

  if (chan && CHANNEL_CAN_HANDLE_CELLS(chan)) {
    /* Hrmm.. It might be nice to have an equivalent to assert_connection_ok
     * for channels. Then we could get rid of the channeltls dependency */
    tor_assert(BASE_CHAN_TO_TLS(chan)->conn->base_.magic ==
               OR_CONNECTION_MAGIC);
    assert_connection_ok(&BASE_CHAN_TO_TLS(chan)->conn->base_, approx_time());

    channelpadding_send_padding_cell_for_callback(chan);
  } else {
    log_fn(LOG_INFO, LD_OR,
           "Channel closed while waiting for timer.");
  }

  total_timers_pending--;
}

/**
 * Schedules a callback to send padding on a channel in_ms milliseconds from
 * now.
 *
 * Returns CHANNELPADDING_WONTPAD on error, CHANNELPADDING_PADDING_SENT if we
 * sent the packet immediately without a timer, and
 * CHANNELPADDING_PADDING_SCHEDULED if we decided to schedule a timer.
 */
static channelpadding_decision_t
channelpadding_schedule_padding(channel_t *chan, int in_ms)
{
  struct timeval timeout;
  tor_assert(!chan->pending_padding_callback);

  if (in_ms <= 0) {
    chan->pending_padding_callback = 1;
    channelpadding_send_padding_cell_for_callback(chan);
    return CHANNELPADDING_PADDING_SENT;
  }

  timeout.tv_sec = in_ms/1000;
  timeout.tv_usec = (in_ms%1000)*1000;
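  /* (For example, in_ms=1500 becomes tv_sec=1, tv_usec=500000.) */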

  if (!chan->timer_handle) {
    chan->timer_handle = channel_handle_new(chan);
  }

  if (chan->padding_timer) {
    timer_set_cb(chan->padding_timer,
                 channelpadding_send_padding_callback,
                 chan->timer_handle);
  } else {
    chan->padding_timer = timer_new(channelpadding_send_padding_callback,
                                    chan->timer_handle);
  }
  timer_schedule(chan->padding_timer, &timeout);

  rep_hist_padding_count_timers(++total_timers_pending);

  chan->pending_padding_callback = 1;
  return CHANNELPADDING_PADDING_SCHEDULED;
}

/**
 * Calculates the number of milliseconds from now to schedule a padding cell.
 *
 * Returns the number of milliseconds from now (relative) to schedule the
 * padding callback. If the padding timer is more than 1.1 seconds in the
 * future, we return -1, to avoid scheduling excessive callbacks. If padding
 * is disabled in the consensus, we return -2.
 *
 * Side-effects: Updates chan->next_padding_time_ms, storing an (absolute,
 * not relative) millisecond representation of when we should send padding,
 * unless other activity happens first. This side-effect allows us to avoid
 * scheduling a libevent callback until we're within 1.1 seconds of the
 * padding time.
 */
#define CHANNELPADDING_TIME_LATER -1
#define CHANNELPADDING_TIME_DISABLED -2
STATIC int64_t
channelpadding_compute_time_until_pad_for_netflow(channel_t *chan)
{
  uint64_t long_now = monotime_coarse_absolute_msec();

  if (!chan->next_padding_time_ms) {
    int64_t padding_timeout =
        channelpadding_get_netflow_inactive_timeout_ms(chan);

    if (!padding_timeout)
      return CHANNELPADDING_TIME_DISABLED;

    chan->next_padding_time_ms = padding_timeout + chan->timestamp_xfer_ms;
  }
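  /* Note: the padding deadline is measured from timestamp_xfer_ms (the last
   * transfer on this channel), not from the current time, so it only expires
   * once the connection has been idle for the full timeout. */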

  /* If the next padding time is beyond the maximum possible consensus value,
   * then this indicates a clock jump, so just send padding now. This is
   * better than using monotonic time because we want to avoid the situation
   * where we wait around forever for monotonic time to move forward after
   * a clock jump far into the past.
   */
  if (chan->next_padding_time_ms > long_now +
      DFLT_NETFLOW_INACTIVE_KEEPALIVE_MAX) {
    tor_fragile_assert();
    log_warn(LD_BUG,
        "Channel padding timeout scheduled "I64_FORMAT"ms in the future. "
        "Did the monotonic clock just jump?",
        I64_PRINTF_ARG(chan->next_padding_time_ms - long_now));
    return 0; /* Clock jumped: Send padding now */
  }

  /* If the timeout will expire before the next time we're called (1000ms
     from now, plus some slack), then calculate the number of milliseconds
     from now at which we should send padding, so we can schedule a callback
     then.
   */
  if (long_now + 1100 >= chan->next_padding_time_ms) {
    int64_t ms_until_pad_for_netflow = chan->next_padding_time_ms - long_now;

    if (ms_until_pad_for_netflow < 0) {
      log_warn(LD_BUG,
          "Channel padding timeout scheduled "I64_FORMAT"ms in the past. "
          "Did the monotonic clock just jump?",
          I64_PRINTF_ARG(-ms_until_pad_for_netflow));
      return 0; /* Clock jumped: Send padding now */
    }

    return ms_until_pad_for_netflow;
  }

  return CHANNELPADDING_TIME_LATER;
}

/**
 * Calling this function on a channel causes it to tell the other side
 * not to send padding, and disables sending padding from this side as well.
 */
void
channelpadding_disable_padding_on_channel(channel_t *chan)
{
  chan->padding_enabled = 0;

  // Send cell to disable padding on the other end
  channelpadding_send_disable_command(chan);
}

/**
 * Calling this function on a channel causes it to tell the other side
 * not to send padding, and reduces the rate that padding is sent from
 * this side.
 */
void
channelpadding_reduce_padding_on_channel(channel_t *chan)
{
  /* Padding can be forced and reduced by clients, regardless of whether
   * the channel supports it. So we check for support here before
   * sending any commands. */
  if (chan->padding_enabled) {
    channelpadding_send_disable_command(chan);
  }

#define DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW 9000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH 14000
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN 0
#define DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX 60000

  chan->padding_timeout_low_ms =
      networkstatus_get_param(NULL, "nf_ito_low_reduced",
                              DFLT_NETFLOW_REDUCED_KEEPALIVE_LOW,
                              DFLT_NETFLOW_REDUCED_KEEPALIVE_MIN,
                              DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);

  chan->padding_timeout_high_ms =
      networkstatus_get_param(NULL, "nf_ito_high_reduced",
                              DFLT_NETFLOW_REDUCED_KEEPALIVE_HIGH,
                              chan->padding_timeout_low_ms,
                              DFLT_NETFLOW_REDUCED_KEEPALIVE_MAX);
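
  /* With these reduced defaults (9000..14000 ms), R = 5000, so per the table
   * in channelpadding_get_netflow_inactive_timeout_ms() an otherwise idle
   * connection is padded on average roughly every 9000 + 2666 ~= 11.7 s when
   * both endpoints pad, and roughly every 9000 + 3333 ~= 12.3 s one-sided. */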

  log_fn(LOG_INFO, LD_OR,
         "Reduced padding on channel "U64_FORMAT": lo=%d, hi=%d",
         U64_PRINTF_ARG(chan->global_identifier),
         chan->padding_timeout_low_ms, chan->padding_timeout_high_ms);
}

/**
 * This function is called once per second by run_connection_housekeeping(),
 * but only if the channel is still open, valid, and non-wedged.
 *
 * It decides if and when we should send a padding cell, and if needed,
 * schedules a callback to send that cell at the appropriate time.
 *
 * Returns an enum that represents the current padding decision state.
 * Return value is currently used only by unit tests.
 */
channelpadding_decision_t
channelpadding_decide_to_pad_channel(channel_t *chan)
{
  const or_options_t *options = get_options();

  /* Only pad open channels */
  if (chan->state != CHANNEL_STATE_OPEN)
    return CHANNELPADDING_WONTPAD;

  if (chan->channel_usage == CHANNEL_USED_FOR_FULL_CIRCS) {
    if (!networkstatus_get_param(NULL, "nf_pad_before_usage", 1, 0, 1))
      return CHANNELPADDING_WONTPAD;
  } else if (chan->channel_usage != CHANNEL_USED_FOR_USER_TRAFFIC) {
    return CHANNELPADDING_WONTPAD;
  }

  if (chan->pending_padding_callback)
    return CHANNELPADDING_PADDING_ALREADY_SCHEDULED;

  /* Don't pad the channel if we didn't negotiate it, but still
   * allow clients to force padding if options->ConnectionPadding is
   * explicitly set to 1.
   */
  if (!chan->padding_enabled && options->ConnectionPadding != 1) {
    return CHANNELPADDING_WONTPAD;
  }

  if (!chan->has_queued_writes(chan)) {
    int is_client_channel = 0;
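
    /* A channel counts as client-facing if we are not a public relay, if the
     * remote end has behaved like a client, or if its identity is not that
     * of a known relay. */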
    if (!public_server_mode(options) || chan->is_client ||
        !connection_or_digest_is_known_relay(chan->identity_digest)) {
      is_client_channel = 1;
    }

    /* If nf_pad_relays=1 is set in the consensus, we pad
     * on *all* idle connections, relay-relay or relay-client.
     * Otherwise pad only for client and bridge connections. */
    if (is_client_channel ||
        networkstatus_get_param(NULL, "nf_pad_relays", 0, 0, 1)) {
      int64_t pad_time_ms =
          channelpadding_compute_time_until_pad_for_netflow(chan);

      if (pad_time_ms == CHANNELPADDING_TIME_DISABLED) {
        return CHANNELPADDING_WONTPAD;
      } else if (pad_time_ms == CHANNELPADDING_TIME_LATER) {
        chan->currently_padding = 1;
        return CHANNELPADDING_PADLATER;
      } else {
        /* We have to schedule a callback because we're called exactly once
         * per second, but we don't want padding packets to go out exactly
         * on an integer multiple of seconds. This callback will only be
         * scheduled if we're within 1.1 seconds of the padding time.
         */
        chan->currently_padding = 1;
        return channelpadding_schedule_padding(chan, pad_time_ms);
      }
    } else {
      chan->currently_padding = 0;
      return CHANNELPADDING_WONTPAD;
    }
  } else {
    return CHANNELPADDING_PADLATER;
  }
}