/* Copyright (c) 2003-2004, Roger Dingledine.
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2013, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file cpuworker.c
 * \brief Implements a farm of 'CPU worker' processes to perform
 * CPU-intensive tasks in another thread or process, to not
 * interrupt the main thread.
 *
 * Right now, we only use this for processing onionskins.
 **/

#include "or.h"
#include "buffers.h"
#include "channel.h"
#include "channeltls.h"
#include "circuitbuild.h"
#include "circuitlist.h"
#include "config.h"
#include "connection.h"
#include "connection_or.h"
#include "cpuworker.h"
#include "main.h"
#include "onion.h"
#include "rephist.h"
#include "router.h"

/** The maximum number of cpuworker processes we will keep around. */
#define MAX_CPUWORKERS 16
/** The minimum number of cpuworker processes we will keep around. */
#define MIN_CPUWORKERS 1

/** The tag specifies which circuit this onionskin was from. */
#define TAG_LEN 12

/** How many cpuworkers we have running right now. */
static int num_cpuworkers=0;
/** How many of the running cpuworkers have an assigned task right now. */
static int num_cpuworkers_busy=0;
/** We need to spawn new cpuworkers whenever we rotate the onion keys
 * on platforms where execution contexts==processes.  This variable stores
 * the last time we got a key rotation event. */
static time_t last_rotation_time=0;

static void cpuworker_main(void *data) ATTR_NORETURN;
static int spawn_cpuworker(void);
static void spawn_enough_cpuworkers(void);
static void process_pending_task(connection_t *cpuworker);

/** Initialize the cpuworker subsystem.
 */
void
cpu_init(void)
{
  cpuworkers_rotate();
}

/** Called when we're done sending a request to a cpuworker. */
int
connection_cpu_finished_flushing(connection_t *conn)
{
  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);
  return 0;
}

/** Pack global_id and circ_id; set *tag to the result. (See note on
 * cpuworker_main for wire format.) */
static void
tag_pack(uint8_t *tag, uint64_t chan_id, circid_t circ_id)
{
  /*XXXX RETHINK THIS WHOLE MESS !!!! !NM NM NM NM*/
  /*XXXX DOUBLEPLUSTHIS!!!! AS AS AS AS*/
  set_uint64(tag, chan_id);
  set_uint32(tag+8, circ_id);
}
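
/* For reference, the tag layout implied by tag_pack()/tag_unpack()
 * (TAG_LEN == 12):
 *
 *   bytes 0..7   channel global identifier (uint64)
 *   bytes 8..11  circuit ID (uint32)
 *
 * The byte order is whatever set_uint64()/set_uint32() produce; that is
 * fine here, since the tag never leaves this host and only has to
 * round-trip through a local cpuworker. */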

/** Unpack <b>tag</b> into chan_id and circ_id.
 */
static void
tag_unpack(const uint8_t *tag, uint64_t *chan_id, circid_t *circ_id)
{
  *chan_id = get_uint64(tag);
  *circ_id = get_uint32(tag+8);
}

/** Magic numbers to make sure our cpuworker_requests don't grow any
 * mis-framing bugs. */
#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
#define CPUWORKER_REPLY_MAGIC 0x5eedf00d

/** A request sent to a cpuworker. */
typedef struct cpuworker_request_t {
  /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
  uint32_t magic;
  /** Opaque tag to identify the job */
  uint8_t tag[TAG_LEN];
  /** Task code. Must be one of CPUWORKER_TASK_* */
  uint8_t task;

  /** Flag: Are we timing this request? */
  unsigned timed : 1;
  /** If we're timing this request, when was it sent to the cpuworker? */
  struct timeval started_at;

  /** A create cell for the cpuworker to process. */
  create_cell_t create_cell;

  /* Turn the above into a tagged union if needed. */
} cpuworker_request_t;

/** A reply sent by a cpuworker. */
typedef struct cpuworker_reply_t {
  /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
  uint32_t magic;
  /** Opaque tag to identify the job; matches the request's tag. */
  uint8_t tag[TAG_LEN];
  /** True iff we got a successful request. */
  uint8_t success;

  /** Are we timing this request? */
  unsigned int timed : 1;
  /** What handshake type was the request? (Used for timing.) */
  uint16_t handshake_type;
  /** When did we send the request to the cpuworker? */
  struct timeval started_at;
  /** Once the cpuworker received the request, how many microseconds did it
   * take? (This shouldn't overflow; 4 billion microseconds is over an hour,
   * and we'll never have an onion handshake that takes so long.) */
  uint32_t n_usec;

  /** Output of processing a create cell
   *
   * @{
   */
  /** The created cell to send back. */
  created_cell_t created_cell;
  /** The keys to use on this circuit. */
  uint8_t keys[CPATH_KEY_MATERIAL_LEN];
  /** Input to use for authenticating introduce1 cells. */
  uint8_t rend_auth_material[DIGEST_LEN];
} cpuworker_reply_t;
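
/* Both structs above cross the worker socketpair as raw bytes (see the
 * read_all()/write_all() calls in cpuworker_main()), so the main thread and
 * the worker must agree exactly on struct layout.  They always do, since
 * the worker is spawned from the same binary; the magic fields catch any
 * framing slip regardless. */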

/** Called when the onion key has changed and we need to spawn new
 * cpuworkers.  Close all currently idle cpuworkers, and mark the last
 * rotation time as now.
 */
void
cpuworkers_rotate(void)
{
  connection_t *cpuworker;
  while ((cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
                                                   CPUWORKER_STATE_IDLE))) {
    connection_mark_for_close(cpuworker);
    --num_cpuworkers;
  }
  last_rotation_time = time(NULL);
  if (server_mode(get_options()))
    spawn_enough_cpuworkers();
}

/** If the cpuworker closes the connection,
 * mark it as closed and spawn a new one as needed. */
int
connection_cpu_reached_eof(connection_t *conn)
{
  log_warn(LD_GENERAL,"Read eof. CPU worker died unexpectedly.");
  if (conn->state != CPUWORKER_STATE_IDLE) {
    /* the circ associated with this cpuworker will have to wait until
     * it gets culled in run_connection_housekeeping(), since we have
     * no way to find out which circ it was. */
    log_warn(LD_GENERAL,"...and it left a circuit queued; abandoning circ.");
    num_cpuworkers_busy--;
  }
  num_cpuworkers--;
  spawn_enough_cpuworkers(); /* try to regrow. hope we don't end up
                                spinning. */
  connection_mark_for_close(conn);
  return 0;
}

/** Indexed by handshake type: how many onionskins have we processed and
 * counted of that type? */
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to the onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent in cpuworkers
 * processing that kind of onionskin? */
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent waiting for
 * cpuworkers to give us answers for that kind of onionskin?
 */
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];

/** If any onionskin takes longer than this, we clip it to this
 * time. (2 seconds, expressed in microseconds.) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)

static tor_weak_rng_t request_sample_rng = TOR_WEAK_RNG_INIT;

/** Return true iff we'd like to measure a handshake of type
 * <b>onionskin_type</b>. Call only from the main thread. */
static int
should_time_request(uint16_t onionskin_type)
{
  /* If we've never heard of this type, we shouldn't even be here. */
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
    return 0;
  /* Measure the first N handshakes of each type, to ensure we have a
   * sample */
  if (onionskins_n_processed[onionskin_type] < 4096)
    return 1;
  /* Otherwise, measure with P=1/128.  We avoid doing this for every
   * handshake, since the measurement itself can take a little time. */
  return tor_weak_random_one_in_n(&request_sample_rng, 128);
}
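
/* In other words: the first 4096 handshakes of each type are all timed to
 * build an initial sample; after that, only about 1 in 128 (under 1%) is
 * timed, so the steady-state cost of the extra tor_gettimeofday() calls
 * stays negligible on a busy relay. */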

/** Return an estimate of how many microseconds we will need for a single
 * cpuworker to process <b>n_requests</b> onionskins of type
 * <b>onionskin_type</b>. */
uint64_t
estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
{
  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return 1000 * (uint64_t)n_requests;
  if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
    /* Until we have 100 data points, just assume everything takes 1 msec. */
    return 1000 * (uint64_t)n_requests;
  } else {
    /* This can't overflow: we'll never have more than 500000 onionskins
     * measured in onionskins_usec_internal, and they won't take anything
     * near 1 sec each, and we won't have anything like 1 million queued
     * onionskins.  But that's 5e5 * 1e6 * 1e6, which is still less than
     * UINT64_MAX. */
    return (onionskins_usec_internal[onionskin_type] * n_requests) /
      onionskins_n_processed[onionskin_type];
  }
}
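
/* A worked example of the estimate above, with illustrative numbers only:
 * if we have measured 1000 onionskins of some type at 250000 usec of total
 * internal time (250 usec each on average), then 50 queued requests of that
 * type are estimated at 250000 * 50 / 1000 = 12500 usec, i.e. about 12.5
 * msec of cpuworker time. */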

/** Compute the absolute and relative overhead of using the cpuworker
 * framework for onionskins of type <b>onionskin_type</b>.*/
static int
get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
                            uint16_t onionskin_type)
{
  uint64_t overhead;

  *usec_out = 0;
  *frac_out = 0.0;

  if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
    return -1;
  if (onionskins_n_processed[onionskin_type] == 0 ||
      onionskins_usec_internal[onionskin_type] == 0 ||
      onionskins_usec_roundtrip[onionskin_type] == 0)
    return -1;

  overhead = onionskins_usec_roundtrip[onionskin_type] -
    onionskins_usec_internal[onionskin_type];

  *usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
  *frac_out = U64_TO_DBL(overhead) / onionskins_usec_internal[onionskin_type];

  return 0;
}

/** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
 * log it. */
void
cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
                                 const char *onionskin_type_name)
{
  uint32_t overhead;
  double relative_overhead;
  int r;

  r = get_overhead_for_onionskins(&overhead, &relative_overhead,
                                  onionskin_type);
  if (!overhead || r<0)
    return;

  log_fn(severity, LD_OR,
         "%s onionskins have averaged %u usec overhead (%.2f%%) in "
         "cpuworker code ",
         onionskin_type_name, (unsigned)overhead, relative_overhead*100);
}

/** Called when we get data from a cpuworker.  If the answer is not complete,
 * wait for a complete answer. If the answer is complete,
 * process it as appropriate.
 */
int
connection_cpu_process_inbuf(connection_t *conn)
{
  uint64_t chan_id;
  circid_t circ_id;
  channel_t *p_chan = NULL;
  circuit_t *circ;

  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);

  if (!connection_get_inbuf_len(conn))
    return 0;

  if (conn->state == CPUWORKER_STATE_BUSY_ONION) {
    cpuworker_reply_t rpl;
    if (connection_get_inbuf_len(conn) < sizeof(cpuworker_reply_t))
      return 0; /* not yet */
    tor_assert(connection_get_inbuf_len(conn) == sizeof(cpuworker_reply_t));
    connection_fetch_from_buf((void*)&rpl,sizeof(cpuworker_reply_t),conn);
    tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);

    if (rpl.timed && rpl.success &&
        rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
      /* Time how long this request took. The handshake_type check should be
         needless, but let's leave it in to be safe. */
      struct timeval tv_end, tv_diff;
      int64_t usec_roundtrip;
      tor_gettimeofday(&tv_end);
      timersub(&tv_end, &rpl.started_at, &tv_diff);
      usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
      if (usec_roundtrip >= 0 &&
          usec_roundtrip < MAX_BELIEVABLE_ONIONSKIN_DELAY) {
        ++onionskins_n_processed[rpl.handshake_type];
        onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
        onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
        if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
          /* Scale down every 500000 handshakes.  On a busy server, that's
           * less impressive than it sounds. */
          onionskins_n_processed[rpl.handshake_type] /= 2;
          onionskins_usec_internal[rpl.handshake_type] /= 2;
          onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
        }
      }
    }

    /* parse out the circ it was talking about */
    tag_unpack(rpl.tag, &chan_id, &circ_id);
    circ = NULL;
    log_debug(LD_OR,
              "Unpacking cpuworker reply, chan_id is " U64_FORMAT
              ", circ_id is %u",
              U64_PRINTF_ARG(chan_id), (unsigned)circ_id);
    p_chan = channel_find_by_global_id(chan_id);

    if (p_chan)
      circ = circuit_get_by_circid_channel(circ_id, p_chan);

    if (rpl.success == 0) {
      log_debug(LD_OR,
                "decoding onionskin failed. "
                "(Old key or bad software.) Closing.");
      if (circ)
        circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
      goto done_processing;
    }
    if (!circ) {
      /* This happens because somebody sends us a destroy cell and the
       * circuit goes away, while the cpuworker is working.  This is also
       * why our tag doesn't include a pointer to the circ, because we'd
       * never know if it's still valid.
       */
      log_debug(LD_OR,"processed onion for a circ that's gone. Dropping.");
      goto done_processing;
    }
    tor_assert(! CIRCUIT_IS_ORIGIN(circ));
    if (onionskin_answer(TO_OR_CIRCUIT(circ),
                         &rpl.created_cell,
                         (const char*)rpl.keys,
                         rpl.rend_auth_material) < 0) {
      log_warn(LD_OR,"onionskin_answer failed. Closing.");
      circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
      goto done_processing;
    }
    log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
  } else {
    tor_assert(0); /* don't ask me to do handshakes yet */
  }

 done_processing:
  conn->state = CPUWORKER_STATE_IDLE;
  num_cpuworkers_busy--;
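
  /* A worker created before the last onion-key rotation still holds the old
   * onion keys, so retire it rather than reuse it; its replacement will load
   * the fresh keys via setup_server_onion_keys(). */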
  if (conn->timestamp_created < last_rotation_time) {
    connection_mark_for_close(conn);
    num_cpuworkers--;
    spawn_enough_cpuworkers();
  } else {
    process_pending_task(conn);
  }
  return 0;
}

/** Implement a cpuworker.  'data' is an fdarray as returned by socketpair.
 * Reads and writes from fdarray[1].  Reads requests, writes answers.
 *
 *   Request format:
 *      cpuworker_request_t.
 *   Response format:
 *      cpuworker_reply_t
 */
static void
cpuworker_main(void *data)
{
  /* For talking to the parent thread/process */
  tor_socket_t *fdarray = data;
  tor_socket_t fd;

  /* variables for onion processing */
  server_onion_keys_t onion_keys;
  cpuworker_request_t req;
  cpuworker_reply_t rpl;

  fd = fdarray[1]; /* this side is ours */
  tor_free(data);

  setup_server_onion_keys(&onion_keys);

  for (;;) {
    if (read_all(fd, (void *)&req, sizeof(req), 1) != sizeof(req)) {
      log_info(LD_OR, "read request failed. Exiting.");
      goto end;
    }
    tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);

    memset(&rpl, 0, sizeof(rpl));

    if (req.task == CPUWORKER_TASK_ONION) {
      const create_cell_t *cc = &req.create_cell;
      created_cell_t *cell_out = &rpl.created_cell;
      struct timeval tv_start = {0,0}, tv_end;
      int n;
      rpl.timed = req.timed;
      rpl.started_at = req.started_at;
      rpl.handshake_type = cc->handshake_type;
      if (req.timed)
        tor_gettimeofday(&tv_start);
      n = onion_skin_server_handshake(cc->handshake_type,
                                      cc->onionskin, cc->handshake_len,
                                      &onion_keys,
                                      cell_out->reply,
                                      rpl.keys, CPATH_KEY_MATERIAL_LEN,
                                      rpl.rend_auth_material);
      if (n < 0) {
        /* failure */
        log_debug(LD_OR,"onion_skin_server_handshake failed.");
        memset(&rpl, 0, sizeof(rpl));
        memcpy(rpl.tag, req.tag, TAG_LEN);
        rpl.success = 0;
      } else {
        /* success */
        log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
        memcpy(rpl.tag, req.tag, TAG_LEN);
        cell_out->handshake_len = n;
        switch (cc->cell_type) {
        case CELL_CREATE:
          cell_out->cell_type = CELL_CREATED; break;
        case CELL_CREATE2:
          cell_out->cell_type = CELL_CREATED2; break;
        case CELL_CREATE_FAST:
          cell_out->cell_type = CELL_CREATED_FAST; break;
        default:
          tor_assert(0);
          goto end;
        }
        rpl.success = 1;
      }
      rpl.magic = CPUWORKER_REPLY_MAGIC;
      if (req.timed) {
        struct timeval tv_diff;
        int64_t usec;
        tor_gettimeofday(&tv_end);
        timersub(&tv_end, &tv_start, &tv_diff);
        usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
        if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
          rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
        else
          rpl.n_usec = (uint32_t) usec;
      }
      if (write_all(fd, (void*)&rpl, sizeof(rpl), 1) != sizeof(rpl)) {
        log_err(LD_BUG,"writing response buf failed. Exiting.");
        goto end;
      }
      log_debug(LD_OR,"finished writing response.");
    } else if (req.task == CPUWORKER_TASK_SHUTDOWN) {
      log_info(LD_OR,"Clean shutdown: exiting");
      goto end;
    }
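
    /* Wipe the request and reply before the next iteration: both can hold
     * sensitive handshake material (the onionskin and the circuit keys). */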
    memwipe(&req, 0, sizeof(req));
    memwipe(&rpl, 0, sizeof(rpl));
  }
 end:
  memwipe(&req, 0, sizeof(req));
  memwipe(&rpl, 0, sizeof(rpl));
  release_server_onion_keys(&onion_keys);
  tor_close_socket(fd);
  crypto_thread_cleanup();
  spawn_exit();
}

/** Launch a new cpuworker. Return 0 if we're happy, -1 if we failed.
 */
static int
spawn_cpuworker(void)
{
  tor_socket_t *fdarray;
  tor_socket_t fd;
  connection_t *conn;
  int err;

  fdarray = tor_calloc(2, sizeof(tor_socket_t));
  if ((err = tor_socketpair(AF_UNIX, SOCK_STREAM, 0, fdarray)) < 0) {
    log_warn(LD_NET, "Couldn't construct socketpair for cpuworker: %s",
             tor_socket_strerror(-err));
    tor_free(fdarray);
    return -1;
  }

  tor_assert(SOCKET_OK(fdarray[0]));
  tor_assert(SOCKET_OK(fdarray[1]));

  fd = fdarray[0];
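
  /* fdarray[0] stays with the main thread as conn->s below; fdarray[1] is
   * handed to the worker, which picks it up in cpuworker_main(). */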
  if (spawn_func(cpuworker_main, (void*)fdarray) < 0) {
    tor_close_socket(fdarray[0]);
    tor_close_socket(fdarray[1]);
    tor_free(fdarray);
    return -1;
  }
  log_debug(LD_OR,"just spawned a cpu worker.");

  conn = connection_new(CONN_TYPE_CPUWORKER, AF_UNIX);

  /* set up conn so it's got all the data we need to remember */
  conn->s = fd;
  conn->address = tor_strdup("localhost");
  tor_addr_make_unspec(&conn->addr);

  if (set_socket_nonblocking(fd) == -1) {
    connection_free(conn); /* this closes fd */
    return -1;
  }

  if (connection_add(conn) < 0) { /* no space, forget it */
    log_warn(LD_NET,"connection_add for cpuworker failed. Giving up.");
    connection_free(conn); /* this closes fd */
    return -1;
  }

  conn->state = CPUWORKER_STATE_IDLE;
  connection_start_reading(conn);

  return 0; /* success */
}

/** If we have too few or too many active cpuworkers, try to spawn new ones
 * or kill idle ones.
 */
static void
spawn_enough_cpuworkers(void)
{
  int num_cpuworkers_needed = get_num_cpus(get_options());
  int reseed = 0;

  if (num_cpuworkers_needed < MIN_CPUWORKERS)
    num_cpuworkers_needed = MIN_CPUWORKERS;
  if (num_cpuworkers_needed > MAX_CPUWORKERS)
    num_cpuworkers_needed = MAX_CPUWORKERS;

  while (num_cpuworkers < num_cpuworkers_needed) {
    if (spawn_cpuworker() < 0) {
      log_warn(LD_GENERAL,"Cpuworker spawn failed. Will try again later.");
      return;
    }
    num_cpuworkers++;
    reseed++;
  }

  if (reseed)
    crypto_seed_weak_rng(&request_sample_rng);
}
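
/* Note on the reseed above: should_time_request() can only run once at
 * least one cpuworker exists, and every path that spawns a worker passes
 * through this function, so request_sample_rng is seeded before it is
 * first sampled. */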

/** Take a pending task from the queue and assign it to 'cpuworker'. */
static void
process_pending_task(connection_t *cpuworker)
{
  or_circuit_t *circ;
  create_cell_t *onionskin = NULL;

  tor_assert(cpuworker);

  /* for now only process onion tasks */

  circ = onion_next_task(&onionskin);
  if (!circ)
    return;
  if (assign_onionskin_to_cpuworker(cpuworker, circ, onionskin))
    log_warn(LD_OR,"assign_to_cpuworker failed. Ignoring.");
}

/** How long should we let a cpuworker stay busy before we give
 * up on it and decide that we have a bug or infinite loop?
 * This value is high because some servers with low memory/cpu
 * sometimes spend an hour or more swapping, and Tor starves. */
#define CPUWORKER_BUSY_TIMEOUT (60*60*12)

/** We have a bug that I can't find. Sometimes, very rarely, cpuworkers get
 * stuck in the 'busy' state, even though the cpuworker process thinks of
 * itself as idle. I don't know why. But here's a workaround to kill any
 * cpuworker that's been busy for more than CPUWORKER_BUSY_TIMEOUT.
 */
static void
cull_wedged_cpuworkers(void)
{
  time_t now = time(NULL);
  smartlist_t *conns = get_connection_array();
  SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
    if (!conn->marked_for_close &&
        conn->type == CONN_TYPE_CPUWORKER &&
        conn->state == CPUWORKER_STATE_BUSY_ONION &&
        conn->timestamp_lastwritten + CPUWORKER_BUSY_TIMEOUT < now) {
      log_notice(LD_BUG,
                 "closing wedged cpuworker. Can somebody find the bug?");
      num_cpuworkers_busy--;
      num_cpuworkers--;
      connection_mark_for_close(conn);
    }
  } SMARTLIST_FOREACH_END(conn);
}

/** Try to tell a cpuworker to perform the public key operations necessary to
 * respond to <b>onionskin</b> for the circuit <b>circ</b>.
 *
 * If <b>cpuworker</b> is defined, assert that it's idle, and use it. Else,
 * look for an idle cpuworker and use it. If none is idle, queue the task
 * onto the pending onion list and return. Return 0 if we successfully
 * assign the task, or -1 on failure.
 */
int
assign_onionskin_to_cpuworker(connection_t *cpuworker,
                              or_circuit_t *circ,
                              create_cell_t *onionskin)
{
  cpuworker_request_t req;
  time_t now = approx_time();
  static time_t last_culled_cpuworkers = 0;
  int should_time;

  /* Checking for wedged cpuworkers requires a linear search over all
   * connections, so let's do it only once a minute.
   */
#define CULL_CPUWORKERS_INTERVAL 60

  if (last_culled_cpuworkers + CULL_CPUWORKERS_INTERVAL <= now) {
    cull_wedged_cpuworkers();
    spawn_enough_cpuworkers();
    last_culled_cpuworkers = now;
  }

  if (num_cpuworkers_busy == num_cpuworkers) {
    log_debug(LD_OR,"No idle cpuworkers. Queuing.");
    if (onion_pending_add(circ, onionskin) < 0) {
      tor_free(onionskin);
      return -1;
    }
    return 0;
  }

  if (!cpuworker)
    cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
                                             CPUWORKER_STATE_IDLE);

  tor_assert(cpuworker);

  if (!circ->p_chan) {
    log_info(LD_OR,"circ->p_chan gone. Failing circ.");
    tor_free(onionskin);
    return -1;
  }

  if (connection_or_digest_is_known_relay(circ->p_chan->identity_digest))
    rep_hist_note_circuit_handshake_assigned(onionskin->handshake_type);

  should_time = should_time_request(onionskin->handshake_type);
  memset(&req, 0, sizeof(req));
  req.magic = CPUWORKER_REQUEST_MAGIC;
  tag_pack(req.tag, circ->p_chan->global_identifier,
           circ->p_circ_id);
  req.timed = should_time;

  cpuworker->state = CPUWORKER_STATE_BUSY_ONION;
  /* touch the lastwritten timestamp, since that's how we check to
   * see how long it's been since we asked the question, and sometimes
   * we check before the first call to connection_handle_write(). */
  cpuworker->timestamp_lastwritten = now;
  num_cpuworkers_busy++;

  req.task = CPUWORKER_TASK_ONION;
  memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));
  tor_free(onionskin);

  if (should_time)
    tor_gettimeofday(&req.started_at);

  connection_write_to_buf((void*)&req, sizeof(req), cpuworker);
  memwipe(&req, 0, sizeof(req));

  return 0;
}