cpuworker.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705
  1. /* Copyright (c) 2003-2004, Roger Dingledine.
  2. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
  3. * Copyright (c) 2007-2012, The Tor Project, Inc. */
  4. /* See LICENSE for licensing information */
  5. /**
  6. * \file cpuworker.c
  7. * \brief Implements a farm of 'CPU worker' processes to perform
  8. * CPU-intensive tasks in another thread or process, to not
  9. * interrupt the main thread.
  10. *
  11. * Right now, we only use this for processing onionskins.
  12. **/
  13. #include "or.h"
  14. #include "buffers.h"
  15. #include "channel.h"
  16. #include "channeltls.h"
  17. #include "circuitbuild.h"
  18. #include "circuitlist.h"
  19. #include "config.h"
  20. #include "connection.h"
  21. #include "cpuworker.h"
  22. #include "main.h"
  23. #include "onion.h"
  24. #include "router.h"
/** The maximum number of cpuworker processes we will keep around. */
#define MAX_CPUWORKERS 16
/** The minimum number of cpuworker processes we will keep around. */
#define MIN_CPUWORKERS 1

/** The tag specifies which circuit this onionskin was from: 8 bytes of
 * channel identifier followed by 2 bytes of circuit ID (see tag_pack()). */
#define TAG_LEN 10

/** How many cpuworkers we have running right now. */
static int num_cpuworkers=0;
/** How many of the running cpuworkers have an assigned task right now. */
static int num_cpuworkers_busy=0;
/** We need to spawn new cpuworkers whenever we rotate the onion keys
 * on platforms where execution contexts==processes. This variable stores
 * the last time we got a key rotation event. */
static time_t last_rotation_time=0;

static void cpuworker_main(void *data) ATTR_NORETURN;
static int spawn_cpuworker(void);
static void spawn_enough_cpuworkers(void);
static void process_pending_task(connection_t *cpuworker);
  43. /** Initialize the cpuworker subsystem.
  44. */
  45. void
  46. cpu_init(void)
  47. {
  48. cpuworkers_rotate();
  49. }
  50. /** Called when we're done sending a request to a cpuworker. */
  51. int
  52. connection_cpu_finished_flushing(connection_t *conn)
  53. {
  54. tor_assert(conn);
  55. tor_assert(conn->type == CONN_TYPE_CPUWORKER);
  56. return 0;
  57. }
  58. /** Pack global_id and circ_id; set *tag to the result. (See note on
  59. * cpuworker_main for wire format.) */
  60. static void
  61. tag_pack(uint8_t *tag, uint64_t chan_id, circid_t circ_id)
  62. {
  63. /*XXXX RETHINK THIS WHOLE MESS !!!! !NM NM NM NM*/
  64. /*XXXX DOUBLEPLUSTHIS!!!! AS AS AS AS*/
  65. set_uint64(tag, chan_id);
  66. set_uint16(tag+8, circ_id);
  67. }
  68. /** Unpack <b>tag</b> into addr, port, and circ_id.
  69. */
  70. static void
  71. tag_unpack(const uint8_t *tag, uint64_t *chan_id, circid_t *circ_id)
  72. {
  73. *chan_id = get_uint64(tag);
  74. *circ_id = get_uint16(tag+8);
  75. }
/** Magic numbers to make sure our cpuworker_requests don't grow any
 * mis-framing bugs. */
#define CPUWORKER_REQUEST_MAGIC 0xda4afeed
#define CPUWORKER_REPLY_MAGIC 0x5eedf00d

/** A request sent to a cpuworker. (Sent over the socketpair as a raw
 * struct: both ends are the same binary, so no marshalling is done --
 * see read_all/write_all in cpuworker_main.) */
typedef struct cpuworker_request_t {
  /** Magic number; must be CPUWORKER_REQUEST_MAGIC. */
  uint32_t magic;
  /** Opaque tag to identify the job; echoed back verbatim in the reply. */
  uint8_t tag[TAG_LEN];
  /** Task code. Must be one of CPUWORKER_TASK_* */
  uint8_t task;

  /** Flag: Are we timing this request? */
  unsigned timed : 1;
  /** If we're timing this request, when was it sent to the cpuworker? */
  struct timeval started_at;

  /** A create cell for the cpuworker to process. */
  create_cell_t create_cell;

  /* Turn the above into a tagged union if needed. */
} cpuworker_request_t;
/** A reply sent by a cpuworker, as a raw struct over the socketpair. */
typedef struct cpuworker_reply_t {
  /** Magic number; must be CPUWORKER_REPLY_MAGIC. */
  uint32_t magic;
  /** Opaque tag to identify the job; matches the request's tag.*/
  uint8_t tag[TAG_LEN];
  /** True iff we got a successful request. */
  uint8_t success;

  /** Are we timing this request? (Copied from the request.) */
  unsigned int timed : 1;
  /** What handshake type was the request? (Used for timing) */
  uint16_t handshake_type;
  /** When did we send the request to the cpuworker? (Copied from the
   * request, so the main thread can compute the round-trip time.) */
  struct timeval started_at;
  /** Once the cpuworker received the request, how many microseconds did it
   * take? (This shouldn't overflow; 4 billion microseconds is over an hour,
   * and we'll never have an onion handshake that takes so long.) */
  uint32_t n_usec;

  /** Output of processing a create cell
   *
   * @{
   */
  /** The created cell to send back. */
  created_cell_t created_cell;
  /** The keys to use on this circuit. */
  uint8_t keys[CPATH_KEY_MATERIAL_LEN];
  /** Input to use for authenticating introduce1 cells. */
  uint8_t rend_auth_material[DIGEST_LEN];
  /** @} */
} cpuworker_reply_t;
  125. /** Called when the onion key has changed and we need to spawn new
  126. * cpuworkers. Close all currently idle cpuworkers, and mark the last
  127. * rotation time as now.
  128. */
  129. void
  130. cpuworkers_rotate(void)
  131. {
  132. connection_t *cpuworker;
  133. while ((cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
  134. CPUWORKER_STATE_IDLE))) {
  135. connection_mark_for_close(cpuworker);
  136. --num_cpuworkers;
  137. }
  138. last_rotation_time = time(NULL);
  139. if (server_mode(get_options()))
  140. spawn_enough_cpuworkers();
  141. }
  142. /** If the cpuworker closes the connection,
  143. * mark it as closed and spawn a new one as needed. */
  144. int
  145. connection_cpu_reached_eof(connection_t *conn)
  146. {
  147. log_warn(LD_GENERAL,"Read eof. CPU worker died unexpectedly.");
  148. if (conn->state != CPUWORKER_STATE_IDLE) {
  149. /* the circ associated with this cpuworker will have to wait until
  150. * it gets culled in run_connection_housekeeping(), since we have
  151. * no way to find out which circ it was. */
  152. log_warn(LD_GENERAL,"...and it left a circuit queued; abandoning circ.");
  153. num_cpuworkers_busy--;
  154. }
  155. num_cpuworkers--;
  156. spawn_enough_cpuworkers(); /* try to regrow. hope we don't end up
  157. spinning. */
  158. connection_mark_for_close(conn);
  159. return 0;
  160. }
/** Indexed by handshake type: how many onionskins have we processed and
 * counted of that type? */
static uint64_t onionskins_n_processed[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to the onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent in cpuworkers
 * processing that kind of onionskin? */
static uint64_t onionskins_usec_internal[MAX_ONION_HANDSHAKE_TYPE+1];
/** Indexed by handshake type, corresponding to onionskins counted in
 * onionskins_n_processed: how many microseconds have we spent waiting for
 * cpuworkers to give us answers for that kind of onionskin?
 * (Round-trip time; always >= the internal time for the same skins.)
 */
static uint64_t onionskins_usec_roundtrip[MAX_ONION_HANDSHAKE_TYPE+1];

/** If any onionskin takes longer than this, we clip them to this
 * time. (microseconds) */
#define MAX_BELIEVABLE_ONIONSKIN_DELAY (2*1000*1000)
  176. /** Return true iff we'd like to measure a handshake of type
  177. * <b>onionskin_type</b>. */
  178. static int
  179. should_time_request(uint16_t onionskin_type)
  180. {
  181. /* If we've never heard of this type, we shouldn't even be here. */
  182. if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE)
  183. return 0;
  184. /* Measure the first N handshakes of each type, to ensure we have a
  185. * sample */
  186. if (onionskins_n_processed[onionskin_type] < 4096)
  187. return 1;
  188. /** Otherwise, measure with P=1/128. We avoid doing this for every
  189. * handshake, since the measurement itself can take a little time. */
  190. return tor_weak_random() < (TOR_RAND_MAX/128);
  191. }
  192. /** Return an estimate of how many microseconds we will need for a single
  193. * cpuworker to to process <b>n_requests</b> onionskins of type
  194. * <b>onionskin_type</b>. */
  195. uint64_t
  196. estimated_usec_for_onionskins(uint32_t n_requests, uint16_t onionskin_type)
  197. {
  198. if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
  199. return 1000 * n_requests;
  200. if (PREDICT_UNLIKELY(onionskins_n_processed[onionskin_type] < 100)) {
  201. /* Until we have 100 data points, just asssume everything takes 1 msec. */
  202. return 1000 * n_requests;
  203. } else {
  204. /* This can't overflow: we'll never have more than 500000 onionskins
  205. * measured in onionskin_usec_internal, and they won't take anything near
  206. * 1 sec each, and we won't have anything like 1 million queued
  207. * onionskins. But that's 5e5 * 1e6 * 1e6, which is still less than
  208. * UINT64_MAX. */
  209. return (onionskins_usec_internal[onionskin_type] * n_requests) /
  210. onionskins_n_processed[onionskin_type];
  211. }
  212. }
  213. /** Compute the absolute and relative overhead of using the cpuworker
  214. * framework for onionskins of type <b>onionskin_type</b>.*/
  215. static int
  216. get_overhead_for_onionskins(uint32_t *usec_out, double *frac_out,
  217. uint16_t onionskin_type)
  218. {
  219. uint64_t overhead;
  220. *usec_out = 0;
  221. *frac_out = 0.0;
  222. if (onionskin_type > MAX_ONION_HANDSHAKE_TYPE) /* should be impossible */
  223. return -1;
  224. if (onionskins_n_processed[onionskin_type] == 0 ||
  225. onionskins_usec_internal[onionskin_type] == 0 ||
  226. onionskins_usec_roundtrip[onionskin_type] == 0)
  227. return -1;
  228. overhead = onionskins_usec_roundtrip[onionskin_type] -
  229. onionskins_usec_internal[onionskin_type];
  230. *usec_out = (uint32_t)(overhead / onionskins_n_processed[onionskin_type]);
  231. *frac_out = U64_TO_DBL(overhead) / onionskins_usec_internal[onionskin_type];
  232. return 0;
  233. }
  234. /** If we've measured overhead for onionskins of type <b>onionskin_type</b>,
  235. * log it. */
  236. void
  237. cpuworker_log_onionskin_overhead(int severity, int onionskin_type,
  238. const char *onionskin_type_name)
  239. {
  240. uint32_t overhead;
  241. double relative_overhead;
  242. int r;
  243. r = get_overhead_for_onionskins(&overhead, &relative_overhead,
  244. onionskin_type);
  245. if (!overhead || r<0)
  246. return;
  247. log_fn(severity, LD_OR,
  248. "%s onionskins have averaged %u usec overhead (%.2f%%) in "
  249. "cpuworker code ",
  250. onionskin_type_name, (unsigned)overhead, relative_overhead*100);
  251. }
/** Called when we get data from a cpuworker. If the answer is not complete,
 * wait for a complete answer. If the answer is complete,
 * process it as appropriate.
 */
int
connection_cpu_process_inbuf(connection_t *conn)
{
  uint64_t chan_id;
  circid_t circ_id;
  channel_t *p_chan = NULL;
  circuit_t *circ;

  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);

  /* Nothing buffered yet: wait for more. */
  if (!connection_get_inbuf_len(conn))
    return 0;

  if (conn->state == CPUWORKER_STATE_BUSY_ONION) {
    cpuworker_reply_t rpl;
    /* Replies are fixed-size structs; wait until a whole one is here. */
    if (connection_get_inbuf_len(conn) < sizeof(cpuworker_reply_t))
      return 0; /* not yet */
    /* A busy worker sends exactly one reply, so there can never be more
     * than one struct's worth of data buffered. */
    tor_assert(connection_get_inbuf_len(conn) == sizeof(cpuworker_reply_t));
    connection_fetch_from_buf((void*)&rpl,sizeof(cpuworker_reply_t),conn);
    tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);
    if (rpl.timed && rpl.success &&
        rpl.handshake_type <= MAX_ONION_HANDSHAKE_TYPE) {
      /* Time how long this request took. The handshake_type check should be
         needless, but let's leave it in to be safe. */
      struct timeval tv_end, tv_diff;
      int64_t usec_roundtrip;
      tor_gettimeofday(&tv_end);
      timersub(&tv_end, &rpl.started_at, &tv_diff);
      usec_roundtrip = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
      /* Clamp clock jumps and absurd delays so they can't skew the
       * running statistics. */
      if (usec_roundtrip < 0 ||
          usec_roundtrip > MAX_BELIEVABLE_ONIONSKIN_DELAY) {
        usec_roundtrip = MAX_BELIEVABLE_ONIONSKIN_DELAY;
      }
      ++onionskins_n_processed[rpl.handshake_type];
      onionskins_usec_internal[rpl.handshake_type] += rpl.n_usec;
      onionskins_usec_roundtrip[rpl.handshake_type] += usec_roundtrip;
      if (onionskins_n_processed[rpl.handshake_type] >= 500000) {
        /* Scale down every 500000 handshakes. On a busy server, that's
         * less impressive than it sounds. Halving both numerator and
         * denominator keeps the averages intact. */
        onionskins_n_processed[rpl.handshake_type] /= 2;
        onionskins_usec_internal[rpl.handshake_type] /= 2;
        onionskins_usec_roundtrip[rpl.handshake_type] /= 2;
      }
    }
    /* parse out the circ it was talking about */
    tag_unpack(rpl.tag, &chan_id, &circ_id);
    circ = NULL;
    log_debug(LD_OR,
              "Unpacking cpuworker reply, chan_id is " U64_FORMAT
              ", circ_id is %d",
              U64_PRINTF_ARG(chan_id), circ_id);
    p_chan = channel_find_by_global_id(chan_id);
    if (p_chan)
      circ = circuit_get_by_circid_channel(circ_id, p_chan);

    if (rpl.success == 0) {
      log_debug(LD_OR,
                "decoding onionskin failed. "
                "(Old key or bad software.) Closing.");
      if (circ)
        circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
      goto done_processing;
    }
    if (!circ) {
      /* This happens because somebody sends us a destroy cell and the
       * circuit goes away, while the cpuworker is working. This is also
       * why our tag doesn't include a pointer to the circ, because we'd
       * never know if it's still valid.
       */
      log_debug(LD_OR,"processed onion for a circ that's gone. Dropping.");
      goto done_processing;
    }
    tor_assert(! CIRCUIT_IS_ORIGIN(circ));
    if (onionskin_answer(TO_OR_CIRCUIT(circ),
                         &rpl.created_cell,
                         (const char*)rpl.keys,
                         rpl.rend_auth_material) < 0) {
      log_warn(LD_OR,"onionskin_answer failed. Closing.");
      circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
      goto done_processing;
    }
    log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
  } else {
    tor_assert(0); /* don't ask me to do handshakes yet */
  }

 done_processing:
  /* The worker is idle again; either give it the next queued task or,
   * if the onion keys rotated since it was spawned, retire it. */
  conn->state = CPUWORKER_STATE_IDLE;
  num_cpuworkers_busy--;
  if (conn->timestamp_created < last_rotation_time) {
    connection_mark_for_close(conn);
    num_cpuworkers--;
    spawn_enough_cpuworkers();
  } else {
    process_pending_task(conn);
  }
  return 0;
}
  350. /** Implement a cpuworker. 'data' is an fdarray as returned by socketpair.
  351. * Read and writes from fdarray[1]. Reads requests, writes answers.
  352. *
  353. * Request format:
  354. * cpuworker_request_t.
  355. * Response format:
  356. * cpuworker_reply_t
  357. */
  358. static void
  359. cpuworker_main(void *data)
  360. {
  361. /* For talking to the parent thread/process */
  362. tor_socket_t *fdarray = data;
  363. tor_socket_t fd;
  364. /* variables for onion processing */
  365. server_onion_keys_t onion_keys;
  366. cpuworker_request_t req;
  367. cpuworker_reply_t rpl;
  368. fd = fdarray[1]; /* this side is ours */
  369. #ifndef TOR_IS_MULTITHREADED
  370. tor_close_socket(fdarray[0]); /* this is the side of the socketpair the
  371. * parent uses */
  372. tor_free_all(1); /* so the child doesn't hold the parent's fd's open */
  373. handle_signals(0); /* ignore interrupts from the keyboard, etc */
  374. #endif
  375. tor_free(data);
  376. setup_server_onion_keys(&onion_keys);
  377. for (;;) {
  378. if (read_all(fd, (void *)&req, sizeof(req), 1) != sizeof(req)) {
  379. log_info(LD_OR, "read request failed. Exiting.");
  380. goto end;
  381. }
  382. tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);
  383. memset(&rpl, 0, sizeof(rpl));
  384. if (req.task == CPUWORKER_TASK_ONION) {
  385. const create_cell_t *cc = &req.create_cell;
  386. created_cell_t *cell_out = &rpl.created_cell;
  387. struct timeval tv_start, tv_end;
  388. int n;
  389. rpl.timed = req.timed;
  390. rpl.started_at = req.started_at;
  391. rpl.handshake_type = cc->handshake_type;
  392. if (req.timed)
  393. tor_gettimeofday(&tv_start);
  394. n = onion_skin_server_handshake(cc->handshake_type,
  395. cc->onionskin, cc->handshake_len,
  396. &onion_keys,
  397. cell_out->reply,
  398. rpl.keys, CPATH_KEY_MATERIAL_LEN,
  399. rpl.rend_auth_material);
  400. if (n < 0) {
  401. /* failure */
  402. log_debug(LD_OR,"onion_skin_server_handshake failed.");
  403. memset(&rpl, 0, sizeof(rpl));
  404. memcpy(rpl.tag, req.tag, TAG_LEN);
  405. rpl.success = 0;
  406. } else {
  407. /* success */
  408. log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
  409. memcpy(rpl.tag, req.tag, TAG_LEN);
  410. cell_out->handshake_len = n;
  411. switch (cc->cell_type) {
  412. case CELL_CREATE:
  413. cell_out->cell_type = CELL_CREATED; break;
  414. case CELL_CREATE2:
  415. cell_out->cell_type = CELL_CREATED2; break;
  416. case CELL_CREATE_FAST:
  417. cell_out->cell_type = CELL_CREATED_FAST; break;
  418. default:
  419. tor_assert(0);
  420. goto end;
  421. }
  422. rpl.success = 1;
  423. }
  424. rpl.magic = CPUWORKER_REPLY_MAGIC;
  425. if (req.timed) {
  426. struct timeval tv_diff;
  427. int64_t usec;
  428. tor_gettimeofday(&tv_end);
  429. timersub(&tv_end, &tv_start, &tv_diff);
  430. usec = ((int64_t)tv_diff.tv_sec)*1000000 + tv_diff.tv_usec;
  431. if (usec < 0 || usec > MAX_BELIEVABLE_ONIONSKIN_DELAY)
  432. rpl.n_usec = MAX_BELIEVABLE_ONIONSKIN_DELAY;
  433. else
  434. rpl.n_usec = (uint32_t) usec;
  435. }
  436. if (write_all(fd, (void*)&rpl, sizeof(rpl), 1) != sizeof(rpl)) {
  437. log_err(LD_BUG,"writing response buf failed. Exiting.");
  438. goto end;
  439. }
  440. log_debug(LD_OR,"finished writing response.");
  441. } else if (req.task == CPUWORKER_TASK_SHUTDOWN) {
  442. log_info(LD_OR,"Clean shutdown: exiting");
  443. goto end;
  444. }
  445. memwipe(&req, 0, sizeof(req));
  446. memwipe(&rpl, 0, sizeof(req));
  447. }
  448. end:
  449. memwipe(&req, 0, sizeof(req));
  450. memwipe(&rpl, 0, sizeof(req));
  451. release_server_onion_keys(&onion_keys);
  452. tor_close_socket(fd);
  453. crypto_thread_cleanup();
  454. spawn_exit();
  455. }
/** Launch a new cpuworker. Return 0 if we're happy, -1 if we failed.
 */
static int
spawn_cpuworker(void)
{
  tor_socket_t *fdarray;
  tor_socket_t fd;
  connection_t *conn;
  int err;

  /* Heap-allocated because, in the threaded case, ownership passes to
   * the worker, which frees it (see cpuworker_main's tor_free(data)). */
  fdarray = tor_malloc(sizeof(tor_socket_t)*2);
  if ((err = tor_socketpair(AF_UNIX, SOCK_STREAM, 0, fdarray)) < 0) {
    log_warn(LD_NET, "Couldn't construct socketpair for cpuworker: %s",
             tor_socket_strerror(-err));
    tor_free(fdarray);
    return -1;
  }

  tor_assert(SOCKET_OK(fdarray[0]));
  tor_assert(SOCKET_OK(fdarray[1]));

  fd = fdarray[0]; /* fdarray[0] stays with us; fdarray[1] is the worker's */
  spawn_func(cpuworker_main, (void*)fdarray);
  log_debug(LD_OR,"just spawned a cpu worker.");
#ifndef TOR_IS_MULTITHREADED
  /* Process model: the child got its own copies at fork time, so we can
   * close the worker's end and free our copy of the array here. */
  tor_close_socket(fdarray[1]); /* don't need the worker's side of the pipe */
  tor_free(fdarray);
#endif

  conn = connection_new(CONN_TYPE_CPUWORKER, AF_UNIX);

  /* Our end is serviced by the main event loop, so it must not block. */
  set_socket_nonblocking(fd);

  /* set up conn so it's got all the data we need to remember */
  conn->s = fd;
  conn->address = tor_strdup("localhost");
  tor_addr_make_unspec(&conn->addr);

  if (connection_add(conn) < 0) { /* no space, forget it */
    log_warn(LD_NET,"connection_add for cpuworker failed. Giving up.");
    connection_free(conn); /* this closes fd */
    return -1;
  }

  conn->state = CPUWORKER_STATE_IDLE;
  connection_start_reading(conn);

  return 0; /* success */
}
  496. /** If we have too few or too many active cpuworkers, try to spawn new ones
  497. * or kill idle ones.
  498. */
  499. static void
  500. spawn_enough_cpuworkers(void)
  501. {
  502. int num_cpuworkers_needed = get_num_cpus(get_options());
  503. if (num_cpuworkers_needed < MIN_CPUWORKERS)
  504. num_cpuworkers_needed = MIN_CPUWORKERS;
  505. if (num_cpuworkers_needed > MAX_CPUWORKERS)
  506. num_cpuworkers_needed = MAX_CPUWORKERS;
  507. while (num_cpuworkers < num_cpuworkers_needed) {
  508. if (spawn_cpuworker() < 0) {
  509. log_warn(LD_GENERAL,"Cpuworker spawn failed. Will try again later.");
  510. return;
  511. }
  512. num_cpuworkers++;
  513. }
  514. }
  515. /** Take a pending task from the queue and assign it to 'cpuworker'. */
  516. static void
  517. process_pending_task(connection_t *cpuworker)
  518. {
  519. or_circuit_t *circ;
  520. create_cell_t *onionskin = NULL;
  521. tor_assert(cpuworker);
  522. /* for now only process onion tasks */
  523. circ = onion_next_task(&onionskin);
  524. if (!circ)
  525. return;
  526. if (assign_onionskin_to_cpuworker(cpuworker, circ, onionskin))
  527. log_warn(LD_OR,"assign_to_cpuworker failed. Ignoring.");
  528. }
  529. /** How long should we let a cpuworker stay busy before we give
  530. * up on it and decide that we have a bug or infinite loop?
  531. * This value is high because some servers with low memory/cpu
  532. * sometimes spend an hour or more swapping, and Tor starves. */
  533. #define CPUWORKER_BUSY_TIMEOUT (60*60*12)
  534. /** We have a bug that I can't find. Sometimes, very rarely, cpuworkers get
  535. * stuck in the 'busy' state, even though the cpuworker process thinks of
  536. * itself as idle. I don't know why. But here's a workaround to kill any
  537. * cpuworker that's been busy for more than CPUWORKER_BUSY_TIMEOUT.
  538. */
  539. static void
  540. cull_wedged_cpuworkers(void)
  541. {
  542. time_t now = time(NULL);
  543. smartlist_t *conns = get_connection_array();
  544. SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
  545. if (!conn->marked_for_close &&
  546. conn->type == CONN_TYPE_CPUWORKER &&
  547. conn->state == CPUWORKER_STATE_BUSY_ONION &&
  548. conn->timestamp_lastwritten + CPUWORKER_BUSY_TIMEOUT < now) {
  549. log_notice(LD_BUG,
  550. "closing wedged cpuworker. Can somebody find the bug?");
  551. num_cpuworkers_busy--;
  552. num_cpuworkers--;
  553. connection_mark_for_close(conn);
  554. }
  555. } SMARTLIST_FOREACH_END(conn);
  556. }
  557. /** Try to tell a cpuworker to perform the public key operations necessary to
  558. * respond to <b>onionskin</b> for the circuit <b>circ</b>.
  559. *
  560. * If <b>cpuworker</b> is defined, assert that he's idle, and use him. Else,
  561. * look for an idle cpuworker and use him. If none idle, queue task onto the
  562. * pending onion list and return. Return 0 if we successfully assign the
  563. * task, or -1 on failure.
  564. */
  565. int
  566. assign_onionskin_to_cpuworker(connection_t *cpuworker,
  567. or_circuit_t *circ,
  568. create_cell_t *onionskin)
  569. {
  570. cpuworker_request_t req;
  571. time_t now = approx_time();
  572. static time_t last_culled_cpuworkers = 0;
  573. int should_time;
  574. /* Checking for wedged cpuworkers requires a linear search over all
  575. * connections, so let's do it only once a minute.
  576. */
  577. #define CULL_CPUWORKERS_INTERVAL 60
  578. if (last_culled_cpuworkers + CULL_CPUWORKERS_INTERVAL <= now) {
  579. cull_wedged_cpuworkers();
  580. spawn_enough_cpuworkers();
  581. last_culled_cpuworkers = now;
  582. }
  583. if (1) {
  584. if (num_cpuworkers_busy == num_cpuworkers) {
  585. log_debug(LD_OR,"No idle cpuworkers. Queuing.");
  586. if (onion_pending_add(circ, onionskin) < 0) {
  587. tor_free(onionskin);
  588. return -1;
  589. }
  590. return 0;
  591. }
  592. if (!cpuworker)
  593. cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
  594. CPUWORKER_STATE_IDLE);
  595. tor_assert(cpuworker);
  596. if (!circ->p_chan) {
  597. log_info(LD_OR,"circ->p_chan gone. Failing circ.");
  598. tor_free(onionskin);
  599. return -1;
  600. }
  601. should_time = should_time_request(onionskin->handshake_type);
  602. memset(&req, 0, sizeof(req));
  603. req.magic = CPUWORKER_REQUEST_MAGIC;
  604. tag_pack(req.tag, circ->p_chan->global_identifier,
  605. circ->p_circ_id);
  606. req.timed = should_time;
  607. cpuworker->state = CPUWORKER_STATE_BUSY_ONION;
  608. /* touch the lastwritten timestamp, since that's how we check to
  609. * see how long it's been since we asked the question, and sometimes
  610. * we check before the first call to connection_handle_write(). */
  611. cpuworker->timestamp_lastwritten = now;
  612. num_cpuworkers_busy++;
  613. req.task = CPUWORKER_TASK_ONION;
  614. memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));
  615. tor_free(onionskin);
  616. if (should_time)
  617. tor_gettimeofday(&req.started_at);
  618. connection_write_to_buf((void*)&req, sizeof(req), cpuworker);
  619. memwipe(&req, 0, sizeof(req));
  620. }
  621. return 0;
  622. }