/* cpuworker.c */
  1. /* Copyright (c) 2003-2004, Roger Dingledine.
  2. * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
  3. * Copyright (c) 2007-2012, The Tor Project, Inc. */
  4. /* See LICENSE for licensing information */
  5. /**
  6. * \file cpuworker.c
  7. * \brief Implements a farm of 'CPU worker' processes to perform
  8. * CPU-intensive tasks in another thread or process, to not
  9. * interrupt the main thread.
  10. *
  11. * Right now, we only use this for processing onionskins.
  12. **/
  13. #include "or.h"
  14. #include "buffers.h"
  15. #include "channel.h"
  16. #include "channeltls.h"
  17. #include "circuitbuild.h"
  18. #include "circuitlist.h"
  19. #include "config.h"
  20. #include "connection.h"
  21. #include "cpuworker.h"
  22. #include "main.h"
  23. #include "onion.h"
  24. #include "router.h"
  25. /** The maximum number of cpuworker processes we will keep around. */
  26. #define MAX_CPUWORKERS 16
  27. /** The minimum number of cpuworker processes we will keep around. */
  28. #define MIN_CPUWORKERS 1
  29. /** The tag specifies which circuit this onionskin was from. */
  30. #define TAG_LEN 10
  31. /** How many cpuworkers we have running right now. */
  32. static int num_cpuworkers=0;
  33. /** How many of the running cpuworkers have an assigned task right now. */
  34. static int num_cpuworkers_busy=0;
  35. /** We need to spawn new cpuworkers whenever we rotate the onion keys
  36. * on platforms where execution contexts==processes. This variable stores
  37. * the last time we got a key rotation event. */
  38. static time_t last_rotation_time=0;
  39. static void cpuworker_main(void *data) ATTR_NORETURN;
  40. static int spawn_cpuworker(void);
  41. static void spawn_enough_cpuworkers(void);
  42. static void process_pending_task(connection_t *cpuworker);
  43. /** Initialize the cpuworker subsystem.
  44. */
  45. void
  46. cpu_init(void)
  47. {
  48. cpuworkers_rotate();
  49. }
  50. /** Called when we're done sending a request to a cpuworker. */
  51. int
  52. connection_cpu_finished_flushing(connection_t *conn)
  53. {
  54. tor_assert(conn);
  55. tor_assert(conn->type == CONN_TYPE_CPUWORKER);
  56. return 0;
  57. }
  58. /** Pack global_id and circ_id; set *tag to the result. (See note on
  59. * cpuworker_main for wire format.) */
  60. static void
  61. tag_pack(uint8_t *tag, uint64_t chan_id, circid_t circ_id)
  62. {
  63. /*XXXX RETHINK THIS WHOLE MESS !!!! !NM NM NM NM*/
  64. /*XXXX DOUBLEPLUSTHIS!!!! AS AS AS AS*/
  65. set_uint64(tag, chan_id);
  66. set_uint16(tag+8, circ_id);
  67. }
  68. /** Unpack <b>tag</b> into addr, port, and circ_id.
  69. */
  70. static void
  71. tag_unpack(const uint8_t *tag, uint64_t *chan_id, circid_t *circ_id)
  72. {
  73. *chan_id = get_uint64(tag);
  74. *circ_id = get_uint16(tag+8);
  75. }
  76. /** DOCDOC */
  77. #define CPUWORKER_REQUEST_MAGIC 0xda4afeed
  78. #define CPUWORKER_REPLY_MAGIC 0x5eedf00d
  79. /**DOCDOC*/
  80. typedef struct cpuworker_request_t {
  81. uint32_t magic;
  82. /** Opaque tag to identify the job */
  83. uint8_t tag[TAG_LEN];
  84. uint8_t task;
  85. create_cell_t create_cell;
  86. /* Turn the above into a tagged union if needed. */
  87. } cpuworker_request_t;
  88. /**DOCDOC*/
  89. typedef struct cpuworker_reply_t {
  90. uint32_t magic;
  91. uint8_t tag[TAG_LEN];
  92. uint8_t success;
  93. created_cell_t created_cell;
  94. uint8_t keys[CPATH_KEY_MATERIAL_LEN];
  95. uint8_t rend_auth_material[DIGEST_LEN];
  96. } cpuworker_reply_t;
  97. /** Called when the onion key has changed and we need to spawn new
  98. * cpuworkers. Close all currently idle cpuworkers, and mark the last
  99. * rotation time as now.
  100. */
  101. void
  102. cpuworkers_rotate(void)
  103. {
  104. connection_t *cpuworker;
  105. while ((cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
  106. CPUWORKER_STATE_IDLE))) {
  107. connection_mark_for_close(cpuworker);
  108. --num_cpuworkers;
  109. }
  110. last_rotation_time = time(NULL);
  111. if (server_mode(get_options()))
  112. spawn_enough_cpuworkers();
  113. }
  114. /** If the cpuworker closes the connection,
  115. * mark it as closed and spawn a new one as needed. */
  116. int
  117. connection_cpu_reached_eof(connection_t *conn)
  118. {
  119. log_warn(LD_GENERAL,"Read eof. CPU worker died unexpectedly.");
  120. if (conn->state != CPUWORKER_STATE_IDLE) {
  121. /* the circ associated with this cpuworker will have to wait until
  122. * it gets culled in run_connection_housekeeping(), since we have
  123. * no way to find out which circ it was. */
  124. log_warn(LD_GENERAL,"...and it left a circuit queued; abandoning circ.");
  125. num_cpuworkers_busy--;
  126. }
  127. num_cpuworkers--;
  128. spawn_enough_cpuworkers(); /* try to regrow. hope we don't end up
  129. spinning. */
  130. connection_mark_for_close(conn);
  131. return 0;
  132. }
/** Called when we get data from a cpuworker.  If the answer is not complete,
 * wait for a complete answer.  If the answer is complete, look up the
 * circuit the reply tag names, deliver the handshake answer (or close the
 * circuit on failure), and then either retire this worker (if the onion
 * key rotated while it was busy) or hand it the next pending task.
 */
int
connection_cpu_process_inbuf(connection_t *conn)
{
  uint64_t chan_id;
  circid_t circ_id;
  channel_t *p_chan = NULL;
  circuit_t *circ;
  tor_assert(conn);
  tor_assert(conn->type == CONN_TYPE_CPUWORKER);
  if (!connection_get_inbuf_len(conn))
    return 0;
  if (conn->state == CPUWORKER_STATE_BUSY_ONION) {
    cpuworker_reply_t rpl;
    /* Replies are fixed-size; wait until one is fully buffered. */
    if (connection_get_inbuf_len(conn) < sizeof(cpuworker_reply_t))
      return 0; /* not yet */
    /* Workers answer one request at a time, so exactly one reply can be
     * pending on this connection. */
    tor_assert(connection_get_inbuf_len(conn) == sizeof(cpuworker_reply_t));
    connection_fetch_from_buf((void*)&rpl,sizeof(cpuworker_reply_t),conn);
    tor_assert(rpl.magic == CPUWORKER_REPLY_MAGIC);
    /* parse out the circ it was talking about */
    tag_unpack(rpl.tag, &chan_id, &circ_id);
    circ = NULL;
    log_debug(LD_OR,
              "Unpacking cpuworker reply, chan_id is " U64_FORMAT
              ", circ_id is %d",
              U64_PRINTF_ARG(chan_id), circ_id);
    p_chan = channel_find_by_global_id(chan_id);
    if (p_chan)
      circ = circuit_get_by_circid_channel(circ_id, p_chan);
    if (rpl.success == 0) {
      /* Worker couldn't decode the onionskin; kill the circuit if it is
       * still around, then fall into the common cleanup path. */
      log_debug(LD_OR,
                "decoding onionskin failed. "
                "(Old key or bad software.) Closing.");
      if (circ)
        circuit_mark_for_close(circ, END_CIRC_REASON_TORPROTOCOL);
      goto done_processing;
    }
    if (!circ) {
      /* This happens because somebody sends us a destroy cell and the
       * circuit goes away, while the cpuworker is working.  This is also
       * why our tag doesn't include a pointer to the circ, because we'd
       * never know if it's still valid.
       */
      log_debug(LD_OR,"processed onion for a circ that's gone. Dropping.");
      goto done_processing;
    }
    tor_assert(! CIRCUIT_IS_ORIGIN(circ));
    /* Hand the created cell + key material back to the circuit code. */
    if (onionskin_answer(TO_OR_CIRCUIT(circ),
                         rpl.created_cell.cell_type,
                         (const char*)rpl.created_cell.reply,
                         rpl.created_cell.handshake_len,
                         (const char*)rpl.keys,
                         rpl.rend_auth_material) < 0) {
      log_warn(LD_OR,"onionskin_answer failed. Closing.");
      circuit_mark_for_close(circ, END_CIRC_REASON_INTERNAL);
      goto done_processing;
    }
    log_debug(LD_OR,"onionskin_answer succeeded. Yay.");
  } else {
    tor_assert(0); /* don't ask me to do handshakes yet */
  }
 done_processing:
  /* Whatever happened above, this worker is now free.  If the onion key
   * rotated while it was busy, its keys are stale: retire it and spawn a
   * replacement; otherwise give it the next queued task, if any. */
  conn->state = CPUWORKER_STATE_IDLE;
  num_cpuworkers_busy--;
  if (conn->timestamp_created < last_rotation_time) {
    connection_mark_for_close(conn);
    num_cpuworkers--;
    spawn_enough_cpuworkers();
  } else {
    process_pending_task(conn);
  }
  return 0;
}
  209. /** Implement a cpuworker. 'data' is an fdarray as returned by socketpair.
  210. * Read and writes from fdarray[1]. Reads requests, writes answers.
  211. *
  212. * Request format:
  213. * cpuworker_request_t.
  214. * Response format:
  215. * cpuworker_reply_t
  216. */
  217. static void
  218. cpuworker_main(void *data)
  219. {
  220. /* For talking to the parent thread/process */
  221. tor_socket_t *fdarray = data;
  222. tor_socket_t fd;
  223. /* variables for onion processing */
  224. server_onion_keys_t onion_keys;
  225. cpuworker_request_t req;
  226. cpuworker_reply_t rpl;
  227. fd = fdarray[1]; /* this side is ours */
  228. #ifndef TOR_IS_MULTITHREADED
  229. tor_close_socket(fdarray[0]); /* this is the side of the socketpair the
  230. * parent uses */
  231. tor_free_all(1); /* so the child doesn't hold the parent's fd's open */
  232. handle_signals(0); /* ignore interrupts from the keyboard, etc */
  233. #endif
  234. tor_free(data);
  235. setup_server_onion_keys(&onion_keys);
  236. for (;;) {
  237. if (read_all(fd, (void *)&req, sizeof(req), 1) != sizeof(req)) {
  238. log_info(LD_OR, "read request failed. Exiting.");
  239. goto end;
  240. }
  241. tor_assert(req.magic == CPUWORKER_REQUEST_MAGIC);
  242. memset(&rpl, 0, sizeof(rpl));
  243. if (req.task == CPUWORKER_TASK_ONION) {
  244. const create_cell_t *cc = &req.create_cell;
  245. created_cell_t *cell_out = &rpl.created_cell;
  246. int n;
  247. n = onion_skin_server_handshake(cc->handshake_type,
  248. cc->onionskin, cc->handshake_len,
  249. &onion_keys,
  250. cell_out->reply,
  251. rpl.keys, CPATH_KEY_MATERIAL_LEN,
  252. rpl.rend_auth_material);
  253. if (n < 0) {
  254. /* failure */
  255. log_debug(LD_OR,"onion_skin_server_handshake failed.");
  256. memset(&rpl, 0, sizeof(rpl));
  257. memcpy(rpl.tag, req.tag, TAG_LEN);
  258. rpl.success = 0;
  259. } else {
  260. /* success */
  261. log_debug(LD_OR,"onion_skin_server_handshake succeeded.");
  262. memcpy(rpl.tag, req.tag, TAG_LEN);
  263. cell_out->handshake_len = n;
  264. switch (cc->cell_type) {
  265. case CELL_CREATE:
  266. cell_out->cell_type = CELL_CREATED; break;
  267. case CELL_CREATE2:
  268. cell_out->cell_type = CELL_CREATED2; break;
  269. case CELL_CREATE_FAST:
  270. cell_out->cell_type = CELL_CREATED_FAST; break;
  271. default:
  272. tor_assert(0);
  273. goto end;
  274. }
  275. rpl.success = 1;
  276. }
  277. rpl.magic = CPUWORKER_REPLY_MAGIC;
  278. if (write_all(fd, (void*)&rpl, sizeof(rpl), 1) != sizeof(rpl)) {
  279. log_err(LD_BUG,"writing response buf failed. Exiting.");
  280. goto end;
  281. }
  282. log_debug(LD_OR,"finished writing response.");
  283. } else if (req.task == CPUWORKER_TASK_SHUTDOWN) {
  284. log_info(LD_OR,"Clean shutdown: exiting");
  285. goto end;
  286. }
  287. memwipe(&req, 0, sizeof(req));
  288. memwipe(&rpl, 0, sizeof(req));
  289. }
  290. end:
  291. memwipe(&req, 0, sizeof(req));
  292. memwipe(&rpl, 0, sizeof(req));
  293. release_server_onion_keys(&onion_keys);
  294. tor_close_socket(fd);
  295. crypto_thread_cleanup();
  296. spawn_exit();
  297. }
  298. /** Launch a new cpuworker. Return 0 if we're happy, -1 if we failed.
  299. */
  300. static int
  301. spawn_cpuworker(void)
  302. {
  303. tor_socket_t *fdarray;
  304. tor_socket_t fd;
  305. connection_t *conn;
  306. int err;
  307. fdarray = tor_malloc(sizeof(tor_socket_t)*2);
  308. if ((err = tor_socketpair(AF_UNIX, SOCK_STREAM, 0, fdarray)) < 0) {
  309. log_warn(LD_NET, "Couldn't construct socketpair for cpuworker: %s",
  310. tor_socket_strerror(-err));
  311. tor_free(fdarray);
  312. return -1;
  313. }
  314. tor_assert(SOCKET_OK(fdarray[0]));
  315. tor_assert(SOCKET_OK(fdarray[1]));
  316. fd = fdarray[0];
  317. spawn_func(cpuworker_main, (void*)fdarray);
  318. log_debug(LD_OR,"just spawned a cpu worker.");
  319. #ifndef TOR_IS_MULTITHREADED
  320. tor_close_socket(fdarray[1]); /* don't need the worker's side of the pipe */
  321. tor_free(fdarray);
  322. #endif
  323. conn = connection_new(CONN_TYPE_CPUWORKER, AF_UNIX);
  324. set_socket_nonblocking(fd);
  325. /* set up conn so it's got all the data we need to remember */
  326. conn->s = fd;
  327. conn->address = tor_strdup("localhost");
  328. tor_addr_make_unspec(&conn->addr);
  329. if (connection_add(conn) < 0) { /* no space, forget it */
  330. log_warn(LD_NET,"connection_add for cpuworker failed. Giving up.");
  331. connection_free(conn); /* this closes fd */
  332. return -1;
  333. }
  334. conn->state = CPUWORKER_STATE_IDLE;
  335. connection_start_reading(conn);
  336. return 0; /* success */
  337. }
  338. /** If we have too few or too many active cpuworkers, try to spawn new ones
  339. * or kill idle ones.
  340. */
  341. static void
  342. spawn_enough_cpuworkers(void)
  343. {
  344. int num_cpuworkers_needed = get_num_cpus(get_options());
  345. if (num_cpuworkers_needed < MIN_CPUWORKERS)
  346. num_cpuworkers_needed = MIN_CPUWORKERS;
  347. if (num_cpuworkers_needed > MAX_CPUWORKERS)
  348. num_cpuworkers_needed = MAX_CPUWORKERS;
  349. while (num_cpuworkers < num_cpuworkers_needed) {
  350. if (spawn_cpuworker() < 0) {
  351. log_warn(LD_GENERAL,"Cpuworker spawn failed. Will try again later.");
  352. return;
  353. }
  354. num_cpuworkers++;
  355. }
  356. }
  357. /** Take a pending task from the queue and assign it to 'cpuworker'. */
  358. static void
  359. process_pending_task(connection_t *cpuworker)
  360. {
  361. or_circuit_t *circ;
  362. create_cell_t *onionskin = NULL;
  363. tor_assert(cpuworker);
  364. /* for now only process onion tasks */
  365. circ = onion_next_task(&onionskin);
  366. if (!circ)
  367. return;
  368. if (assign_onionskin_to_cpuworker(cpuworker, circ, onionskin))
  369. log_warn(LD_OR,"assign_to_cpuworker failed. Ignoring.");
  370. }
  371. /** How long should we let a cpuworker stay busy before we give
  372. * up on it and decide that we have a bug or infinite loop?
  373. * This value is high because some servers with low memory/cpu
  374. * sometimes spend an hour or more swapping, and Tor starves. */
  375. #define CPUWORKER_BUSY_TIMEOUT (60*60*12)
  376. /** We have a bug that I can't find. Sometimes, very rarely, cpuworkers get
  377. * stuck in the 'busy' state, even though the cpuworker process thinks of
  378. * itself as idle. I don't know why. But here's a workaround to kill any
  379. * cpuworker that's been busy for more than CPUWORKER_BUSY_TIMEOUT.
  380. */
  381. static void
  382. cull_wedged_cpuworkers(void)
  383. {
  384. time_t now = time(NULL);
  385. smartlist_t *conns = get_connection_array();
  386. SMARTLIST_FOREACH_BEGIN(conns, connection_t *, conn) {
  387. if (!conn->marked_for_close &&
  388. conn->type == CONN_TYPE_CPUWORKER &&
  389. conn->state == CPUWORKER_STATE_BUSY_ONION &&
  390. conn->timestamp_lastwritten + CPUWORKER_BUSY_TIMEOUT < now) {
  391. log_notice(LD_BUG,
  392. "closing wedged cpuworker. Can somebody find the bug?");
  393. num_cpuworkers_busy--;
  394. num_cpuworkers--;
  395. connection_mark_for_close(conn);
  396. }
  397. } SMARTLIST_FOREACH_END(conn);
  398. }
  399. /** Try to tell a cpuworker to perform the public key operations necessary to
  400. * respond to <b>onionskin</b> for the circuit <b>circ</b>.
  401. *
  402. * If <b>cpuworker</b> is defined, assert that he's idle, and use him. Else,
  403. * look for an idle cpuworker and use him. If none idle, queue task onto the
  404. * pending onion list and return. Return 0 if we successfully assign the
  405. * task, or -1 on failure.
  406. */
  407. int
  408. assign_onionskin_to_cpuworker(connection_t *cpuworker,
  409. or_circuit_t *circ,
  410. create_cell_t *onionskin)
  411. {
  412. cpuworker_request_t req;
  413. time_t now = approx_time();
  414. static time_t last_culled_cpuworkers = 0;
  415. /* Checking for wedged cpuworkers requires a linear search over all
  416. * connections, so let's do it only once a minute.
  417. */
  418. #define CULL_CPUWORKERS_INTERVAL 60
  419. if (last_culled_cpuworkers + CULL_CPUWORKERS_INTERVAL <= now) {
  420. cull_wedged_cpuworkers();
  421. spawn_enough_cpuworkers();
  422. last_culled_cpuworkers = now;
  423. }
  424. if (1) {
  425. if (num_cpuworkers_busy == num_cpuworkers) {
  426. log_debug(LD_OR,"No idle cpuworkers. Queuing.");
  427. if (onion_pending_add(circ, onionskin) < 0) {
  428. tor_free(onionskin);
  429. return -1;
  430. }
  431. return 0;
  432. }
  433. if (!cpuworker)
  434. cpuworker = connection_get_by_type_state(CONN_TYPE_CPUWORKER,
  435. CPUWORKER_STATE_IDLE);
  436. tor_assert(cpuworker);
  437. if (!circ->p_chan) {
  438. log_info(LD_OR,"circ->p_chan gone. Failing circ.");
  439. tor_free(onionskin);
  440. return -1;
  441. }
  442. memset(&req, 0, sizeof(req));
  443. req.magic = CPUWORKER_REQUEST_MAGIC;
  444. tag_pack(req.tag, circ->p_chan->global_identifier,
  445. circ->p_circ_id);
  446. cpuworker->state = CPUWORKER_STATE_BUSY_ONION;
  447. /* touch the lastwritten timestamp, since that's how we check to
  448. * see how long it's been since we asked the question, and sometimes
  449. * we check before the first call to connection_handle_write(). */
  450. cpuworker->timestamp_lastwritten = time(NULL);
  451. num_cpuworkers_busy++;
  452. req.task = CPUWORKER_TASK_ONION;
  453. memcpy(&req.create_cell, onionskin, sizeof(create_cell_t));
  454. tor_free(onionskin);
  455. connection_write_to_buf((void*)&req, sizeof(req), cpuworker);
  456. memwipe(&req, 0, sizeof(req));
  457. }
  458. return 0;
  459. }