/* Copyright (c) 2003-2004, Roger Dingledine
 * Copyright (c) 2004-2006, Roger Dingledine, Nick Mathewson.
 * Copyright (c) 2007-2017, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file compat_threads.c
 *
 * \brief Cross-platform threading and inter-thread communication logic.
 *  (Platform-specific parts are written in the other compat_*threads
 *  modules.)
 */

#include "orconfig.h"
#include <stdlib.h>
#include "compat.h"
#include "compat_threads.h"
#include "util.h"
#include "torlog.h"

#ifdef HAVE_SYS_EVENTFD_H
#include <sys/eventfd.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif

/** Return a newly allocated, ready-for-use mutex. */
tor_mutex_t *
tor_mutex_new(void)
{
  tor_mutex_t *m = tor_malloc_zero(sizeof(tor_mutex_t));
  tor_mutex_init(m);
  return m;
}

/** Return a newly allocated, ready-for-use mutex. This one might be
 * non-recursive, if that's faster. */
tor_mutex_t *
tor_mutex_new_nonrecursive(void)
{
  tor_mutex_t *m = tor_malloc_zero(sizeof(tor_mutex_t));
  tor_mutex_init_nonrecursive(m);
  return m;
}

/** Release all storage and system resources held by <b>m</b>. */
void
tor_mutex_free(tor_mutex_t *m)
{
  if (!m)
    return;
  tor_mutex_uninit(m);
  tor_free(m);
}

/** Allocate and return a new condition variable. */
tor_cond_t *
tor_cond_new(void)
{
  tor_cond_t *cond = tor_malloc(sizeof(tor_cond_t));
  if (BUG(tor_cond_init(cond)<0))
    tor_free(cond); // LCOV_EXCL_LINE
  return cond;
}

/** Free all storage held in <b>c</b>. */
void
tor_cond_free(tor_cond_t *c)
{
  if (!c)
    return;
  tor_cond_uninit(c);
  tor_free(c);
}

/** Identity of the "main" thread */
static unsigned long main_thread_id = -1;

/** Start considering the current thread to be the 'main thread'. This has
 * no effect on anything besides in_main_thread(). */
void
set_main_thread(void)
{
  main_thread_id = tor_get_thread_id();
}

/** Return true iff called from the main thread. */
int
in_main_thread(void)
{
  return main_thread_id == tor_get_thread_id();
}

#if defined(HAVE_EVENTFD) || defined(HAVE_PIPE)
/* As write(), but retry on EINTR, and return the negative error code on
 * error. */
static int
write_ni(int fd, const void *buf, size_t n)
{
  int r;
 again:
  r = (int) write(fd, buf, n);
  if (r < 0) {
    if (errno == EINTR)
      goto again;
    else
      return -errno;
  }
  return r;
}

/* As read(), but retry on EINTR, and return the negative error code on error.
 */
static int
read_ni(int fd, void *buf, size_t n)
{
  int r;
 again:
  r = (int) read(fd, buf, n);
  if (r < 0) {
    if (errno == EINTR)
      goto again;
    else
      return -errno;
  }
  return r;
}
#endif

/** As send(), but retry on EINTR, and return the negative error code on
 * error. */
static int
send_ni(int fd, const void *buf, size_t n, int flags)
{
  int r;
 again:
  r = (int) send(fd, buf, n, flags);
  if (r < 0) {
    int error = tor_socket_errno(fd);
    if (ERRNO_IS_EINTR(error))
      goto again;
    else
      return -error;
  }
  return r;
}

/** As recv(), but retry on EINTR, and return the negative error code on
 * error. */
static int
recv_ni(int fd, void *buf, size_t n, int flags)
{
  int r;
 again:
  r = (int) recv(fd, buf, n, flags);
  if (r < 0) {
    int error = tor_socket_errno(fd);
    if (ERRNO_IS_EINTR(error))
      goto again;
    else
      return -error;
  }
  return r;
}

#ifdef HAVE_EVENTFD
/* Increment the event count on an eventfd <b>fd</b> */
static int
eventfd_alert(int fd)
{
  uint64_t u = 1;
  int r = write_ni(fd, (void*)&u, sizeof(u));
  if (r < 0 && -r != EAGAIN)
    return -1;
  return 0;
}

/* Drain all events from an eventfd <b>fd</b>. */
static int
eventfd_drain(int fd)
{
  uint64_t u = 0;
  int r = read_ni(fd, (void*)&u, sizeof(u));
  if (r < 0 && -r != EAGAIN)
    return r;
  return 0;
}
#endif

#ifdef HAVE_PIPE
/** Send a byte over a pipe. Return 0 on success or EAGAIN; a negative
 * errno value on error. */
static int
pipe_alert(int fd)
{
  ssize_t r = write_ni(fd, "x", 1);
  if (r < 0 && -r != EAGAIN)
    return (int)r;
  return 0;
}

/** Drain all input from a pipe <b>fd</b> and ignore it. Return 0 on
 * success, a negative errno value on error. */
static int
pipe_drain(int fd)
{
  char buf[32];
  ssize_t r;
  do {
    r = read_ni(fd, buf, sizeof(buf));
  } while (r > 0);
  if (r < 0 && errno != EAGAIN)
    return -errno;
  /* A value of r = 0 means EOF on the fd so successfully drained. */
  return 0;
}
#endif

/** Send a byte on socket <b>fd</b>. Return 0 on success or EAGAIN,
 * a negative error code on error. */
static int
sock_alert(tor_socket_t fd)
{
  ssize_t r = send_ni(fd, "x", 1, 0);
  if (r < 0 && !ERRNO_IS_EAGAIN(-r))
    return (int)r;
  return 0;
}

/** Drain all the input from a socket <b>fd</b>, and ignore it. Return 0 on
 * success, -errno on error. */
static int
sock_drain(tor_socket_t fd)
{
  char buf[32];
  ssize_t r;
  do {
    r = recv_ni(fd, buf, sizeof(buf), 0);
  } while (r > 0);
  if (r < 0 && !ERRNO_IS_EAGAIN(-r))
    return (int)r;
  /* A value of r = 0 means EOF on the fd so successfully drained. */
  return 0;
}

/** Allocate a new set of alert sockets, and set the appropriate function
 * pointers, in <b>socks_out</b>. */
int
alert_sockets_create(alert_sockets_t *socks_out, uint32_t flags)
{
  tor_socket_t socks[2] = { TOR_INVALID_SOCKET, TOR_INVALID_SOCKET };

#ifdef HAVE_EVENTFD
  /* First, we try the Linux eventfd() syscall. This gives a 64-bit counter
   * associated with a single file descriptor. */
#if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
  if (!(flags & ASOCKS_NOEVENTFD2))
    socks[0] = eventfd(0, EFD_CLOEXEC|EFD_NONBLOCK);
#endif
  if (socks[0] < 0 && !(flags & ASOCKS_NOEVENTFD)) {
    socks[0] = eventfd(0,0);
    if (socks[0] >= 0) {
      if (fcntl(socks[0], F_SETFD, FD_CLOEXEC) < 0 ||
          set_socket_nonblocking(socks[0]) < 0) {
        // LCOV_EXCL_START -- if eventfd succeeds, fcntl will.
        tor_assert_nonfatal_unreached();
        close(socks[0]);
        return -1;
        // LCOV_EXCL_STOP
      }
    }
  }
  if (socks[0] >= 0) {
    socks_out->read_fd = socks_out->write_fd = socks[0];
    socks_out->alert_fn = eventfd_alert;
    socks_out->drain_fn = eventfd_drain;
    return 0;
  }
#endif

#ifdef HAVE_PIPE2
  /* Now we're going to try pipes. First try the pipe2() syscall, if we
   * have it, so we can save some calls... */
  if (!(flags & ASOCKS_NOPIPE2) &&
      pipe2(socks, O_NONBLOCK|O_CLOEXEC) == 0) {
    socks_out->read_fd = socks[0];
    socks_out->write_fd = socks[1];
    socks_out->alert_fn = pipe_alert;
    socks_out->drain_fn = pipe_drain;
    return 0;
  }
#endif

#ifdef HAVE_PIPE
  /* Now try the regular pipe() syscall. Pipes have a bit lower overhead
   * than socketpairs, as far as I can tell. */
  if (!(flags & ASOCKS_NOPIPE) &&
      pipe(socks) == 0) {
    if (fcntl(socks[0], F_SETFD, FD_CLOEXEC) < 0 ||
        fcntl(socks[1], F_SETFD, FD_CLOEXEC) < 0 ||
        set_socket_nonblocking(socks[0]) < 0 ||
        set_socket_nonblocking(socks[1]) < 0) {
      // LCOV_EXCL_START -- if pipe succeeds, you can fcntl the output
      tor_assert_nonfatal_unreached();
      close(socks[0]);
      close(socks[1]);
      return -1;
      // LCOV_EXCL_STOP
    }
    socks_out->read_fd = socks[0];
    socks_out->write_fd = socks[1];
    socks_out->alert_fn = pipe_alert;
    socks_out->drain_fn = pipe_drain;
    return 0;
  }
#endif

  /* If nothing else worked, fall back on socketpair(). */
  if (!(flags & ASOCKS_NOSOCKETPAIR) &&
      tor_socketpair(AF_UNIX, SOCK_STREAM, 0, socks) == 0) {
    if (set_socket_nonblocking(socks[0]) < 0 ||
        set_socket_nonblocking(socks[1])) {
      // LCOV_EXCL_START -- if socketpair worked, you can make it nonblocking.
      tor_assert_nonfatal_unreached();
      tor_close_socket(socks[0]);
      tor_close_socket(socks[1]);
      return -1;
      // LCOV_EXCL_STOP
    }
    socks_out->read_fd = socks[0];
    socks_out->write_fd = socks[1];
    socks_out->alert_fn = sock_alert;
    socks_out->drain_fn = sock_drain;
    return 0;
  }
  return -1;
}
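
/* Illustrative sketch only (not part of this module): typical use of the
 * alert sockets. Any thread wakes a main-loop thread by calling alert_fn on
 * write_fd; the main loop watches read_fd for readability and calls drain_fn
 * before processing queued work. Passing 0 as the flags argument means no
 * backend is excluded; error handling is kept minimal. The example_* names
 * are hypothetical. */
#if 0
static alert_sockets_t example_alerts;

static int
example_setup(void)
{
  if (alert_sockets_create(&example_alerts, 0) < 0) {
    log_warn(LD_GENERAL, "Unable to create alert sockets");
    return -1;
  }
  return 0;
}

/* Called from any thread to wake the main loop. */
static void
example_wake_main_loop(void)
{
  example_alerts.alert_fn(example_alerts.write_fd);
}

/* Called in the main loop when example_alerts.read_fd becomes readable. */
static void
example_on_readable(void)
{
  example_alerts.drain_fn(example_alerts.read_fd);
  /* ... process whatever work the other threads queued ... */
}
#endif /* 0 */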

/** Close the sockets in <b>socks</b>. */
void
alert_sockets_close(alert_sockets_t *socks)
{
  if (socks->alert_fn == sock_alert) {
    /* they are sockets. */
    tor_close_socket(socks->read_fd);
    tor_close_socket(socks->write_fd);
  } else {
    close(socks->read_fd);
    if (socks->write_fd != socks->read_fd)
      close(socks->write_fd);
  }
  socks->read_fd = socks->write_fd = -1;
}

/*
 * XXXX We might be smart to move to compiler intrinsics or real atomic
 * XXXX operations at some point. But not yet.
 *
 */

/** Initialize a new atomic counter with the value 0 */
void
atomic_counter_init(atomic_counter_t *counter)
{
  memset(counter, 0, sizeof(*counter));
  tor_mutex_init_nonrecursive(&counter->mutex);
}

/** Clean up all resources held by an atomic counter. */
void
atomic_counter_destroy(atomic_counter_t *counter)
{
  tor_mutex_uninit(&counter->mutex);
  memset(counter, 0, sizeof(*counter));
}

/** Add a value to an atomic counter. */
void
atomic_counter_add(atomic_counter_t *counter, size_t add)
{
  tor_mutex_acquire(&counter->mutex);
  counter->val += add;
  tor_mutex_release(&counter->mutex);
}

/** Subtract a value from an atomic counter. */
void
atomic_counter_sub(atomic_counter_t *counter, size_t sub)
{
  // this relies on unsigned overflow, but that's fine.
  atomic_counter_add(counter, -sub);
}

/** Return the current value of an atomic counter */
size_t
atomic_counter_get(atomic_counter_t *counter)
{
  size_t val;
  tor_mutex_acquire(&counter->mutex);
  val = counter->val;
  tor_mutex_release(&counter->mutex);
  return val;
}
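
/* Illustrative sketch only (not part of this module): the mutex-based
 * atomic counter above in use. The counter must be initialized once with
 * atomic_counter_init() before any thread touches it; the example_* names
 * are hypothetical. */
#if 0
static atomic_counter_t example_total_bytes;

/* Safe to call concurrently from multiple threads. */
static void
example_record_write(size_t n_written)
{
  atomic_counter_add(&example_total_bytes, n_written);
}

static void
example_log_total(void)
{
  log_info(LD_GENERAL, "Wrote %lu bytes so far",
           (unsigned long) atomic_counter_get(&example_total_bytes));
}
#endif /* 0 */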