/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_poll.c
 *
 * Implementation of the system calls "poll", "ppoll", "select" and
 * "pselect6".
 */

#include <shim_internal.h>
#include <shim_table.h>
#include <shim_utils.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>
#include <list.h>

#include <errno.h>
#include <linux/fcntl.h>
void __attribute__((noreturn))
fortify_fail (const char *msg)
{
    /* The loop is added only to keep gcc happy. */
    while (1)
        debug("*** %s ***\n", msg);
}

void __attribute__((noreturn))
chk_fail (void)
{
    fortify_fail("buffer overflow detected");
}
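
/* Scratch allocations for the poll arrays below: use the caller's stack
 * when the requested size fits, and fall back to the heap otherwise.
 * __try_free() only frees memory that did not come from the stack. */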
static inline __attribute__((always_inline))
void * __try_alloca (struct shim_thread * cur, int size)
{
    if (!size)
        return NULL;

    if (check_stack_size(cur, size))
        return __alloca(size);
    else
        return malloc(size);
}

static inline __attribute__((always_inline))
void __try_free (struct shim_thread * cur, void * mem)
{
    if (mem && !check_on_stack(cur, mem))
        free(mem);
}
DEFINE_PROFILE_CATAGORY(__do_poll, select);
DEFINE_PROFILE_INTERVAL(do_poll_get_handle, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_search_repeat, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_set_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_check_accmode, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_vfs_polling, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_update_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_first_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_second_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any_peek, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_third_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_fourth_loop, __do_poll);
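
/* Per-fd state bits used by __do_poll() (as used throughout this file):
 *   DO_R / DO_W           - the caller asked to poll for read / write;
 *   KNOWN_R / KNOWN_W     - the read / write result is already determined;
 *   RET_R / RET_W / RET_E - events to report back (readable, writeable,
 *                           error/hangup);
 *   POLL_R / POLL_W       - the PAL handle still has to be polled. */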
#define DO_R        0001
#define DO_W        0002
#define KNOWN_R     0004
#define KNOWN_W     0010
#define RET_R       0020
#define RET_W       0040
#define RET_E       0100
#define POLL_R      0200
#define POLL_W      0400
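
/* One poll_handle is filled in per requested fd.  When several fds refer to
 * the same shim_handle, only the first occurrence stays on the "polling"
 * list; later duplicates are chained onto its "children" list, so each
 * handle is polled at most once and the result is propagated to all of
 * them. */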
struct poll_handle {
    unsigned short       flags;
    FDTYPE               fd;
    struct shim_handle * handle;
    struct poll_handle * next;
    struct poll_handle * children;
} __attribute__((packed));

#define POLL_NOTIMEOUT ((unsigned long) -1)
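
/* Poll the npolls descriptors described by the polls array.  The timeout is
 * in microseconds; POLL_NOTIMEOUT blocks indefinitely.  Returns 0 on success
 * (results are reported through the RET_* flag bits in polls[]) or a
 * negative errno-style error code. */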
static int __do_poll (int npolls, struct poll_handle * polls,
                      unsigned long timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct shim_handle_map * map = cur->handle_map;
    int npals = 0;
    bool has_r = false, has_known = false;
    struct poll_handle * polling = NULL;
    struct poll_handle * p, ** n, * q;
    PAL_HANDLE * pals = NULL;
    int ret = 0;

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
    BEGIN_PROFILE_INTERVAL_SET(begin_time);
#endif

    lock(map->lock);

    for (p = polls ; p < &polls[npolls] ; p++) {
        bool do_r = p->flags & DO_R;
        bool do_w = p->flags & DO_W;

        if (!do_r && !do_w) {
no_op:
            p->flags  = 0;
            p->handle = NULL;
            UPDATE_PROFILE_INTERVAL();
            continue;
        }
        /* an invalid fd yields a NULL handle; treat it as a no-op */
        struct shim_handle * hdl = __get_fd_handle(p->fd, NULL, map);
        if (!hdl || !hdl->fs || !hdl->fs->fs_ops)
            goto no_op;
        SAVE_PROFILE_INTERVAL(do_poll_get_handle);

        /* search for a repeated entry */
        struct poll_handle * rep = polling;
        for ( ; rep ; rep = rep->next)
            if (rep->handle == hdl)
                break;

        SAVE_PROFILE_INTERVAL(do_poll_search_repeat);

        p->flags    = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        p->handle   = NULL;
        p->next     = NULL;
        p->children = NULL;

        if (rep) {
            /* if there are repeated handles and we already know the
               result, let's skip them */
            if (rep->flags & (KNOWN_R|POLL_R)) {
                p->flags = rep->flags & (KNOWN_R|RET_R|RET_E|POLL_R);
                do_r = false;
            }

            if (rep->flags & (KNOWN_W|POLL_W)) {
                p->flags = rep->flags & (KNOWN_W|RET_W|RET_E|POLL_W);
                do_w = false;
            }

            p->next = rep->children;
            rep->children = p;

            if (!do_r && !do_w) {
                SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
                continue;
            }
        } else {
            get_handle(hdl);
            p->handle = hdl;
            p->next = polling;
            polling = p;
        }

        SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);

        /* do the easiest check first: the handle's access mode */
        if (do_r && !(hdl->acc_mode & MAY_READ)) {
            p->flags |= KNOWN_R;
            debug("fd %d known to be not readable\n", p->fd);
            do_r = false;
        }

        if (do_w && !(hdl->acc_mode & MAY_WRITE)) {
            p->flags |= KNOWN_W;
            debug("fd %d known to be not writeable\n", p->fd);
            do_w = false;
        }

        SAVE_PROFILE_INTERVAL(do_poll_check_accmode);

        if (!do_r && !do_w)
            goto done_finding;

        /* if the fs provides a poll operator, let's try it */
        if (hdl->fs->fs_ops->poll) {
            int need_poll = 0;

            if (do_r && !(p->flags & POLL_R))
                need_poll |= FS_POLL_RD;
            if (do_w && !(p->flags & POLL_W))
                need_poll |= FS_POLL_WR;

            if (need_poll) {
                int polled = hdl->fs->fs_ops->poll(hdl, need_poll);

                if (polled < 0) {
                    if (polled != -EAGAIN) {
                        unlock(map->lock);
                        ret = polled;
                        goto done_polling;
                    }
                } else {
                    if (polled & FS_POLL_ER) {
                        debug("fd %d known to have error\n", p->fd);
                        p->flags |= KNOWN_R|KNOWN_W|RET_E;
                        do_r = do_w = false;
                    }

                    if (polled & FS_POLL_RD) {
                        debug("fd %d known to be readable\n", p->fd);
                        p->flags |= KNOWN_R|RET_R;
                        do_r = false;
                    }

                    if (polled & FS_POLL_WR) {
                        debug("fd %d known to be writeable\n", p->fd);
                        p->flags |= KNOWN_W|RET_W;
                        do_w = false;
                    }
                }
            }

            SAVE_PROFILE_INTERVAL(do_poll_vfs_polling);

            if (!do_r && !do_w)
                goto done_finding;
        }

        struct poll_handle * to_poll = rep ? : p;

        if (!(to_poll->flags & (POLL_R|POLL_W))) {
            if (!hdl->pal_handle) {
                p->flags |= KNOWN_R|KNOWN_W|RET_E;
                do_r = do_w = false;
                goto done_finding;
            }

            debug("polling fd %d\n", to_poll->fd);
            npals++;
        }

        to_poll->flags |= (do_r ? POLL_R : 0)|(do_w ? POLL_W : 0);

done_finding:
        /* feed the new knowledge back to the repeated handles */
        if (rep)
            rep->flags |= p->flags &
                          (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E|POLL_R|POLL_W);

        if (do_r)
            has_r = true;

        if (p->flags & (RET_R|RET_W|RET_E))
            has_known = true;

        SAVE_PROFILE_INTERVAL(do_poll_update_bookkeeping);
    }

    unlock(map->lock);
    SAVE_PROFILE_INTERVAL_SINCE(do_poll_first_loop, begin_time);
    if (!npals) {
        ret = 0;
        goto done_polling;
    }

    pals = __try_alloca(cur, sizeof(PAL_HANDLE) * npals);
    npals = 0;

    n = &polling;
    for (p = polling ; p ; p = p->next) {
        assert(p->handle);

        if (!(p->flags & (POLL_R|POLL_W))) {
            *n = p->next;
            put_handle(p->handle);
            p->handle = NULL;
            continue;
        }

        pals[npals++] = p->handle->pal_handle;
        n = &p->next;
    }

    SAVE_PROFILE_INTERVAL(do_poll_second_loop);
    while (npals) {
        int pal_timeout = (has_r && !has_known) ? timeout : 0;
        PAL_HANDLE polled = DkObjectsWaitAny(npals, pals, pal_timeout);

        if (pal_timeout)
            SAVE_PROFILE_INTERVAL(do_poll_wait_any);
        else
            SAVE_PROFILE_INTERVAL(do_poll_wait_any_peek);

        if (!polled)
            break;

        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQuerybyHandle(polled, &attr))
            break;

        n = &polling;
        for (p = polling ; p ; p = p->next) {
            if (p->handle->pal_handle == polled)
                break;
            n = &p->next;
        }

        if (!p)
            break;

        debug("handle %s is polled\n", qstrgetstr(&p->handle->uri));

        p->flags |= KNOWN_R|KNOWN_W;

        if (attr.disconnected) {
            debug("handle is polled to be disconnected\n");
            p->flags |= RET_E;
        }
        if (attr.readable) {
            debug("handle is polled to be readable\n");
            p->flags |= RET_R;
        }
        if (attr.writeable) {
            debug("handle is polled to be writeable\n");
            p->flags |= RET_W;
        }

        for (q = p->children ; q ; q = q->next)
            q->flags |= p->flags & (KNOWN_R|KNOWN_W|RET_W|RET_R|RET_E);

        if ((p->flags & (POLL_R|KNOWN_R)) != (POLL_R|KNOWN_R) &&
            (p->flags & (POLL_W|KNOWN_W)) != (POLL_W|KNOWN_W))
            continue;

        has_known = true;

        *n = p->next;
        put_handle(p->handle);
        p->handle = NULL;

        int nskip = 0;
        for (int i = 0 ; i < npals ; i++)
            if (pals[i] == polled) {
                nskip = 1;
            } else if (nskip) {
                pals[i - nskip] = pals[i];
            }

        npals -= nskip;
        SAVE_PROFILE_INTERVAL(do_poll_third_loop);
    }

    ret = 0;

done_polling:
    for (p = polling ; p ; p = p->next)
        put_handle(p->handle);

    SAVE_PROFILE_INTERVAL(do_poll_fourth_loop);

    if (pals)
        __try_free(cur, pals);

    return ret;
}
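
/* poll(2): POLLIN|POLLRDNORM map to DO_R and POLLOUT|POLLWRNORM map to DO_W.
 * A negative timeout means wait forever; otherwise the timeout is given in
 * milliseconds and converted to microseconds for __do_poll(). */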
int shim_do_poll (struct pollfd * fds, nfds_t nfds, int timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    int ret = __do_poll(nfds, polls,
                        timeout < 0 ? POLL_NOTIMEOUT : timeout * 1000ULL);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (fds[i].events & (POLLERR|POLLHUP));

        if (fds[i].revents)
            ret++;
    }

out:
    __try_free(cur, polls);
    return ret;
}
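
/* ppoll(2): same as poll(2), but the timeout is a timespec (converted here
 * to microseconds).  Note that the sigmask/sigsetsize arguments are accepted
 * but not applied in this implementation. */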
int shim_do_ppoll (struct pollfd * fds, int nfds, struct timespec * tsp,
                   const __sigset_t * sigmask, size_t sigsetsize)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(nfds, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (fds[i].events & (POLLERR|POLLHUP));

        if (fds[i].revents)
            ret++;
    }

out:
    __try_free(cur, polls);
    return ret;
}
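
/* Local copies of the glibc-style fd_set accessor macros, so fd_set
 * contents can be manipulated here without relying on libc. */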
typedef long int __fd_mask;

#ifndef __NFDBITS
#define __NFDBITS (8 * (int) sizeof (__fd_mask))
#endif

#ifndef __FDS_BITS
#define __FDS_BITS(set) ((set)->fds_bits)
#endif

/* We don't use `memset' because this would require a prototype and
   the array isn't too big. */
#define __FD_ZERO(set)                                                    \
    do {                                                                  \
        unsigned int __i;                                                 \
        fd_set *__arr = (set);                                            \
        for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i)  \
            __FDS_BITS (__arr)[__i] = 0;                                  \
    } while (0)

#define __FD_ELT(d)  ((d) / __NFDBITS)
#define __FD_MASK(d) ((__fd_mask) 1 << ((d) % __NFDBITS))

#define __FD_SET(d, set)                                                  \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] |= __FD_MASK (d)))
#define __FD_CLR(d, set)                                                  \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] &= ~__FD_MASK (d)))
#define __FD_ISSET(d, set)                                                \
    ((__FDS_BITS (set)[__FD_ELT (d)] & __FD_MASK (d)) != 0)
DEFINE_PROFILE_CATAGORY(select, );
DEFINE_PROFILE_INTERVAL(select_tryalloca_1, select);
DEFINE_PROFILE_INTERVAL(select_setup_array, select);
DEFINE_PROFILE_INTERVAL(select_do_poll, select);
DEFINE_PROFILE_INTERVAL(select_fd_zero, select);
DEFINE_PROFILE_INTERVAL(select_fd_sets, select);
DEFINE_PROFILE_INTERVAL(select_try_free, select);
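
/* select(2): readfds/writefds are scanned for bits below nfds and converted
 * into DO_R/DO_W requests; the timeval timeout is converted to microseconds.
 * With nfds == 0 the call degenerates into a nanosleep for the given
 * timeout. */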
int shim_do_select (int nfds, fd_set * readfds, fd_set * writefds,
                    fd_set * errorfds, struct __kernel_timeval * tsv)
{
    BEGIN_PROFILE_INTERVAL();

    if (!nfds) {
        if (!tsv)
            return -EINVAL;

        struct __kernel_timespec tsp;
        tsp.tv_sec  = tsv->tv_sec;
        tsp.tv_nsec = tsv->tv_usec * 1000;
        return shim_do_nanosleep(&tsp, NULL);
    }

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    SAVE_PROFILE_INTERVAL(select_tryalloca_1);

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));

        if (!do_r && !do_w)
            continue;

        debug("poll fd %d %s%s\n", fd, do_r ? "R" : "", do_w ? "W" : "");

        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    SAVE_PROFILE_INTERVAL(select_setup_array);

    unsigned long timeout = tsv ?
                            tsv->tv_sec * 1000000ULL + tsv->tv_usec :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);

    SAVE_PROFILE_INTERVAL(select_do_poll);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    SAVE_PROFILE_INTERVAL(select_fd_zero);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }

        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }

        /* an error is reported only if the fd was actually selected for
           reading or writing (DO_R or DO_W) in addition to RET_E */
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }

    SAVE_PROFILE_INTERVAL(select_fd_sets);

out:
    __try_free(cur, polls);
    SAVE_PROFILE_INTERVAL(select_try_free);
    return ret;
}
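
/* pselect6(2): same as select(2) with a timespec timeout.  As with ppoll,
 * the sigmask argument is accepted but not applied in this implementation. */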
int shim_do_pselect6 (int nfds, fd_set * readfds, fd_set * writefds,
                      fd_set * errorfds, const struct __kernel_timespec * tsp,
                      const __sigset_t * sigmask)
{
    if (!nfds)
        return tsp ? shim_do_nanosleep(tsp, NULL) : -EINVAL;

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));

        if (!do_r && !do_w)
            continue;

        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }

        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }

        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }

out:
    __try_free(cur, polls);
    return ret;
}