/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */

/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_poll.c
 *
 * Implementation of the system calls "poll", "ppoll", "select" and
 * "pselect6".
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_utils.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>
#include <list.h>

#include <errno.h>
#include <linux/fcntl.h>
void __attribute__((noreturn))
fortify_fail (const char *msg)
{
    /* The loop is added only to keep gcc happy. */
    while (1)
        debug("*** %s ***\n", msg);
}

void __attribute__((noreturn))
chk_fail (void)
{
    fortify_fail("buffer overflow detected");
}
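
/* __try_alloca() and __try_free() allocate scratch memory on the current
 * stack when there is room for it (checked with check_stack_size()), and
 * fall back to the heap otherwise.  __try_free() releases only the heap
 * case; a stack allocation simply vanishes when the caller returns. */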
static inline __attribute__((always_inline))
void * __try_alloca (struct shim_thread * cur, int size)
{
    if (!size)
        return NULL;

    if (check_stack_size(cur, size))
        return __alloca(size);
    else
        return malloc(size);
}

static inline __attribute__((always_inline))
void __try_free (struct shim_thread * cur, void * mem)
{
    if (mem && !check_on_stack(cur, mem))
        free(mem);
}
DEFINE_PROFILE_CATAGORY(__do_poll, select);
DEFINE_PROFILE_INTERVAL(do_poll_get_handle, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_search_repeat, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_set_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_check_accmode, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_vfs_polling, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_update_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_first_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_second_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any_peek, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_third_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_fourth_loop, __do_poll);
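
/* Per-fd state bits used by __do_poll():
 *   DO_R / DO_W       - the caller asked to poll the fd for read / write
 *   KNOWN_R / KNOWN_W - the read / write result is already determined
 *   RET_R / RET_W     - report the fd as readable / writeable
 *   RET_E             - report an error or disconnection on the fd
 *   POLL_R / POLL_W   - the fd still has to be polled through the PAL
 */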
#define DO_R        0001
#define DO_W        0002
#define KNOWN_R     0004
#define KNOWN_W     0010
#define RET_R       0020
#define RET_W       0040
#define RET_E       0100
#define POLL_R      0200
#define POLL_W      0400
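
/* One poll_handle per fd passed in by the caller.  Entries that resolve to
 * the same shim_handle are deduplicated: the first occurrence stays on the
 * `polling' list and later occurrences are chained onto its `children'
 * list, so each handle is polled once and the result fanned back out. */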
struct poll_handle {
    unsigned short       flags;
    FDTYPE               fd;
    struct shim_handle * handle;
    struct poll_handle * next;
    struct poll_handle * children;
} __attribute__((packed));
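
/* Timeouts are passed to __do_poll() in microseconds; POLL_NOTIMEOUT
 * means block indefinitely. */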
#define POLL_NOTIMEOUT ((unsigned long) -1)
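
/* __do_poll() runs in three phases:
 *   (1) under the handle-map lock, resolve every fd, deduplicate repeated
 *       handles, and settle whatever can be settled cheaply (the handle's
 *       access mode, the filesystem's own poll operation);
 *   (2) collect the PAL handles that still need polling and wait on them
 *       with DkObjectsWaitAny();
 *   (3) translate the reported PAL stream attributes back into
 *       RET_R/RET_W/RET_E flags and propagate them to repeated entries. */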
static int __do_poll (int npolls, struct poll_handle * polls,
                      unsigned long timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct shim_handle_map * map = cur->handle_map;
    int npals = 0;
    bool has_r = false, has_known = false;
    struct poll_handle * polling = NULL;
    struct poll_handle * p, ** n, * q;
    PAL_HANDLE * pals = NULL;
    int ret = 0;

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
    BEGIN_PROFILE_INTERVAL_SET(begin_time);
#endif

    lock(map->lock);

    for (p = polls ; p < &polls[npolls] ; p++) {
        bool do_r = p->flags & DO_R;
        bool do_w = p->flags & DO_W;

        if (!do_r && !do_w) {
no_op:
            p->flags  = 0;
            p->handle = NULL;
            UPDATE_PROFILE_INTERVAL();
            continue;
        }
        struct shim_handle * hdl = __get_fd_handle(p->fd, NULL, map);
        if (!hdl || !hdl->fs || !hdl->fs->fs_ops)
            goto no_op;
        SAVE_PROFILE_INTERVAL(do_poll_get_handle);

        /* search for a repeated entry */
        struct poll_handle * rep = polling;
        for ( ; rep ; rep = rep->next)
            if (rep->handle == hdl)
                break;

        SAVE_PROFILE_INTERVAL(do_poll_search_repeat);

        p->flags    = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        p->handle   = NULL;
        p->next     = NULL;
        p->children = NULL;

        if (rep) {
            /* if there are repeated handles and we already know the
               result, let's skip them */
            if (rep->flags & (KNOWN_R|POLL_R)) {
                p->flags = rep->flags & (KNOWN_R|RET_R|RET_E|POLL_R);
                do_r = false;
            }

            if (rep->flags & (KNOWN_W|POLL_W)) {
                p->flags = rep->flags & (KNOWN_W|RET_W|RET_E|POLL_W);
                do_w = false;
            }

            p->next = rep->children;
            rep->children = p;

            if (!do_r && !do_w) {
                SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
                continue;
            }
        } else {
            get_handle(hdl);
            p->handle = hdl;
            p->next = polling;
            polling = p;
        }

        SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);

        /* do the easiest check first: the handle's access mode */
        if (do_r && !(hdl->acc_mode & MAY_READ)) {
            p->flags |= KNOWN_R;
            debug("fd %d known to be not readable\n", p->fd);
            do_r = false;
        }

        if (do_w && !(hdl->acc_mode & MAY_WRITE)) {
            p->flags |= KNOWN_W;
            debug("fd %d known to be not writeable\n", p->fd);
            do_w = false;
        }

        SAVE_PROFILE_INTERVAL(do_poll_check_accmode);

        if (!do_r && !do_w)
            goto done_finding;

        /* if the fs provides a poll operation, let's try it */
        if (hdl->fs->fs_ops->poll) {
            int need_poll = 0;

            if (do_r && !(p->flags & POLL_R))
                need_poll |= FS_POLL_RD;
            if (do_w && !(p->flags & POLL_W))
                need_poll |= FS_POLL_WR;

            if (need_poll) {
                int polled = hdl->fs->fs_ops->poll(hdl, need_poll);

                if (polled < 0) {
                    if (polled != -EAGAIN) {
                        ret = polled;
                        unlock(map->lock);
                        goto done_polling;
                    }
                } else {
                    if (polled & FS_POLL_ER) {
                        debug("fd %d known to have error\n", p->fd);
                        p->flags |= KNOWN_R|KNOWN_W|RET_E;
                        do_r = do_w = false;
                    }

                    if (polled & FS_POLL_RD) {
                        debug("fd %d known to be readable\n", p->fd);
                        p->flags |= KNOWN_R|RET_R;
                        do_r = false;
                    }

                    if (polled & FS_POLL_WR) {
                        debug("fd %d known to be writeable\n", p->fd);
                        p->flags |= KNOWN_W|RET_W;
                        do_w = false;
                    }
                }
            }

            SAVE_PROFILE_INTERVAL(do_poll_vfs_polling);

            if (!do_r && !do_w)
                goto done_finding;
        }

        struct poll_handle * to_poll = rep ? : p;

        if (!(to_poll->flags & (POLL_R|POLL_W))) {
            if (!hdl->pal_handle) {
                p->flags |= KNOWN_R|KNOWN_W|RET_E;
                do_r = do_w = false;
                goto done_finding;
            }

            debug("polling fd %d\n", to_poll->fd);
            npals++;
        }

        to_poll->flags |= (do_r ? POLL_R : 0)|(do_w ? POLL_W : 0);

done_finding:
        /* feed the new knowledge back to repeated handles */
        if (rep)
            rep->flags |= p->flags &
                          (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E|POLL_R|POLL_W);

        if (do_r)
            has_r = true;

        if (p->flags & (RET_R|RET_W|RET_E))
            has_known = true;

        SAVE_PROFILE_INTERVAL(do_poll_update_bookkeeping);
    }
    unlock(map->lock);
    SAVE_PROFILE_INTERVAL_SINCE(do_poll_first_loop, begin_time);

    if (!npals) {
        ret = 0;
        goto done_polling;
    }
    pals = __try_alloca(cur, sizeof(PAL_HANDLE) * npals);
    if (!pals) {
        ret = -ENOMEM;
        goto done_polling;
    }

    npals = 0;
    n = &polling;
    for (p = polling ; p ; p = p->next) {
        assert(p->handle);

        if (!(p->flags & (POLL_R|POLL_W))) {
            *n = p->next;
            put_handle(p->handle);
            p->handle = NULL;
            continue;
        }

        pals[npals++] = p->handle->pal_handle;
        n = &p->next;
    }

    SAVE_PROFILE_INTERVAL(do_poll_second_loop);
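
    /* Wait on the remaining PAL handles.  If some results are already
     * known, or nothing is being polled for read, don't block: a zero
     * timeout makes DkObjectsWaitAny() only peek.  Each handle that
     * reports in is translated back into RET_* flags, fanned out to its
     * repeated entries, and dropped from the wait set once both
     * directions are determined. */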
    while (npals) {
        int pal_timeout = (has_r && !has_known) ? timeout : 0;
        PAL_HANDLE polled = DkObjectsWaitAny(npals, pals, pal_timeout);

        if (pal_timeout)
            SAVE_PROFILE_INTERVAL(do_poll_wait_any);
        else
            SAVE_PROFILE_INTERVAL(do_poll_wait_any_peek);

        if (!polled)
            break;

        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQuerybyHandle(polled, &attr))
            break;

        n = &polling;
        for (p = polling ; p ; p = p->next) {
            if (p->handle->pal_handle == polled)
                break;
            n = &p->next;
        }

        if (!p)
            break;

        debug("handle %s is polled\n", qstrgetstr(&p->handle->uri));

        p->flags |= KNOWN_R|KNOWN_W;

        if (attr.disconnected) {
            debug("handle is polled to be disconnected\n");
            p->flags |= RET_E;
        }
        if (attr.readable) {
            debug("handle is polled to be readable\n");
            p->flags |= RET_R;
        }
        if (attr.writeable) {
            debug("handle is polled to be writeable\n");
            p->flags |= RET_W;
        }

        for (q = p->children ; q ; q = q->next)
            q->flags |= p->flags & (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E);

        if ((p->flags & (POLL_R|KNOWN_R)) != (POLL_R|KNOWN_R) &&
            (p->flags & (POLL_W|KNOWN_W)) != (POLL_W|KNOWN_W))
            continue;

        has_known = true;

        *n = p->next;
        put_handle(p->handle);
        p->handle = NULL;

        int nskip = 0;
        for (int i = 0 ; i < npals ; i++)
            if (pals[i] == polled) {
                nskip = 1;
            } else if (nskip) {
                pals[i - nskip] = pals[i];
            }

        npals -= nskip;
        SAVE_PROFILE_INTERVAL(do_poll_third_loop);
    }

    ret = 0;

done_polling:
    for (p = polling ; p ; p = p->next)
        put_handle(p->handle);

    SAVE_PROFILE_INTERVAL(do_poll_fourth_loop);

    if (pals)
        __try_free(cur, pals);

    return ret;
}
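
/* poll(2) front end: POLLIN/POLLRDNORM map to DO_R, POLLOUT/POLLWRNORM to
 * DO_W, and the millisecond timeout is converted to microseconds.  The
 * return value counts the fds with a nonzero revents.
 *
 * Hypothetical usage, from a caller's point of view (sock_fd is assumed):
 *
 *     struct pollfd fds[] = { { .fd = sock_fd, .events = POLLIN } };
 *     int n = shim_do_poll(fds, 1, 1000);   // wait up to one second
 *     if (n > 0 && (fds[0].revents & POLLIN))
 *         ;  // sock_fd is readable
 */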
int shim_do_poll (struct pollfd * fds, nfds_t nfds, int timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    if (nfds && !polls)
        return -ENOMEM;

    for (nfds_t i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    int ret = __do_poll(nfds, polls,
                        timeout < 0 ? POLL_NOTIMEOUT : timeout * 1000ULL);

    if (ret < 0)
        goto out;

    ret = 0;

    for (nfds_t i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (POLLERR|POLLHUP);

        if (fds[i].revents)
            ret++;
    }

out:
    __try_free(cur, polls);
    return ret;
}
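
/* ppoll(2) front end.  Identical to shim_do_poll() except that the timeout
 * arrives as a struct timespec (converted here to microseconds).  Note that
 * sigmask/sigsetsize are accepted but never applied: the temporary signal
 * mask swap that ppoll(2) promises is not implemented. */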
int shim_do_ppoll (struct pollfd * fds, int nfds, struct timespec * tsp,
                   const __sigset_t * sigmask, size_t sigsetsize)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    if (nfds && !polls)
        return -ENOMEM;

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(nfds, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (POLLERR|POLLHUP);

        if (fds[i].revents)
            ret++;
    }

out:
    __try_free(cur, polls);
    return ret;
}
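
/* Local reimplementation of the fd_set bit manipulation macros, so that
 * select/pselect need not pull in the libc definitions.  These follow the
 * usual glibc layout: an fd_set is an array of long-sized words, one bit
 * per descriptor. */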
typedef long int __fd_mask;

#ifndef __NFDBITS
#define __NFDBITS (8 * (int) sizeof (__fd_mask))
#endif

#ifndef __FDS_BITS
#define __FDS_BITS(set) ((set)->fds_bits)
#endif

/* We don't use `memset' because this would require a prototype and
   the array isn't too big. */
#define __FD_ZERO(set)                                                     \
    do {                                                                   \
        unsigned int __i;                                                  \
        fd_set *__arr = (set);                                             \
        for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i)   \
            __FDS_BITS (__arr)[__i] = 0;                                   \
    } while (0)

#define __FD_ELT(d)     ((d) / __NFDBITS)
#define __FD_MASK(d)    ((__fd_mask) 1 << ((d) % __NFDBITS))

#define __FD_SET(d, set)                                                   \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] |= __FD_MASK (d)))
#define __FD_CLR(d, set)                                                   \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] &= ~__FD_MASK (d)))
#define __FD_ISSET(d, set)                                                 \
    ((__FDS_BITS (set)[__FD_ELT (d)] & __FD_MASK (d)) != 0)
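
/* For example, with a 64-bit __fd_mask, fd 70 lives in word
 * __FD_ELT(70) = 70 / 64 = 1, at bit 70 % 64 = 6, so __FD_MASK(70) is
 * ((__fd_mask) 1 << 6). */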
DEFINE_PROFILE_CATAGORY(select, );
DEFINE_PROFILE_INTERVAL(select_tryalloca_1, select);
DEFINE_PROFILE_INTERVAL(select_setup_array, select);
DEFINE_PROFILE_INTERVAL(select_do_poll, select);
DEFINE_PROFILE_INTERVAL(select_fd_zero, select);
DEFINE_PROFILE_INTERVAL(select_fd_sets, select);
DEFINE_PROFILE_INTERVAL(select_try_free, select);
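
/* select(2) front end: every bit set in readfds/writefds below nfds becomes
 * one poll_handle, __do_poll() does the waiting, and the sets are rebuilt
 * from the RET_* flags.  An fd is reported in errorfds only when RET_E is
 * set together with at least one DO_* bit, which is what the `> RET_E'
 * comparison below checks (RET_E is the highest bit in the mask).  With
 * nfds == 0 the call degenerates to a sleep on the timeout. */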
int shim_do_select (int nfds, fd_set * readfds, fd_set * writefds,
                    fd_set * errorfds, struct __kernel_timeval * tsv)
{
    BEGIN_PROFILE_INTERVAL();

    if (!nfds) {
        if (!tsv)
            return -EINVAL;

        struct __kernel_timespec tsp;
        tsp.tv_sec  = tsv->tv_sec;
        tsp.tv_nsec = tsv->tv_usec * 1000;
        return shim_do_nanosleep(&tsp, NULL);
    }

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    if (!polls)
        return -ENOMEM;

    int npolls = 0;
    SAVE_PROFILE_INTERVAL(select_tryalloca_1);

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        debug("poll fd %d %s%s\n", fd, do_r ? "R" : "", do_w ? "W" : "");
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    SAVE_PROFILE_INTERVAL(select_setup_array);

    unsigned long timeout = tsv ?
                            tsv->tv_sec * 1000000ULL + tsv->tv_usec :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);
    SAVE_PROFILE_INTERVAL(select_do_poll);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);
    SAVE_PROFILE_INTERVAL(select_fd_zero);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }
        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
    SAVE_PROFILE_INTERVAL(select_fd_sets);

out:
    __try_free(cur, polls);
    SAVE_PROFILE_INTERVAL(select_try_free);
    return ret;
}
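
/* pselect6(2) front end.  Same as shim_do_select() but with a nanosecond
 * timespec timeout; as with shim_do_ppoll(), the sigmask argument is
 * accepted but not applied. */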
int shim_do_pselect6 (int nfds, fd_set * readfds, fd_set * writefds,
                      fd_set * errorfds, const struct __kernel_timespec * tsp,
                      const __sigset_t * sigmask)
{
    if (!nfds)
        return tsp ? shim_do_nanosleep(tsp, NULL) : -EINVAL;

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    if (!polls)
        return -ENOMEM;

    int npolls = 0;

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }
        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }

out:
    __try_free(cur, polls);
    return ret;
}