
/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_poll.c
 *
 * Implementation of the system calls "poll", "ppoll", "select" and
 * "pselect6".
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_utils.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>
#include <list.h>

#include <errno.h>
#include <linux/fcntl.h>
noreturn void
fortify_fail (const char * msg)
{
    /* The loop is added only to keep gcc happy. */
    while (1)
        debug("*** %s ***\n", msg);
}

noreturn void
chk_fail (void)
{
    fortify_fail("buffer overflow detected");
}
static inline __attribute__((always_inline))
void * __try_alloca (struct shim_thread * cur, int size)
{
    if (!size)
        return NULL;

    if (check_stack_size(cur, size))
        return __alloca(size);
    else
        return malloc(size);
}
static inline __attribute__((always_inline))
void __try_free (struct shim_thread * cur, void * mem)
{
    if (mem && !check_on_stack(cur, mem))
        free(mem);
}
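
/* Illustrative usage (not part of the original file): __try_alloca() serves
 * small allocations from the stack and silently falls back to the heap when
 * the remaining stack budget is too small, so every call must be paired with
 * a __try_free() on all exit paths, as the syscall handlers below do:
 *
 *     struct poll_handle * polls =
 *         __try_alloca(cur, sizeof(struct poll_handle) * nfds);
 *     ...
 *     __try_free(cur, polls);
 *
 * __try_free() checks whether the pointer lives on the current stack and
 * only calls free() for heap-backed memory. */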
DEFINE_PROFILE_CATEGORY(__do_poll, select);
DEFINE_PROFILE_INTERVAL(do_poll_get_handle, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_search_repeat, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_set_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_check_accmode, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_vfs_polling, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_update_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_first_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_second_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any_peek, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_third_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_fourth_loop, __do_poll);
#define DO_R    0001
#define DO_W    0002
#define KNOWN_R 0004
#define KNOWN_W 0010
#define RET_R   0020
#define RET_W   0040
#define RET_E   0100
#define POLL_R  0200
#define POLL_W  0400
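
/* Flag semantics, as used by __do_poll() below:
 *   DO_R/DO_W         - the caller asked to poll this fd for read/write;
 *   KNOWN_R/KNOWN_W   - the read/write status is already determined;
 *   RET_R/RET_W/RET_E - readable/writable/error results to report back;
 *   POLL_R/POLL_W     - the handle still needs PAL-level polling. */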
struct poll_handle {
    unsigned short       flags;
    FDTYPE               fd;
    struct shim_handle * handle;
    struct poll_handle * next;
    struct poll_handle * children;
} __attribute__((packed));
#define POLL_NOTIMEOUT ((uint64_t)-1)
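
/* Overview (added for clarity; derived from the code below): __do_poll()
 * works in three passes:
 *   1. Under the handle-map lock, resolve each fd, deduplicate repeated
 *      fds (duplicates hang off the first occurrence via next/children),
 *      and settle whatever can be decided cheaply: the handle's access
 *      mode and the filesystem's own poll operator.
 *   2. Collect the PAL handles that still need polling into `pals`.
 *   3. Wait on them with DkObjectsWaitAny(), folding each result back
 *      into the poll_handle flags until nothing pollable remains. */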
static int __do_poll (int npolls, struct poll_handle * polls,
                      uint64_t timeout_us)
{
    struct shim_thread * cur = get_cur_thread();
    struct shim_handle_map * map = cur->handle_map;

    int npals = 0;
    bool has_r = false, has_known = false;
    struct poll_handle * polling = NULL;
    struct poll_handle * p, ** n, * q;
    PAL_HANDLE * pals = NULL;
    int ret = 0;

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
    BEGIN_PROFILE_INTERVAL_SET(begin_time);
#endif

    lock(&map->lock);
    for (p = polls ; p < polls + npolls ; p++) {
        bool do_r = p->flags & DO_R;
        bool do_w = p->flags & DO_W;

        if (!do_r && !do_w) {
    no_op:
            p->flags  = 0;
            p->handle = NULL;
            UPDATE_PROFILE_INTERVAL();
            continue;
        }

        struct shim_handle * hdl = __get_fd_handle(p->fd, NULL, map);
        /* a closed fd, or one without a filesystem, cannot be polled */
        if (!hdl || !hdl->fs || !hdl->fs->fs_ops)
            goto no_op;
        SAVE_PROFILE_INTERVAL(do_poll_get_handle);

        /* search for a repeated entry */
        struct poll_handle * rep = polling;
        for ( ; rep ; rep = rep->next)
            if (rep->handle == hdl)
                break;
        SAVE_PROFILE_INTERVAL(do_poll_search_repeat);

        p->flags    = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        p->handle   = NULL;
        p->next     = NULL;
        p->children = NULL;

        if (rep) {
            /* if there are repeated handles and we already know the
               result, let's skip them */
            if (rep->flags & (KNOWN_R|POLL_R)) {
                p->flags = rep->flags & (KNOWN_R|RET_R|RET_E|POLL_R);
                do_r = false;
            }

            if (rep->flags & (KNOWN_W|POLL_W)) {
                p->flags = rep->flags & (KNOWN_W|RET_W|RET_E|POLL_W);
                do_w = false;
            }

            p->next = rep->children;
            rep->children = p;

            if (!do_r && !do_w) {
                SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
                continue;
            }
        } else {
            get_handle(hdl);
            p->handle = hdl;
            p->next = polling;
            polling = p;
        }
        SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);

        /* do the easiest check first: the handle's access mode */
        if (do_r && !(hdl->acc_mode & MAY_READ)) {
            p->flags |= KNOWN_R;
            debug("fd %d known to be not readable\n", p->fd);
            do_r = false;
        }

        if (do_w && !(hdl->acc_mode & MAY_WRITE)) {
            p->flags |= KNOWN_W;
            debug("fd %d known to be not writable\n", p->fd);
            do_w = false;
        }
        SAVE_PROFILE_INTERVAL(do_poll_check_accmode);

        if (!do_r && !do_w)
            goto done_finding;

        /* if the fs provides a poll operator, let's try it */
        if (hdl->fs->fs_ops->poll) {
            int need_poll = 0;

            if (do_r && !(p->flags & POLL_R))
                need_poll |= FS_POLL_RD;
            if (do_w && !(p->flags & POLL_W))
                need_poll |= FS_POLL_WR;

            if (need_poll) {
                int polled = hdl->fs->fs_ops->poll(hdl, need_poll);
                if (polled < 0) {
                    if (polled != -EAGAIN) {
                        unlock(&map->lock);
                        ret = polled;
                        goto done_polling;
                    }
                } else {
                    if (polled & FS_POLL_ER) {
                        debug("fd %d known to have error\n", p->fd);
                        p->flags |= KNOWN_R|KNOWN_W|RET_E;
                        do_r = do_w = false;
                    }

                    if (polled & FS_POLL_RD) {
                        debug("fd %d known to be readable\n", p->fd);
                        p->flags |= KNOWN_R|RET_R;
                        do_r = false;
                    }
                    if (polled & FS_POLL_WR) {
                        debug("fd %d known to be writable\n", p->fd);
                        p->flags |= KNOWN_W|RET_W;
                        do_w = false;
                    }
                }
            }
            SAVE_PROFILE_INTERVAL(do_poll_vfs_polling);

            if (!do_r && !do_w)
                goto done_finding;
        }

        struct poll_handle * to_poll = rep ? : p;

        if (!(to_poll->flags & (POLL_R|POLL_W))) {
            /* no PAL handle to wait on; report an error instead */
            if (!hdl->pal_handle) {
                p->flags |= KNOWN_R|KNOWN_W|RET_E;
                do_r = do_w = false;
                goto done_finding;
            }

            debug("polling fd %d\n", to_poll->fd);
            npals++;
        }

        to_poll->flags |= (do_r ? POLL_R : 0)|(do_w ? POLL_W : 0);

    done_finding:
        /* feed the new knowledge back to the repeated handles */
        if (rep)
            rep->flags |= p->flags &
                          (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E|POLL_R|POLL_W);

        if (do_r)
            has_r = true;

        if (p->flags & (RET_R|RET_W|RET_E))
            has_known = true;

        SAVE_PROFILE_INTERVAL(do_poll_update_bookkeeping);
    }

    unlock(&map->lock);
    SAVE_PROFILE_INTERVAL_SINCE(do_poll_first_loop, begin_time);
    if (!npals) {
        ret = 0;
        goto done_polling;
    }

    pals = __try_alloca(cur, sizeof(PAL_HANDLE) * npals);
    npals = 0;

    n = &polling;
    for (p = polling ; p ; p = p->next) {
        assert(p->handle);

        /* unlink entries that are already settled; keep only the ones
           that still need PAL-level polling */
        if (!(p->flags & (POLL_R|POLL_W))) {
            *n = p->next;
            put_handle(p->handle);
            p->handle = NULL;
            continue;
        }

        pals[npals++] = p->handle->pal_handle;
        n = &p->next;
    }
    SAVE_PROFILE_INTERVAL(do_poll_second_loop);
    while (npals) {
        /* with nothing known yet, wait for the caller's timeout;
           otherwise only peek (zero timeout).  Keep the timeout in
           uint64_t so POLL_NOTIMEOUT is not truncated. */
        uint64_t pal_timeout_us = (has_r && !has_known) ? timeout_us : 0;
        PAL_HANDLE polled = DkObjectsWaitAny(npals, pals, pal_timeout_us);

        if (pal_timeout_us)
            SAVE_PROFILE_INTERVAL(do_poll_wait_any);
        else
            SAVE_PROFILE_INTERVAL(do_poll_wait_any_peek);

        if (!polled)
            break;

        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQueryByHandle(polled, &attr))
            break;

        n = &polling;
        for (p = polling ; p ; p = p->next) {
            if (p->handle->pal_handle == polled)
                break;
            n = &p->next;
        }

        if (!p)
            break;

        debug("handle %s is polled\n", qstrgetstr(&p->handle->uri));

        p->flags |= KNOWN_R|KNOWN_W;

        if (attr.disconnected) {
            debug("handle is polled to be disconnected\n");
            p->flags |= RET_E;
        }
        if (attr.readable) {
            debug("handle is polled to be readable\n");
            p->flags |= RET_R;
        }
        if (attr.writable) {
            debug("handle is polled to be writable\n");
            p->flags |= RET_W;
        }

        for (q = p->children ; q ; q = q->next)
            q->flags |= p->flags & (KNOWN_R|KNOWN_W|RET_W|RET_R|RET_E);

        if ((p->flags & (POLL_R|KNOWN_R)) != (POLL_R|KNOWN_R) &&
            (p->flags & (POLL_W|KNOWN_W)) != (POLL_W|KNOWN_W))
            continue;

        has_known = true;

        *n = p->next;
        put_handle(p->handle);
        p->handle = NULL;

        /* compact the PAL handle array to drop the one just settled */
        int nskip = 0;
        for (int i = 0 ; i < npals ; i++)
            if (pals[i] == polled) {
                nskip = 1;
            } else if (nskip) {
                pals[i - nskip] = pals[i];
            }

        npals -= nskip;
        SAVE_PROFILE_INTERVAL(do_poll_third_loop);
    }
    ret = 0;

done_polling:
    for (p = polling ; p ; p = p->next)
        put_handle(p->handle);
    SAVE_PROFILE_INTERVAL(do_poll_fourth_loop);

    if (pals)
        __try_free(cur, pals);

    return ret;
}
int shim_do_poll (struct pollfd * fds, nfds_t nfds, int timeout_ms)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
        __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (size_t i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    /* a negative timeout means an infinite timeout */
    int ret = __do_poll(nfds, polls,
                        timeout_ms < 0 ? POLL_NOTIMEOUT : timeout_ms * 1000ULL);

    if (ret < 0)
        goto out;

    ret = 0;

    for (size_t i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (POLLERR|POLLHUP);

        if (fds[i].revents)
            ret++;
    }

out:
    __try_free(cur, polls);
    return ret;
}
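
/* Illustrative walk-through (not part of the original file): a caller doing
 *
 *     struct pollfd pfd = { .fd = sock, .events = POLLIN };
 *     poll(&pfd, 1, 1000);
 *
 * arrives here with nfds = 1 and timeout_ms = 1000; the entry becomes one
 * poll_handle with flags = DO_R, __do_poll() runs with a 1000000 us timeout,
 * and RET_R maps back to POLLIN in revents. */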
int shim_do_ppoll (struct pollfd * fds, int nfds, struct timespec * tsp,
                   const __sigset_t * sigmask, size_t sigsetsize)
{
    /* signal masking is not implemented; the mask is accepted and ignored */
    __UNUSED(sigmask);
    __UNUSED(sigsetsize);

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
        __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    uint64_t timeout_us = tsp ? tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000
                              : POLL_NOTIMEOUT;

    int ret = __do_poll(nfds, polls, timeout_us);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            /* POLLERR/POLLHUP are reported regardless of the requested
               events, matching shim_do_poll() above */
            fds[i].revents |= (POLLERR|POLLHUP);

        if (fds[i].revents)
            ret++;
    }

out:
    __try_free(cur, polls);
    return ret;
}
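
/* Note the unit conversion above: a struct timespec of
 * { .tv_sec = 2, .tv_nsec = 500000000 } becomes
 * 2 * 1000000 + 500000000 / 1000 = 2500000 us, while a NULL tsp means an
 * infinite timeout (POLL_NOTIMEOUT), matching ppoll(2) semantics. */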
typedef long int __fd_mask;

#ifndef __NFDBITS
#define __NFDBITS (8 * (int)sizeof(__fd_mask))
#endif

#ifndef __FDS_BITS
#define __FDS_BITS(set) ((set)->fds_bits)
#endif

/* We don't use `memset' because this would require a prototype and
   the array isn't too big.  */
#define __FD_ZERO(set)                                                     \
    do {                                                                   \
        unsigned int __i;                                                  \
        fd_set * __arr = (set);                                            \
        for (__i = 0; __i < sizeof(fd_set) / sizeof(__fd_mask); ++__i)     \
            __FDS_BITS(__arr)[__i] = 0;                                    \
    } while (0)

#define __FD_ELT(d)  ((d) / __NFDBITS)
#define __FD_MASK(d) ((__fd_mask)1 << ((d) % __NFDBITS))

#define __FD_SET(d, set) \
    ((void)(__FDS_BITS(set)[__FD_ELT(d)] |= __FD_MASK(d)))
#define __FD_CLR(d, set) \
    ((void)(__FDS_BITS(set)[__FD_ELT(d)] &= ~__FD_MASK(d)))
#define __FD_ISSET(d, set) \
    ((__FDS_BITS(set)[__FD_ELT(d)] & __FD_MASK(d)) != 0)
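
/* Example of the bit layout (assuming a 64-bit __fd_mask, as on x86-64):
 * fd 70 lands in word __FD_ELT(70) = 70 / 64 = 1 under mask
 * __FD_MASK(70) = 1L << (70 % 64) = 1L << 6, so __FD_SET(70, set) sets
 * bit 6 of fds_bits[1]. */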
DEFINE_PROFILE_CATEGORY(select, );
DEFINE_PROFILE_INTERVAL(select_tryalloca_1, select);
DEFINE_PROFILE_INTERVAL(select_setup_array, select);
DEFINE_PROFILE_INTERVAL(select_do_poll, select);
DEFINE_PROFILE_INTERVAL(select_fd_zero, select);
DEFINE_PROFILE_INTERVAL(select_fd_sets, select);
DEFINE_PROFILE_INTERVAL(select_try_free, select);
int shim_do_select (int nfds, fd_set * readfds, fd_set * writefds,
                    fd_set * errorfds, struct __kernel_timeval * tsv)
{
    BEGIN_PROFILE_INTERVAL();

    /* with no fds to watch, select() degenerates into a sleep */
    if (!nfds) {
        if (!tsv)
            return -EINVAL;

        struct __kernel_timespec tsp;
        tsp.tv_sec  = tsv->tv_sec;
        tsp.tv_nsec = tsv->tv_usec * 1000;
        return shim_do_nanosleep(&tsp, NULL);
    }

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
        __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;
    SAVE_PROFILE_INTERVAL(select_tryalloca_1);

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        debug("poll fd %d %s%s\n", fd, do_r ? "R" : "", do_w ? "W" : "");
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }
    SAVE_PROFILE_INTERVAL(select_setup_array);

    uint64_t timeout_us = tsv ? tsv->tv_sec * 1000000ULL + tsv->tv_usec
                              : POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout_us);
    SAVE_PROFILE_INTERVAL(select_do_poll);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);
    SAVE_PROFILE_INTERVAL(select_fd_zero);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }

        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }

        /* `> RET_E` holds exactly when RET_E is set along with at least
           one of DO_R|DO_W */
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
    SAVE_PROFILE_INTERVAL(select_fd_sets);

out:
    __try_free(cur, polls);
    SAVE_PROFILE_INTERVAL(select_try_free);
    return ret;
}
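
/* Illustrative walk-through (not part of the original file): with
 * readfds = writefds = { 3 } and nfds = 4, the loop above builds a single
 * poll_handle { .fd = 3, .flags = DO_R|DO_W }; after __do_poll(), fd 3 is
 * put back into readfds only if RET_R was reported and into writefds only
 * if RET_W was reported, and ret counts each set bit separately. */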
int shim_do_pselect6 (int nfds, fd_set * readfds, fd_set * writefds,
                      fd_set * errorfds, const struct __kernel_timespec * tsp,
                      const __sigset_t * sigmask)
{
    /* signal masking is not implemented; the mask is accepted and ignored */
    __UNUSED(sigmask);

    if (!nfds)
        return tsp ? shim_do_nanosleep(tsp, NULL) : -EINVAL;

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
        __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    uint64_t timeout_us = tsp ? tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000
                              : POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout_us);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }

        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }

        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }

out:
    __try_free(cur, polls);
    return ret;
}