/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_poll.c
 *
 * Implementation of the system calls "poll", "ppoll", "select" and
 * "pselect6".
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_utils.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>
#include <linux_list.h>

#include <errno.h>

#include <linux/fcntl.h>
void __attribute__ ((noreturn))
fortify_fail (const char *msg)
{
    /* The loop is added only to keep gcc happy. */
    while (1)
        debug("*** %s ***\n", msg);
}

void __attribute__ ((noreturn))
chk_fail (void)
{
    fortify_fail("buffer overflow detected");
}
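
/* __try_alloca / __try_free: allocate small, short-lived buffers on the
 * current stack when there is room, falling back to the heap otherwise.
 * __try_free only frees buffers that did not come from the stack. */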
static inline __attribute__((always_inline))
void * __try_alloca (struct shim_thread * cur, int size)
{
    if (!size)
        return NULL;

    if (check_stack_size(cur, size))
        return __alloca(size);
    else
        return malloc(size);
}

static inline __attribute__((always_inline))
void __try_free (struct shim_thread * cur, void * mem)
{
    if (mem && !check_on_stack(cur, mem))
        free(mem);
}
DEFINE_PROFILE_CATAGORY(__do_poll, select);
DEFINE_PROFILE_INTERVAL(do_poll_get_handle, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_search_repeat, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_set_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_check_accmode, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_vfs_polling, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_update_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_first_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_second_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any_peek, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_third_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_fourth_loop, __do_poll);
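
/* Per-entry state bits (octal):
 *   DO_R / DO_W       - caller asked to poll for read / write
 *   KNOWN_R / KNOWN_W - readability / writability already determined
 *   RET_R / RET_W     - report readable / writable to the caller
 *   RET_E             - report an error (POLLERR|POLLHUP) to the caller
 *   POLL_R / POLL_W   - still waiting on the PAL handle for read / write */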
#define DO_R            0001
#define DO_W            0002
#define KNOWN_R         0004
#define KNOWN_W         0010
#define RET_R           0020
#define RET_W           0040
#define RET_E           0100
#define POLL_R          0200
#define POLL_W          0400
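
/* One entry per polled fd. The first entry for a given handle goes on the
 * local "polling" list via "next"; later entries for the same handle are
 * chained under the first one's "children" list, so each handle only has
 * to be polled once. */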
struct poll_handle {
    unsigned short      flags;
    FDTYPE              fd;
    struct shim_handle *handle;
    struct poll_handle *next;
    struct poll_handle *children;
} __attribute__((packed));

#define POLL_NOTIMEOUT  ((unsigned long) -1)
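
/* Core engine shared by poll/ppoll/select/pselect6.
 *
 * "timeout" is in microseconds, or POLL_NOTIMEOUT to block indefinitely.
 * Callers fill in polls[i].fd and the DO_R/DO_W bits of polls[i].flags;
 * results come back through the RET_* bits. For example, a caller polling
 * fd 3 for readability passes { .fd = 3, .flags = DO_R } and tests
 * (flags & RET_R) afterwards. The return value is always 0 for now;
 * per-fd failures are folded into RET_E. */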
static int __do_poll (int npolls, struct poll_handle * polls,
                      unsigned long timeout)
{
    struct shim_thread * cur = get_cur_thread();
    struct shim_handle_map * map = cur->handle_map;
    int npals = 0;
    bool has_r = false, has_known = false;
    struct poll_handle * polling = NULL;
    struct poll_handle * p, ** n, * q;
    PAL_HANDLE * pals = NULL;

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
    BEGIN_PROFILE_INTERVAL_SET(begin_time);
#endif

    lock(map->lock);

    for (p = polls ; p < &polls[npolls] ; p++) {
        bool do_r = p->flags & DO_R;
        bool do_w = p->flags & DO_W;

        if (!do_r && !do_w) {
no_op:
            p->flags = 0;
            p->handle = NULL;
            UPDATE_PROFILE_INTERVAL();
            continue;
        }

        struct shim_handle * hdl = __get_fd_handle(p->fd, NULL, map);
        /* a bad fd, or a handle without fs operations, cannot be polled */
        if (!hdl || !hdl->fs || !hdl->fs->fs_ops)
            goto no_op;
        SAVE_PROFILE_INTERVAL(do_poll_get_handle);

        /* search for a repeated entry */
        struct poll_handle * rep = polling;
        for ( ; rep ; rep = rep->next)
            if (rep->handle == hdl)
                break;
        SAVE_PROFILE_INTERVAL(do_poll_search_repeat);

        p->flags    = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        p->handle   = NULL;
        p->next     = NULL;
        p->children = NULL;
        if (rep) {
            /* if there are repeated handles and we already know the
               result, skip them (note |=: the DO_R/DO_W bits set above
               must be preserved for the caller's result translation) */
            if (rep->flags & (KNOWN_R|POLL_R)) {
                p->flags |= rep->flags & (KNOWN_R|RET_R|RET_E|POLL_R);
                do_r = false;
            }

            if (rep->flags & (KNOWN_W|POLL_W)) {
                p->flags |= rep->flags & (KNOWN_W|RET_W|RET_E|POLL_W);
                do_w = false;
            }

            p->next = rep->children;
            rep->children = p;

            if (!do_r && !do_w) {
                SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
                continue;
            }
        } else {
            get_handle(hdl);
            p->handle = hdl;
            p->next = polling;
            polling = p;
        }
        SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);

        /* do the easiest check first: the handle's access mode */
        if (do_r && !(hdl->acc_mode & MAY_READ)) {
            p->flags |= KNOWN_R;
            debug("fd %d known to be not readable\n", p->fd);
            do_r = false;
        }

        if (do_w && !(hdl->acc_mode & MAY_WRITE)) {
            p->flags |= KNOWN_W;
            debug("fd %d known to be not writeable\n", p->fd);
            do_w = false;
        }
        SAVE_PROFILE_INTERVAL(do_poll_check_accmode);

        if (!do_r && !do_w)
            goto done_finding;

        /* if the fs provides a poll operation, try it */
        if (hdl->fs->fs_ops->poll) {
            int need_poll = 0;

            if (do_r && !(p->flags & POLL_R))
                need_poll |= FS_POLL_RD;
            if (do_w && !(p->flags & POLL_W))
                need_poll |= FS_POLL_WR;

            if (need_poll) {
                int polled = hdl->fs->fs_ops->poll(hdl, need_poll);

                if (polled & FS_POLL_ER) {
                    debug("fd %d known to have error\n", p->fd);
                    p->flags |= KNOWN_R|KNOWN_W|RET_E;
                    do_r = do_w = false;
                }

                if (polled & FS_POLL_RD) {
                    debug("fd %d known to be readable\n", p->fd);
                    p->flags |= KNOWN_R|RET_R;
                    do_r = false;
                }

                if (polled & FS_POLL_WR) {
                    debug("fd %d known to be writeable\n", p->fd);
                    p->flags |= KNOWN_W|RET_W;
                    do_w = false;
                }
            }
            SAVE_PROFILE_INTERVAL(do_poll_vfs_polling);

            if (!do_r && !do_w)
                goto done_finding;
        }

        struct poll_handle * to_poll = rep ? : p;

        if (!(to_poll->flags & (POLL_R|POLL_W))) {
            if (!hdl->pal_handle) {
                p->flags |= KNOWN_R|KNOWN_W|RET_E;
                do_r = do_w = false;
                goto done_finding;
            }

            debug("polling fd %d\n", to_poll->fd);
            npals++;
        }

        to_poll->flags |= (do_r ? POLL_R : 0)|(do_w ? POLL_W : 0);

done_finding:
        /* feed the new knowledge back to the repeated handle */
        if (rep)
            rep->flags |= p->flags &
                          (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E|POLL_R|POLL_W);

        if (do_r)
            has_r = true;

        if (p->flags & (RET_R|RET_W|RET_E))
            has_known = true;

        SAVE_PROFILE_INTERVAL(do_poll_update_bookkeeping);
    }
    unlock(map->lock);
    SAVE_PROFILE_INTERVAL_SINCE(do_poll_first_loop, begin_time);

    if (!npals)
        goto done_polling;
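
    /* Second pass: collect PAL handles for the entries that still need
     * polling, and drop the ones whose results are already known. */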
    pals = __try_alloca(cur, sizeof(PAL_HANDLE) * npals);
    npals = 0;

    n = &polling;
    for (p = polling ; p ; p = p->next) {
        assert(p->handle);

        if (!(p->flags & (POLL_R|POLL_W))) {
            *n = p->next;
            put_handle(p->handle);
            p->handle = NULL;
            continue;
        }

        pals[npals++] = p->handle->pal_handle;
        n = &p->next;
    }
    SAVE_PROFILE_INTERVAL(do_poll_second_loop);
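
    /* Wait on the remaining PAL handles. Block with the full timeout only
     * while some fd still needs a read result and nothing is known yet;
     * otherwise use a zero timeout so each iteration only peeks. */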
    while (npals) {
        unsigned long pal_timeout = (has_r && !has_known) ? timeout : 0;
        PAL_HANDLE polled = DkObjectsWaitAny(npals, pals, pal_timeout);

        if (pal_timeout)
            SAVE_PROFILE_INTERVAL(do_poll_wait_any);
        else
            SAVE_PROFILE_INTERVAL(do_poll_wait_any_peek);

        if (!polled)
            break;

        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQuerybyHandle(polled, &attr))
            break;

        n = &polling;
        for (p = polling ; p ; p = p->next) {
            if (p->handle->pal_handle == polled)
                break;
            n = &p->next;
        }

        if (!p)
            break;

        debug("handle %s is polled\n", qstrgetstr(&p->handle->uri));

        p->flags |= KNOWN_R|KNOWN_W;

        if (attr.disconnected) {
            debug("handle is polled to be disconnected\n");
            p->flags |= RET_E;
        }
        if (attr.readable) {
            debug("handle is polled to be readable\n");
            p->flags |= RET_R;
        }
        if (attr.writeable) {
            debug("handle is polled to be writeable\n");
            p->flags |= RET_W;
        }

        for (q = p->children ; q ; q = q->next)
            q->flags |= p->flags & (KNOWN_R|KNOWN_W|RET_W|RET_R|RET_E);

        if ((p->flags & (POLL_R|KNOWN_R)) != (POLL_R|KNOWN_R) &&
            (p->flags & (POLL_W|KNOWN_W)) != (POLL_W|KNOWN_W))
            continue;

        has_known = true;

        *n = p->next;
        put_handle(p->handle);
        p->handle = NULL;
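
        /* compact pals[] to drop the PAL handle we just resolved */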
        int nskip = 0;
        for (int i = 0 ; i < npals ; i++)
            if (pals[i] == polled) {
                nskip = 1;
            } else if (nskip) {
                pals[i - nskip] = pals[i];
            }

        npals -= nskip;
        SAVE_PROFILE_INTERVAL(do_poll_third_loop);
    }
done_polling:
    for (p = polling ; p ; p = p->next)
        put_handle(p->handle);
    SAVE_PROFILE_INTERVAL(do_poll_fourth_loop);

    if (pals)
        __try_free(cur, pals);

    return 0;
}
int shim_do_poll (struct pollfd * fds, nfds_t nfds, int timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }
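
    /* poll's timeout is in milliseconds (negative means infinite);
     * __do_poll takes microseconds, hence the conversion below */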
    int ret = __do_poll(nfds, polls,
                        timeout < 0 ? POLL_NOTIMEOUT : timeout * 1000ULL);
    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (fds[i].events & (POLLERR|POLLHUP));

        if (fds[i].revents)
            ret++;
    }
out:
    __try_free(cur, polls);
    return ret;
}
int shim_do_ppoll (struct pollfd * fds, int nfds, struct timespec * tsp,
                   const __sigset_t * sigmask, size_t sigsetsize)
{
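    /* XXX: sigmask and sigsetsize are accepted but currently ignored;
     * the signal mask is not applied around the wait */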
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(nfds, polls, timeout);
    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (fds[i].events & (POLLERR|POLLHUP));

        if (fds[i].revents)
            ret++;
    }
out:
    __try_free(cur, polls);
    return ret;
}
typedef long int __fd_mask;

#ifndef __NFDBITS
#define __NFDBITS       (8 * (int) sizeof (__fd_mask))
#endif

#ifndef __FDS_BITS
#define __FDS_BITS(set) ((set)->fds_bits)
#endif

/* We don't use `memset' because this would require a prototype and
   the array isn't too big.  */
#define __FD_ZERO(set)                                                    \
    do {                                                                  \
        unsigned int __i;                                                 \
        fd_set *__arr = (set);                                            \
        for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i)  \
            __FDS_BITS (__arr)[__i] = 0;                                  \
    } while (0)

#define __FD_ELT(d)     ((d) / __NFDBITS)
#define __FD_MASK(d)    ((__fd_mask) 1 << ((d) % __NFDBITS))

#define __FD_SET(d, set)                                                  \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] |= __FD_MASK (d)))
#define __FD_CLR(d, set)                                                  \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] &= ~__FD_MASK (d)))
#define __FD_ISSET(d, set)                                                \
    ((__FDS_BITS (set)[__FD_ELT (d)] & __FD_MASK (d)) != 0)
DEFINE_PROFILE_CATAGORY(select, );
DEFINE_PROFILE_INTERVAL(select_tryalloca_1, select);
DEFINE_PROFILE_INTERVAL(select_setup_array, select);
DEFINE_PROFILE_INTERVAL(select_do_poll, select);
DEFINE_PROFILE_INTERVAL(select_fd_zero, select);
DEFINE_PROFILE_INTERVAL(select_fd_sets, select);
DEFINE_PROFILE_INTERVAL(select_try_free, select);
int shim_do_select (int nfds, fd_set * readfds, fd_set * writefds,
                    fd_set * errorfds, struct __kernel_timeval * tsv)
{
    BEGIN_PROFILE_INTERVAL();

    if (!nfds) {
        if (!tsv)
            return -EINVAL;

        struct __kernel_timespec tsp;
        tsp.tv_sec = tsv->tv_sec;
        tsp.tv_nsec = tsv->tv_usec * 1000;
        return shim_do_nanosleep(&tsp, NULL);
    }

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;
    SAVE_PROFILE_INTERVAL(select_tryalloca_1);

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        debug("poll fd %d %s%s\n", fd, do_r ? "R" : "", do_w ? "W" : "");
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }
    SAVE_PROFILE_INTERVAL(select_setup_array);

    unsigned long timeout = tsv ?
                            tsv->tv_sec * 1000000ULL + tsv->tv_usec :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);
    SAVE_PROFILE_INTERVAL(select_do_poll);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);
    SAVE_PROFILE_INTERVAL(select_fd_zero);
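
    /* Translate the per-fd results back into the three fd_sets. An fd
     * counts toward the return value once per set it appears in, as
     * select() requires. The error test ((DO_R|DO_W|RET_E) > RET_E) holds
     * exactly when RET_E is set together with at least one DO_* bit. */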
    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }
        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
    SAVE_PROFILE_INTERVAL(select_fd_sets);
out:
    __try_free(cur, polls);
    SAVE_PROFILE_INTERVAL(select_try_free);
    return ret;
}
int shim_do_pselect6 (int nfds, fd_set * readfds, fd_set * writefds,
                      fd_set * errorfds, const struct __kernel_timespec * tsp,
                      const __sigset_t * sigmask)
{
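    /* XXX: sigmask is accepted but currently ignored; the signal mask is
     * not applied around the wait */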
    if (!nfds)
        return tsp ? shim_do_nanosleep(tsp, NULL) : -EINVAL;

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);
    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }
        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
out:
    __try_free(cur, polls);
    return ret;
}