/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_poll.c
 *
 * Implementation of the system calls "poll", "ppoll", "select" and
 * "pselect6".
 */

#include <shim_internal.h>
#include <shim_table.h>
#include <shim_utils.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>
#include <list.h>

#include <errno.h>
#include <linux/fcntl.h>

noreturn void
fortify_fail (const char *msg)
{
    /* The loop is added only to keep gcc happy. */
    while (1)
        debug("*** %s ***\n", msg);
}

noreturn void
chk_fail (void)
{
    fortify_fail("buffer overflow detected");
}
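
/* Note: fortify_fail() and chk_fail() appear to mirror glibc's
 * __fortify_fail()/__chk_fail(), the runtime hooks that compiler-emitted
 * _FORTIFY_SOURCE checks call on a detected buffer overflow; defining
 * them here lets fortified code inside the library OS fail loudly
 * instead of pulling in glibc's versions. */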

static inline __attribute__((always_inline))
void * __try_alloca (struct shim_thread * cur, int size)
{
    if (!size)
        return NULL;

    if (check_stack_size(cur, size))
        return __alloca(size);
    else
        return malloc(size);
}

static inline __attribute__((always_inline))
void __try_free (struct shim_thread * cur, void * mem)
{
    if (mem && !check_on_stack(cur, mem))
        free(mem);
}
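
/* __try_alloca()/__try_free() pick between stack and heap allocation:
 * requests that fit in the remaining stack come from __alloca() (freed
 * implicitly on return), anything else from malloc() (freed by
 * __try_free() when the pointer is not a stack address). Note that most
 * callers below do not check for a NULL return from the malloc() path.
 * An illustrative use (hypothetical, not part of this file):
 *
 *     struct shim_thread * cur = get_cur_thread();
 *     int * buf = __try_alloca(cur, sizeof(int) * n);
 *     // ... use buf[0..n-1] ...
 *     __try_free(cur, buf);   // frees only if buf was heap-allocated
 */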

DEFINE_PROFILE_CATEGORY(__do_poll, select);
DEFINE_PROFILE_INTERVAL(do_poll_get_handle, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_search_repeat, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_set_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_check_accmode, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_vfs_polling, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_update_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_first_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_second_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any_peek, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_third_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_fourth_loop, __do_poll);

#define DO_R        0001    /* caller asked to poll for reading */
#define DO_W        0002    /* caller asked to poll for writing */
#define KNOWN_R     0004    /* read readiness already determined */
#define KNOWN_W     0010    /* write readiness already determined */
#define RET_R       0020    /* report fd as readable */
#define RET_W       0040    /* report fd as writable */
#define RET_E       0100    /* report fd as error/hangup */
#define POLL_R      0200    /* must ask the PAL about reading */
#define POLL_W      0400    /* must ask the PAL about writing */

struct poll_handle {
    unsigned short       flags;
    FDTYPE               fd;
    struct shim_handle * handle;
    struct poll_handle * next;
    struct poll_handle * children;
} __attribute__((packed));
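
/* Each entry the caller passes in becomes one poll_handle. During
 * __do_poll(), entries that resolve to the same shim_handle are
 * deduplicated: the first occurrence is linked into the "polling" list
 * via `next`, and every repeat of the same handle is chained off that
 * first entry via `children`, so a readiness result discovered once can
 * be propagated to all duplicates without polling the handle twice. */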

#define POLL_NOTIMEOUT  ((unsigned long)-1)
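
/* Core worker shared by all four system calls. `timeout` is in
 * microseconds (each caller converts from its own units), with
 * POLL_NOTIMEOUT meaning block indefinitely. The function proceeds in
 * three phases: (1) walk the caller's entries under the handle-map
 * lock, deduplicate repeated handles, and settle whatever can be
 * decided cheaply (access mode, the filesystem's own poll operation);
 * (2) collect the PAL handles that still need polling; (3) wait on
 * them with DkObjectsWaitAny() until every wanted result is known or
 * the wait stops yielding handles. Results come back in each entry's
 * flags (RET_R/RET_W/RET_E). */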
static int __do_poll (int npolls, struct poll_handle * polls,
                      unsigned long timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct shim_handle_map * map = cur->handle_map;
    int npals = 0;
    bool has_r = false, has_known = false;
    struct poll_handle * polling = NULL;
    struct poll_handle * p, ** n, * q;
    PAL_HANDLE * pals = NULL;
    int ret = 0;

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
    BEGIN_PROFILE_INTERVAL_SET(begin_time);
#endif

    lock(&map->lock);

    for (p = polls ; p < polls + npolls ; p++) {
        bool do_r = p->flags & DO_R;
        bool do_w = p->flags & DO_W;

        if (!do_r && !do_w) {
no_op:
            p->flags  = 0;
            p->handle = NULL;
            UPDATE_PROFILE_INTERVAL();
            continue;
        }

        struct shim_handle * hdl = __get_fd_handle(p->fd, NULL, map);
        /* a bad fd must not crash the shim: treat a missing handle the
           same as one without fs operations */
        if (!hdl || !hdl->fs || !hdl->fs->fs_ops)
            goto no_op;

        SAVE_PROFILE_INTERVAL(do_poll_get_handle);

        /* search for a repeated entry */
        struct poll_handle * rep = polling;
        for ( ; rep ; rep = rep->next)
            if (rep->handle == hdl)
                break;

        SAVE_PROFILE_INTERVAL(do_poll_search_repeat);

        p->flags    = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        p->handle   = NULL;
        p->next     = NULL;
        p->children = NULL;

        if (rep) {
            /* if there are repeated handles and we already know the
               result, let's skip them */
            if (rep->flags & (KNOWN_R|POLL_R)) {
                p->flags = rep->flags & (KNOWN_R|RET_R|RET_E|POLL_R);
                do_r = false;
            }

            if (rep->flags & (KNOWN_W|POLL_W)) {
                p->flags = rep->flags & (KNOWN_W|RET_W|RET_E|POLL_W);
                do_w = false;
            }

            p->next = rep->children;
            rep->children = p;

            if (!do_r && !do_w) {
                SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
                continue;
            }
        } else {
            get_handle(hdl);
            p->handle = hdl;
            p->next = polling;
            polling = p;
        }

        SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);

        /* do the easiest check first: the handle's access mode */
        if (do_r && !(hdl->acc_mode & MAY_READ)) {
            p->flags |= KNOWN_R;
            debug("fd %d known to be not readable\n", p->fd);
            do_r = false;
        }

        if (do_w && !(hdl->acc_mode & MAY_WRITE)) {
            p->flags |= KNOWN_W;
            debug("fd %d known to be not writable\n", p->fd);
            do_w = false;
        }

        SAVE_PROFILE_INTERVAL(do_poll_check_accmode);

        if (!do_r && !do_w)
            goto done_finding;

        /* if the fs provides a poll operation, let's try it */
        if (hdl->fs->fs_ops->poll) {
            int need_poll = 0;

            if (do_r && !(p->flags & POLL_R))
                need_poll |= FS_POLL_RD;
            if (do_w && !(p->flags & POLL_W))
                need_poll |= FS_POLL_WR;

            if (need_poll) {
                int polled = hdl->fs->fs_ops->poll(hdl, need_poll);

                if (polled < 0) {
                    if (polled != -EAGAIN) {
                        unlock(&map->lock);
                        ret = polled;
                        goto done_polling;
                    }
                } else {
                    if (polled & FS_POLL_ER) {
                        debug("fd %d known to have error\n", p->fd);
                        p->flags |= KNOWN_R|KNOWN_W|RET_E;
                        do_r = do_w = false;
                    }

                    if ((polled & FS_POLL_RD)) {
                        debug("fd %d known to be readable\n", p->fd);
                        p->flags |= KNOWN_R|RET_R;
                        do_r = false;
                    }

                    if (polled & FS_POLL_WR) {
                        debug("fd %d known to be writable\n", p->fd);
                        p->flags |= KNOWN_W|RET_W;
                        do_w = false;
                    }
                }
            }

            SAVE_PROFILE_INTERVAL(do_poll_vfs_polling);

            if (!do_r && !do_w)
                goto done_finding;
        }

        struct poll_handle * to_poll = rep ? : p;

        if (!(to_poll->flags & (POLL_R|POLL_W))) {
            if (!hdl->pal_handle) {
                p->flags |= KNOWN_R|KNOWN_W|RET_E;
                do_r = do_w = false;
                goto done_finding;
            }

            debug("polling fd %d\n", to_poll->fd);
            npals++;
        }

        to_poll->flags |= (do_r ? POLL_R : 0)|(do_w ? POLL_W : 0);

done_finding:
        /* feed the new knowledge back to the repeated handles */
        if (rep)
            rep->flags |= p->flags &
                (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E|POLL_R|POLL_W);

        if (do_r)
            has_r = true;

        if (p->flags & (RET_R|RET_W|RET_E))
            has_known = true;

        SAVE_PROFILE_INTERVAL(do_poll_update_bookkeeping);
    }

    unlock(&map->lock);

    SAVE_PROFILE_INTERVAL_SINCE(do_poll_first_loop, begin_time);

    if (!npals) {
        ret = 0;
        goto done_polling;
    }

    pals = __try_alloca(cur, sizeof(PAL_HANDLE) * npals);
    if (!pals) {
        /* the malloc() path of __try_alloca() can fail */
        ret = -ENOMEM;
        goto done_polling;
    }
    npals = 0;

    n = &polling;
    for (p = polling ; p ; p = p->next) {
        assert(p->handle);

        if (!(p->flags & (POLL_R|POLL_W))) {
            *n = p->next;
            put_handle(p->handle);
            p->handle = NULL;
            continue;
        }

        pals[npals++] = p->handle->pal_handle;
        n = &p->next;
    }

    SAVE_PROFILE_INTERVAL(do_poll_second_loop);
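
    /* Wait strategy: block for the full timeout only while a caller
     * still wants readability (has_r) and no result is in hand yet
     * (!has_known); once anything is known, the remaining handles are
     * merely peeked at with a zero timeout, so already-available events
     * are harvested without delaying the return. */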
    while (npals) {
        int pal_timeout = (has_r && !has_known) ? timeout : 0;
        PAL_HANDLE polled = DkObjectsWaitAny(npals, pals, pal_timeout);

        if (pal_timeout)
            SAVE_PROFILE_INTERVAL(do_poll_wait_any);
        else
            SAVE_PROFILE_INTERVAL(do_poll_wait_any_peek);

        if (!polled)
            break;

        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQueryByHandle(polled, &attr))
            break;

        n = &polling;
        for (p = polling ; p ; p = p->next) {
            if (p->handle->pal_handle == polled)
                break;
            n = &p->next;
        }

        if (!p)
            break;

        debug("handle %s is polled\n", qstrgetstr(&p->handle->uri));

        p->flags |= KNOWN_R|KNOWN_W;

        if (attr.disconnected) {
            debug("handle is polled to be disconnected\n");
            p->flags |= RET_E;
        }
        if (attr.readable) {
            debug("handle is polled to be readable\n");
            p->flags |= RET_R;
        }
        if (attr.writable) {
            debug("handle is polled to be writable\n");
            p->flags |= RET_W;
        }

        /* propagate the result to every duplicate of this handle */
        for (q = p->children ; q ; q = q->next)
            q->flags |= p->flags & (KNOWN_R|KNOWN_W|RET_W|RET_R|RET_E);

        if ((p->flags & (POLL_R|KNOWN_R)) != (POLL_R|KNOWN_R) &&
            (p->flags & (POLL_W|KNOWN_W)) != (POLL_W|KNOWN_W))
            continue;

        has_known = true;

        *n = p->next;
        put_handle(p->handle);
        p->handle = NULL;

        /* drop the finished handle from the PAL array by shifting the
           tail left by one */
        int nskip = 0;
        for (int i = 0 ; i < npals ; i++)
            if (pals[i] == polled) {
                nskip = 1;
            } else if (nskip) {
                pals[i - nskip] = pals[i];
            }

        npals -= nskip;
        SAVE_PROFILE_INTERVAL(do_poll_third_loop);
    }

    ret = 0;

done_polling:
    for (p = polling ; p ; p = p->next)
        put_handle(p->handle);

    SAVE_PROFILE_INTERVAL(do_poll_fourth_loop);

    if (pals)
        __try_free(cur, pals);

    return ret;
}
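
/* Illustrative example (hypothetical, not called anywhere in this
 * file): waiting up to one second for fd 3 to become readable would
 * look like
 *
 *     struct poll_handle ph = { .flags = DO_R, .fd = 3 };
 *     int err = __do_poll(1, &ph, 1000000);      // microseconds
 *     if (!err && (ph.flags & RET_R))
 *         ;  // fd 3 is readable
 *
 * which is exactly the shape of the wrappers below. */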

int shim_do_poll (struct pollfd * fds, nfds_t nfds, int timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    /* poll's timeout is in milliseconds; __do_poll expects microseconds */
    int ret = __do_poll(nfds, polls,
                        timeout < 0 ? POLL_NOTIMEOUT : timeout * 1000ULL);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (POLLERR|POLLHUP);

        if (fds[i].revents)
            ret++;
    }
out:
    __try_free(cur, polls);
    return ret;
}
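
/* ppoll differs from poll only in taking a struct timespec and a signal
 * mask. Note that the timespec is converted with microsecond (not
 * nanosecond) resolution, and that `sigmask`/`sigsetsize` are accepted
 * but never applied: the mask is not installed for the duration of the
 * wait as it would be under Linux. */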
int shim_do_ppoll (struct pollfd * fds, int nfds, struct timespec * tsp,
                   const __sigset_t * sigmask, size_t sigsetsize)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(nfds, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            /* POLLERR and POLLHUP are reported regardless of the
               requested events, matching shim_do_poll above */
            fds[i].revents |= (POLLERR|POLLHUP);

        if (fds[i].revents)
            ret++;
    }
out:
    __try_free(cur, polls);
    return ret;
}

typedef long int __fd_mask;

#ifndef __NFDBITS
#define __NFDBITS       (8 * (int)sizeof(__fd_mask))
#endif

#ifndef __FDS_BITS
#define __FDS_BITS(set) ((set)->fds_bits)
#endif

/* We don't use `memset' because this would require a prototype and
   the array isn't too big.  */
#define __FD_ZERO(set)                                                  \
    do {                                                                \
        unsigned int __i;                                               \
        fd_set *__arr = (set);                                          \
        for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i) \
            __FDS_BITS (__arr)[__i] = 0;                                \
    } while (0)

#define __FD_ELT(d)     ((d) / __NFDBITS)
#define __FD_MASK(d)    ((__fd_mask) 1 << ((d) % __NFDBITS))

#define __FD_SET(d, set)                                                \
    ((void) (__FDS_BITS(set)[__FD_ELT(d)] |= __FD_MASK(d)))
#define __FD_CLR(d, set)                                                \
    ((void) (__FDS_BITS(set)[__FD_ELT(d)] &= ~__FD_MASK(d)))
#define __FD_ISSET(d, set)                                              \
    ((__FDS_BITS(set)[__FD_ELT(d)] & __FD_MASK(d)) != 0)
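
/* An fd_set is an array of __fd_mask words of __NFDBITS bits each (64
 * on LP64 targets). A worked example: with 64-bit words, fd 65 lives in
 * word __FD_ELT(65) == 1 at bit 65 % 64 == 1, so __FD_SET(65, set) is
 * equivalent to set->fds_bits[1] |= (__fd_mask) 1 << 1. */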

DEFINE_PROFILE_CATEGORY(select, );
DEFINE_PROFILE_INTERVAL(select_tryalloca_1, select);
DEFINE_PROFILE_INTERVAL(select_setup_array, select);
DEFINE_PROFILE_INTERVAL(select_do_poll, select);
DEFINE_PROFILE_INTERVAL(select_fd_zero, select);
DEFINE_PROFILE_INTERVAL(select_fd_sets, select);
DEFINE_PROFILE_INTERVAL(select_try_free, select);
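
/* select is translated onto __do_poll: every fd below nfds (which, per
 * the usual select convention, is the highest fd of interest plus one)
 * that appears in readfds/writefds becomes one poll_handle entry.
 * errorfds is effectively output-only here: a fd is reported in it only
 * if it was also being watched for reading or writing. With nfds == 0
 * the call degenerates into a nanosleep for the requested interval
 * (or -EINVAL when no timeout is given). */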
int shim_do_select (int nfds, fd_set * readfds, fd_set * writefds,
                    fd_set * errorfds, struct __kernel_timeval * tsv)
{
    BEGIN_PROFILE_INTERVAL();

    if (!nfds) {
        if (!tsv)
            return -EINVAL;

        struct __kernel_timespec tsp;
        tsp.tv_sec = tsv->tv_sec;
        tsp.tv_nsec = tsv->tv_usec * 1000;
        return shim_do_nanosleep(&tsp, NULL);
    }

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    SAVE_PROFILE_INTERVAL(select_tryalloca_1);

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));

        if (!do_r && !do_w)
            continue;

        debug("poll fd %d %s%s\n", fd, do_r ? "R" : "", do_w ? "W" : "");

        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    SAVE_PROFILE_INTERVAL(select_setup_array);

    unsigned long timeout = tsv ?
                            tsv->tv_sec * 1000000ULL + tsv->tv_usec :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);

    SAVE_PROFILE_INTERVAL(select_do_poll);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    SAVE_PROFILE_INTERVAL(select_fd_zero);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }

        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }

        /* "> RET_E" is true exactly when RET_E is set together with at
           least one of DO_R/DO_W (the DO_* bits are below RET_E) */
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
    SAVE_PROFILE_INTERVAL(select_fd_sets);
out:
    __try_free(cur, polls);
    SAVE_PROFILE_INTERVAL(select_try_free);
    return ret;
}
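
/* Illustrative example (hypothetical, not part of this file): waiting
 * up to 500 ms for fd 4 to become readable through this path would look
 * like
 *
 *     fd_set rfds;
 *     __FD_ZERO(&rfds);
 *     __FD_SET(4, &rfds);
 *     struct __kernel_timeval tv = { .tv_sec = 0, .tv_usec = 500000 };
 *     int n = shim_do_select(5, &rfds, NULL, NULL, &tv);
 *     // n == 1 and __FD_ISSET(4, &rfds) if fd 4 became readable
 */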
int shim_do_pselect6 (int nfds, fd_set * readfds, fd_set * writefds,
                      fd_set * errorfds, const struct __kernel_timespec * tsp,
                      const __sigset_t * sigmask)
{
    /* as with ppoll, the signal mask is accepted but not installed for
       the duration of the wait */
    if (!nfds)
        return tsp ? shim_do_nanosleep(tsp, NULL) : -EINVAL;

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));

        if (!do_r && !do_w)
            continue;

        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }

        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }

        /* see the note in shim_do_select about the "> RET_E" test */
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
out:
    __try_free(cur, polls);
    return ret;
}