shim_poll.c
/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * shim_poll.c
 *
 * Implementation of the system calls "poll", "ppoll", "select" and
 * "pselect6".
 */
#include <shim_internal.h>
#include <shim_table.h>
#include <shim_utils.h>
#include <shim_thread.h>
#include <shim_handle.h>
#include <shim_fs.h>
#include <shim_profile.h>

#include <pal.h>
#include <pal_error.h>
#include <linux_list.h>

#include <errno.h>
#include <linux/fcntl.h>
void __attribute__((noreturn))
fortify_fail (const char * msg)
{
    /* The loop is added only to keep gcc happy. */
    while (1)
        debug("*** %s ***\n", msg);
}

void __attribute__((noreturn))
chk_fail (void)
{
    fortify_fail("buffer overflow detected");
}
static inline __attribute__((always_inline))
void * __try_alloca (struct shim_thread * cur, int size)
{
    if (!size)
        return NULL;

    if (check_stack_size(cur, size))
        return __alloca(size);
    else
        return malloc(size);
}

static inline __attribute__((always_inline))
void __try_free (struct shim_thread * cur, void * mem)
{
    if (mem && !check_on_stack(cur, mem))
        free(mem);
}
DEFINE_PROFILE_CATAGORY(__do_poll, select);
DEFINE_PROFILE_INTERVAL(do_poll_get_handle, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_search_repeat, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_set_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_check_accmode, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_vfs_polling, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_update_bookkeeping, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_first_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_second_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_wait_any_peek, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_third_loop, __do_poll);
DEFINE_PROFILE_INTERVAL(do_poll_fourth_loop, __do_poll);
#define DO_R        0001
#define DO_W        0002
#define KNOWN_R     0004
#define KNOWN_W     0010
#define RET_R       0020
#define RET_W       0040
#define RET_E       0100
#define POLL_R      0200
#define POLL_W      0400
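
/* Flag semantics, as used by __do_poll() below:
 *   DO_R / DO_W        - caller asked to poll the fd for read / write
 *   KNOWN_R / KNOWN_W  - the read / write answer is already decided
 *   RET_R / RET_W      - report the fd as readable / writeable
 *   RET_E              - report an error or hangup on the fd
 *   POLL_R / POLL_W    - the fd still needs polling at the PAL layer
 */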
struct poll_handle {
    unsigned short       flags;
    FDTYPE               fd;
    struct shim_handle * handle;
    struct poll_handle * next;
    struct poll_handle * children;
} __attribute__((packed));

#define POLL_NOTIMEOUT  ((unsigned long) -1)
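
/* Overview (summarizing the code below): __do_poll() proceeds in three
 * phases: (1) walk the poll array, deduplicate fds that share a handle,
 * and answer what it can from access modes and the fs-level poll
 * operation; (2) gather the PAL handles that still need polling; and
 * (3) wait on them with DkObjectsWaitAny() until every polled handle has
 * a known result or the wait times out.  All callers convert their
 * timeout to microseconds first; POLL_NOTIMEOUT means wait forever. */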
static int __do_poll (int npolls, struct poll_handle * polls,
                      unsigned long timeout)
{
    struct shim_thread * cur = get_cur_thread();
    struct shim_handle_map * map = cur->handle_map;

    int npals = 0;
    bool has_r = false, has_known = false;

    struct poll_handle * polling = NULL;
    struct poll_handle * p, ** n, * q;
    PAL_HANDLE * pals = NULL;

#ifdef PROFILE
    unsigned long begin_time = GET_PROFILE_INTERVAL();
    BEGIN_PROFILE_INTERVAL_SET(begin_time);
#endif
    lock(map->lock);

    for (p = polls ; p < &polls[npolls] ; p++) {
        bool do_r = p->flags & DO_R;
        bool do_w = p->flags & DO_W;

        if (!do_r && !do_w) {
no_op:
            p->flags  = 0;
            p->handle = NULL;
            UPDATE_PROFILE_INTERVAL();
            continue;
        }

        struct shim_handle * hdl = __get_fd_handle(p->fd, NULL, map);
        /* an invalid fd, or a handle without filesystem operations,
           cannot be polled */
        if (!hdl || !hdl->fs || !hdl->fs->fs_ops)
            goto no_op;
        SAVE_PROFILE_INTERVAL(do_poll_get_handle);
        /* search for a repeated entry */
        struct poll_handle * rep = polling;
        for ( ; rep ; rep = rep->next)
            if (rep->handle == hdl)
                break;
        SAVE_PROFILE_INTERVAL(do_poll_search_repeat);

        p->flags    = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        p->handle   = NULL;
        p->next     = NULL;
        p->children = NULL;

        if (rep) {
            /* if there are repeated handles and we already know the
               result, let's skip them.  Use |= so the DO_* bits (and any
               bits inherited from the other direction) are preserved. */
            if (rep->flags & (KNOWN_R|POLL_R)) {
                p->flags |= rep->flags & (KNOWN_R|RET_R|RET_E|POLL_R);
                do_r = false;
            }

            if (rep->flags & (KNOWN_W|POLL_W)) {
                p->flags |= rep->flags & (KNOWN_W|RET_W|RET_E|POLL_W);
                do_w = false;
            }

            p->next = rep->children;
            rep->children = p;

            if (!do_r && !do_w) {
                SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
                continue;
            }
        } else {
            get_handle(hdl);
            p->handle = hdl;
            p->next = polling;
            polling = p;
        }
        SAVE_PROFILE_INTERVAL(do_poll_set_bookkeeping);
        /* do the easiest check first: the handle's access mode */
        if (do_r && !(hdl->acc_mode & MAY_READ)) {
            p->flags |= KNOWN_R;
            debug("fd %d known to be not readable\n", p->fd);
            do_r = false;
        }

        if (do_w && !(hdl->acc_mode & MAY_WRITE)) {
            p->flags |= KNOWN_W;
            debug("fd %d known to be not writeable\n", p->fd);
            do_w = false;
        }
        SAVE_PROFILE_INTERVAL(do_poll_check_accmode);

        if (!do_r && !do_w)
            goto done_finding;

        /* if the fs provides a poll operation, let's try it. */
        if (hdl->fs->fs_ops->poll) {
            int need_poll = 0;

            if (do_r && !(p->flags & POLL_R))
                need_poll |= FS_POLL_RD;
            if (do_w && !(p->flags & POLL_W))
                need_poll |= FS_POLL_WR;

            if (need_poll) {
                int polled = hdl->fs->fs_ops->poll(hdl, need_poll);

                if (polled != -EAGAIN) {
                    if (polled & FS_POLL_ER) {
                        debug("fd %d known to have error\n", p->fd);
                        p->flags |= KNOWN_R|KNOWN_W|RET_E;
                    }

                    if (do_r && (polled & FS_POLL_RD)) {
                        debug("fd %d known to be readable\n", p->fd);
                        p->flags |= KNOWN_R|RET_R;
                        do_r = false;
                    }

                    if (do_w && (polled & FS_POLL_WR)) {
                        debug("fd %d known to be writeable\n", p->fd);
                        p->flags |= KNOWN_W|RET_W;
                        do_w = false;
                    }
                }
            }
            SAVE_PROFILE_INTERVAL(do_poll_vfs_polling);

            if (!do_r && !do_w)
                goto done_finding;
        }
        struct poll_handle * to_poll = rep ? : p;

        if (!(to_poll->flags & (POLL_R|POLL_W))) {
            if (!hdl->pal_handle) {
                p->flags |= (KNOWN_R|KNOWN_W|RET_E);
                do_r = do_w = false;
                goto done_finding;
            }

            debug("polling fd %d\n", to_poll->fd);
            npals++;
        }

        to_poll->flags |= (do_r ? POLL_R : 0)|(do_w ? POLL_W : 0);

done_finding:
        /* feed the new knowledge back to repeated handles */
        if (rep)
            rep->flags |= p->flags &
                          (KNOWN_R|KNOWN_W|RET_R|RET_W|RET_E|POLL_R|POLL_W);

        if (do_r)
            has_r = true;

        if (p->flags & (RET_R|RET_W|RET_E))
            has_known = true;

        SAVE_PROFILE_INTERVAL(do_poll_update_bookkeeping);
    }
    unlock(map->lock);
    SAVE_PROFILE_INTERVAL_SINCE(do_poll_first_loop, begin_time);

    if (!npals)
        goto done_polling;

    pals = __try_alloca(cur, sizeof(PAL_HANDLE) * npals);
    npals = 0;
    for (n = &polling ; (p = *n) ; ) {
        if (!(p->flags & (POLL_R|POLL_W))) {
            /* unlink without advancing n, so n keeps pointing into the
               live list even across consecutive removals */
            *n = p->next;
            put_handle(p->handle);
            p->handle = NULL;
            continue;
        }

        pals[npals++] = p->handle->pal_handle;
        n = &p->next;
    }
    SAVE_PROFILE_INTERVAL(do_poll_second_loop);
    while (npals) {
        int pal_timeout = (has_r && !has_known) ? timeout : 0;
        PAL_HANDLE polled = DkObjectsWaitAny(npals, pals, pal_timeout);

        if (pal_timeout)
            SAVE_PROFILE_INTERVAL(do_poll_wait_any);
        else
            SAVE_PROFILE_INTERVAL(do_poll_wait_any_peek);

        if (!polled)
            break;

        PAL_STREAM_ATTR attr;
        if (!DkStreamAttributesQuerybyHandle(polled, &attr))
            break;

        for (n = &polling, p = polling ; p ; n = &p->next, p = p->next)
            if (p->handle->pal_handle == polled)
                break;

        if (!p)
            break;

        debug("handle %s is polled\n", qstrgetstr(&p->handle->uri));

        if (attr.disconnected) {
            debug("handle is polled to be disconnected\n");
            p->flags |= (KNOWN_R|KNOWN_W|RET_E);
        }
        if (attr.readable) {
            debug("handle is polled to be readable\n");
            p->flags |= (KNOWN_R|RET_R);
        }
        if (attr.writeable) {
            debug("handle is polled to be writeable\n");
            p->flags |= (KNOWN_W|RET_W);
        }

        for (q = p->children ; q ; q = q->next)
            q->flags |= p->flags & (KNOWN_R|KNOWN_W|RET_W|RET_R|RET_E);

        if ((p->flags & (POLL_R|KNOWN_R)) != (POLL_R|KNOWN_R) &&
            (p->flags & (POLL_W|KNOWN_W)) != (POLL_W|KNOWN_W))
            continue;

        has_known = true;

        *n = p->next;
        put_handle(p->handle);
        p->handle = NULL;

        int nskip = 0;
        for (int i = 0 ; i < npals ; i++)
            if (pals[i] == polled) {
                nskip = 1;
            } else if (nskip) {
                pals[i - nskip] = pals[i];
            }
        npals -= nskip;
        SAVE_PROFILE_INTERVAL(do_poll_third_loop);
    }

done_polling:
    for (p = polling ; p ; p = p->next)
        put_handle(p->handle);
    SAVE_PROFILE_INTERVAL(do_poll_fourth_loop);

    if (pals)
        __try_free(cur, pals);

    return 0;
}
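
/* Caller contract, as a hedged sketch (it mirrors what shim_do_poll()
 * does below rather than adding new API): set fd and the DO_* bits, call
 * __do_poll(), then read the RET_* bits back out:
 *
 *     struct poll_handle ph = { .fd = fd, .flags = DO_R };
 *     if (__do_poll(1, &ph, timeout_us) == 0 && (ph.flags & RET_R))
 *         ... fd is readable ...
 */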
int shim_do_poll (struct pollfd * fds, nfds_t nfds, int timeout)
{
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    /* poll's timeout is in milliseconds; __do_poll takes microseconds */
    int ret = __do_poll(nfds, polls,
                        timeout < 0 ? POLL_NOTIMEOUT : timeout * 1000ULL);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (fds[i].events & (POLLERR|POLLHUP));

        if (fds[i].revents)
            ret++;
    }
out:
    __try_free(cur, polls);
    return ret;
}
int shim_do_ppoll (struct pollfd * fds, int nfds, struct timespec * tsp,
                   const __sigset_t * sigmask, size_t sigsetsize)
{
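    /* NOTE: sigmask and sigsetsize are accepted but currently unused; a
     * faithful ppoll would atomically install sigmask for the duration
     * of the wait. */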
    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);

    for (int i = 0 ; i < nfds ; i++) {
        polls[i].fd = fds[i].fd;
        polls[i].flags = 0;
        if (fds[i].events & (POLLIN|POLLRDNORM))
            polls[i].flags |= DO_R;
        if (fds[i].events & (POLLOUT|POLLWRNORM))
            polls[i].flags |= DO_W;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(nfds, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    for (int i = 0 ; i < nfds ; i++) {
        fds[i].revents = 0;

        if (polls[i].flags & RET_R)
            fds[i].revents |= (fds[i].events & (POLLIN|POLLRDNORM));
        if (polls[i].flags & RET_W)
            fds[i].revents |= (fds[i].events & (POLLOUT|POLLWRNORM));
        if (polls[i].flags & RET_E)
            fds[i].revents |= (fds[i].events & (POLLERR|POLLHUP));

        if (fds[i].revents)
            ret++;
    }
out:
    __try_free(cur, polls);
    return ret;
}
typedef long int __fd_mask;

#ifndef __NFDBITS
#define __NFDBITS       (8 * (int) sizeof (__fd_mask))
#endif

#ifndef __FDS_BITS
#define __FDS_BITS(set) ((set)->fds_bits)
#endif

/* We don't use `memset' because this would require a prototype and
   the array isn't too big.  */
#define __FD_ZERO(set)                                                    \
    do {                                                                  \
        unsigned int __i;                                                 \
        fd_set *__arr = (set);                                            \
        for (__i = 0; __i < sizeof (fd_set) / sizeof (__fd_mask); ++__i)  \
            __FDS_BITS (__arr)[__i] = 0;                                  \
    } while (0)

#define __FD_ELT(d)     ((d) / __NFDBITS)
#define __FD_MASK(d)    ((__fd_mask) 1 << ((d) % __NFDBITS))

#define __FD_SET(d, set)                                                  \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] |= __FD_MASK (d)))
#define __FD_CLR(d, set)                                                  \
    ((void) (__FDS_BITS (set)[__FD_ELT (d)] &= ~__FD_MASK (d)))
#define __FD_ISSET(d, set)                                                \
    ((__FDS_BITS (set)[__FD_ELT (d)] & __FD_MASK (d)) != 0)
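
/* Worked example of the bit math above, assuming a 64-bit __fd_mask:
 * __NFDBITS is 64, so fd 70 lives in word __FD_ELT(70) == 1 under bit
 * mask __FD_MASK(70) == (__fd_mask) 1 << 6; __FD_SET, __FD_CLR and
 * __FD_ISSET all address exactly that one bit. */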
DEFINE_PROFILE_CATAGORY(select, );
DEFINE_PROFILE_INTERVAL(select_tryalloca_1, select);
DEFINE_PROFILE_INTERVAL(select_setup_array, select);
DEFINE_PROFILE_INTERVAL(select_do_poll, select);
DEFINE_PROFILE_INTERVAL(select_fd_zero, select);
DEFINE_PROFILE_INTERVAL(select_fd_sets, select);
DEFINE_PROFILE_INTERVAL(select_try_free, select);
int shim_do_select (int nfds, fd_set * readfds, fd_set * writefds,
                    fd_set * errorfds, struct __kernel_timeval * tsv)
{
    BEGIN_PROFILE_INTERVAL();

    if (!nfds) {
        if (!tsv)
            return -EINVAL;

        /* nothing to poll; just sleep for the requested time */
        struct __kernel_timespec tsp;
        tsp.tv_sec = tsv->tv_sec;
        tsp.tv_nsec = tsv->tv_usec * 1000;
        return shim_do_nanosleep(&tsp, NULL);
    }

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;
    SAVE_PROFILE_INTERVAL(select_tryalloca_1);

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }
    SAVE_PROFILE_INTERVAL(select_setup_array);

    unsigned long timeout = tsv ?
                            tsv->tv_sec * 1000000ULL + tsv->tv_usec :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);
    SAVE_PROFILE_INTERVAL(select_do_poll);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);
    SAVE_PROFILE_INTERVAL(select_fd_zero);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }
        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }
        /* "> RET_E" holds only when RET_E is set together with at least
           one of DO_R/DO_W, i.e. an error on an fd we were asked about */
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
    SAVE_PROFILE_INTERVAL(select_fd_sets);
out:
    __try_free(cur, polls);
    SAVE_PROFILE_INTERVAL(select_try_free);
    return ret;
}
int shim_do_pselect6 (int nfds, fd_set * readfds, fd_set * writefds,
                      fd_set * errorfds, const struct __kernel_timespec * tsp,
                      const __sigset_t * sigmask)
{
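    /* NOTE: as in shim_do_ppoll() above, sigmask is accepted but not
     * applied to the waiting thread. */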
    if (!nfds)
        return tsp ? shim_do_nanosleep(tsp, NULL) : -EINVAL;

    struct shim_thread * cur = get_cur_thread();

    struct poll_handle * polls =
            __try_alloca(cur, sizeof(struct poll_handle) * nfds);
    int npolls = 0;

    for (int fd = 0 ; fd < nfds ; fd++) {
        bool do_r = (readfds  && __FD_ISSET(fd, readfds));
        bool do_w = (writefds && __FD_ISSET(fd, writefds));
        if (!do_r && !do_w)
            continue;
        polls[npolls].fd = fd;
        polls[npolls].flags = (do_r ? DO_R : 0)|(do_w ? DO_W : 0);
        npolls++;
    }

    unsigned long timeout = tsp ?
                            tsp->tv_sec * 1000000ULL + tsp->tv_nsec / 1000 :
                            POLL_NOTIMEOUT;

    int ret = __do_poll(npolls, polls, timeout);

    if (ret < 0)
        goto out;

    ret = 0;

    if (readfds)
        __FD_ZERO(readfds);
    if (writefds)
        __FD_ZERO(writefds);
    if (errorfds)
        __FD_ZERO(errorfds);

    for (int i = 0 ; i < npolls ; i++) {
        if (readfds && ((polls[i].flags & (DO_R|RET_R)) == (DO_R|RET_R))) {
            __FD_SET(polls[i].fd, readfds);
            ret++;
        }
        if (writefds && ((polls[i].flags & (DO_W|RET_W)) == (DO_W|RET_W))) {
            __FD_SET(polls[i].fd, writefds);
            ret++;
        }
        /* same RET_E-plus-DO test as in shim_do_select above */
        if (errorfds && ((polls[i].flags & (DO_R|DO_W|RET_E)) > RET_E)) {
            __FD_SET(polls[i].fd, errorfds);
            ret++;
        }
    }
out:
    __try_free(cur, polls);
    return ret;
}