db_object.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * db_object.c
 *
 * This file contains APIs for closing or polling PAL handles.
 */

#include "pal_defs.h"
#include "pal_linux_defs.h"
#include "pal.h"
#include "pal_internal.h"
#include "pal_linux.h"
#include "pal_error.h"
#include "pal_debug.h"
#include "api.h"

#include <linux/time.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <atomic.h>
#include <asm/errno.h>

#define DEFAULT_QUANTUM 500
/* Wait internally on a single object. Also used as a shortcut to wait on
 * events and semaphores.
 *
 * Returns 0 on success, a negative value on failure
 * (e.g., -PAL_ERROR_TRYAGAIN). */
static int _DkObjectWaitOne (PAL_HANDLE handle, uint64_t timeout)
{
    /* Only handles that carry file descriptors (or an eventfd) are polled
       here; events and semaphores skip this part and fall through to their
       own wait callback below. */
    if (HANDLE_HDR(handle)->flags & HAS_FDS) {
        /* The timeout is in microseconds; an all-ones timeout value is
           negative when viewed as signed and means "block indefinitely". */
        struct timespec timeout_ts;

        if ((int64_t) timeout >= 0) {
            long sec = (unsigned long) timeout / 1000000;
            long microsec = (unsigned long) timeout - (sec * 1000000);
            timeout_ts.tv_sec = sec;
            timeout_ts.tv_nsec = microsec * 1000;
        }
        struct pollfd fds[MAX_FDS];
        int off[MAX_FDS];
        int nfds = 0;

        for (int i = 0 ; i < MAX_FDS ; i++) {
            int events = 0;

            if ((HANDLE_HDR(handle)->flags & RFD(i)) &&
                !(HANDLE_HDR(handle)->flags & ERROR(i)))
                events |= POLLIN;

            if ((HANDLE_HDR(handle)->flags & WFD(i)) &&
                !(HANDLE_HDR(handle)->flags & WRITEABLE(i)) &&
                !(HANDLE_HDR(handle)->flags & ERROR(i)))
                events |= POLLOUT;

            if (events) {
                fds[nfds].fd = handle->generic.fds[i];
                fds[nfds].events = events|POLLHUP|POLLERR;
                fds[nfds].revents = 0;
                off[nfds] = i;
                nfds++;
            }
        }

        if (!nfds)
            return -PAL_ERROR_TRYAGAIN;
        int ret = INLINE_SYSCALL(ppoll, 5, fds, nfds,
                                 (int64_t) timeout >= 0 ? &timeout_ts : NULL,
                                 NULL, 0);

        if (IS_ERR(ret))
            switch (ERRNO(ret)) {
                case EINTR:
                case ERESTART:
                    return -PAL_ERROR_INTERRUPTED;
                default:
                    return unix_to_pal_error(ERRNO(ret));
            }

        if (!ret)
            return -PAL_ERROR_TRYAGAIN;

        /* Record the new state of each descriptor in the handle flags. */
        for (int i = 0 ; i < nfds ; i++) {
            if (!fds[i].revents)
                continue;
            if (fds[i].revents & POLLOUT)
                HANDLE_HDR(handle)->flags |= WRITEABLE(off[i]);
            if (fds[i].revents & (POLLHUP|POLLERR))
                HANDLE_HDR(handle)->flags |= ERROR(off[i]);
        }

        return 0;
    }

    const struct handle_ops * ops = HANDLE_OPS(handle);

    if (!ops->wait)
        return -PAL_ERROR_NOTSUPPORT;

    return ops->wait(handle, timeout);
}
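
/* Editor's sketch, not part of the original file: the microsecond-to-timespec
 * conversion above is duplicated in _DkObjectsWaitAny() below, and could be
 * factored into one helper like this. The helper name and the
 * NULL-means-"block indefinitely" convention are assumptions for
 * illustration, not an existing PAL API. */
static inline struct timespec * timeout_to_timespec (uint64_t timeout_us,
                                                     struct timespec * ts)
{
    /* An all-ones timeout is negative when viewed as signed; ppoll() then
       gets a NULL timespec and blocks indefinitely. */
    if ((int64_t) timeout_us < 0)
        return NULL;

    ts->tv_sec  = (unsigned long) timeout_us / 1000000;
    ts->tv_nsec = ((unsigned long) timeout_us % 1000000) * 1000;
    return ts;
}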
/* _DkObjectsWaitAny is for internal use: it waits for any of the handles in
   the handle array. A timeout (in microseconds) may be given for the wait;
   on success, *polled is set to the handle that became ready. */
int _DkObjectsWaitAny (int count, PAL_HANDLE * handleArray, uint64_t timeout,
                       PAL_HANDLE * polled)
{
    if (count <= 0)
        return 0;

    if (count == 1) {
        int rv = _DkObjectWaitOne(handleArray[0], timeout);
        if (rv == 0)
            *polled = handleArray[0];
        return rv;
    }
    int i, j, ret, maxfds = 0, nfds = 0;

    /* Polling on multiple synchronous objects is not allowed; doing so would
       violate the division of labor between the PAL and the library OS. */
    for (i = 0 ; i < count ; i++) {
        PAL_HANDLE hdl = handleArray[i];

        if (!hdl)
            continue;

        if (!(HANDLE_HDR(hdl)->flags & HAS_FDS))
            return -PAL_ERROR_NOTSUPPORT;

        /* Eliminate repeated entries. */
        for (j = 0 ; j < i ; j++)
            if (hdl == handleArray[j])
                break;

        if (j == i) {
            for (j = 0 ; j < MAX_FDS ; j++)
                if (HANDLE_HDR(hdl)->flags & (RFD(j)|WFD(j)))
                    maxfds++;
        }
    }
    struct pollfd * fds = __alloca(sizeof(struct pollfd) * maxfds);
    PAL_HANDLE * hdls = __alloca(sizeof(PAL_HANDLE) * maxfds);

    for (i = 0 ; i < count ; i++) {
        PAL_HANDLE hdl = handleArray[i];

        if (!hdl)
            continue;

        /* Skip handles that already appeared earlier in the array. */
        for (j = 0 ; j < i ; j++)
            if (hdl == handleArray[j])
                break;

        if (j < i)
            continue;

        for (j = 0 ; j < MAX_FDS ; j++) {
            int events = 0;

            if ((HANDLE_HDR(hdl)->flags & RFD(j)) &&
                !(HANDLE_HDR(hdl)->flags & ERROR(j)))
                events |= POLLIN;

            if ((HANDLE_HDR(hdl)->flags & WFD(j)) &&
                !(HANDLE_HDR(hdl)->flags & WRITEABLE(j)) &&
                !(HANDLE_HDR(hdl)->flags & ERROR(j)))
                events |= POLLOUT;

            if (events && hdl->generic.fds[j] != PAL_IDX_POISON) {
                fds[nfds].fd = hdl->generic.fds[j];
                fds[nfds].events = events|POLLHUP|POLLERR;
                fds[nfds].revents = 0;
                hdls[nfds] = hdl;
                nfds++;
            }
        }
    }

    if (!nfds)
        return -PAL_ERROR_TRYAGAIN;

    /* As above, an all-ones timeout value means block indefinitely. */
    struct timespec timeout_ts;

    if ((int64_t) timeout >= 0) {
        long sec = (unsigned long) timeout / 1000000;
        long microsec = (unsigned long) timeout - (sec * 1000000);
        timeout_ts.tv_sec = sec;
        timeout_ts.tv_nsec = microsec * 1000;
    }
    ret = INLINE_SYSCALL(ppoll, 5, fds, nfds,
                         (int64_t) timeout >= 0 ? &timeout_ts : NULL,
                         NULL, 0);

    if (IS_ERR(ret))
        switch (ERRNO(ret)) {
            case EINTR:
            case ERESTART:
                return -PAL_ERROR_INTERRUPTED;
            default:
                return unix_to_pal_error(ERRNO(ret));
        }

    if (!ret)
        return -PAL_ERROR_TRYAGAIN;

    /* Pick the first handle that reported an event; further events are only
       consumed if they belong to that same handle. */
    PAL_HANDLE polled_hdl = NULL;

    for (i = 0 ; i < nfds ; i++) {
        if (!fds[i].revents)
            continue;

        PAL_HANDLE hdl = hdls[i];

        if (polled_hdl) {
            if (hdl != polled_hdl)
                continue;
        } else {
            polled_hdl = hdl;
        }

        /* Find which slot of this handle the ready descriptor belongs to. */
        for (j = 0 ; j < MAX_FDS ; j++)
            if ((HANDLE_HDR(hdl)->flags & (RFD(j)|WFD(j))) &&
                hdl->generic.fds[j] == fds[i].fd)
                break;

        if (j == MAX_FDS)
            continue;

        if (fds[i].revents & POLLOUT)
            HANDLE_HDR(hdl)->flags |= WRITEABLE(j);
        if (fds[i].revents & (POLLHUP|POLLERR))
            HANDLE_HDR(hdl)->flags |= ERROR(j);
    }

    *polled = polled_hdl;
    return polled_hdl ? 0 : -PAL_ERROR_TRYAGAIN;
}
#if TRACE_HEAP_LEAK == 1

/* Heap-allocation tracking, used when debugging leaks of PAL handles. */
PAL_HANDLE heap_alloc_head;
PAL_LOCK heap_alloc_trace_lock = LOCK_INIT;

HEAP_ALLOC_RECORD * collect_heap_alloc_records (PAL_NUM max_records)
{
    HEAP_ALLOC_RECORD * records =
            malloc(sizeof(HEAP_ALLOC_RECORD) * max_records);

    if (!records)
        return NULL;

    memset(records, 0, sizeof(HEAP_ALLOC_RECORD) * max_records);

    _DkInternalLock(&heap_alloc_trace_lock);

    PAL_HANDLE ptr = heap_alloc_head;
    int nrecords = 0, i;

    /* Walk the list of traced handles and aggregate them by caller address. */
    for (; ptr ; ptr = ptr->hdr.heap_trace.next) {
        assert(!ptr->hdr.heap_trace.next ||
               ptr->hdr.heap_trace.next->hdr.heap_trace.pprev ==
               &ptr->hdr.heap_trace.next);

        for (i = 0 ; i < nrecords ; i++)
            if (ptr->hdr.heap_trace.caller == records[i].caller) {
                records[i].count++;
                break;
            }

        if (i == nrecords) {
            if (nrecords == max_records)
                break;
            records[nrecords].caller = ptr->hdr.heap_trace.caller;
            records[nrecords].count = 1;
            nrecords++;
        }
    }

    _DkInternalUnlock(&heap_alloc_trace_lock);

    return records;
}

#endif /* TRACE_HEAP_LEAK == 1 */
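
/* Editor's sketch, not part of the original file: a minimal illustration of
 * how an internal caller might drive _DkObjectsWaitAny(). The two handles are
 * assumed to have been created elsewhere, and the guard macro below is
 * hypothetical, so this example is not compiled into the PAL by default. */
#ifdef DB_OBJECT_WAIT_EXAMPLE
static int wait_for_either (PAL_HANDLE a, PAL_HANDLE b)
{
    PAL_HANDLE handles[2] = { a, b };
    PAL_HANDLE ready = NULL;

    /* Wait up to one second; the timeout is given in microseconds. */
    int ret = _DkObjectsWaitAny(2, handles, 1000000, &ready);

    if (ret == -PAL_ERROR_TRYAGAIN)
        return 0;               /* nothing became ready within the timeout */
    if (ret == -PAL_ERROR_INTERRUPTED)
        return 0;               /* interrupted; the caller may retry */
    if (ret < 0)
        return ret;             /* propagate any other PAL error */

    /* ready now points at whichever handle reported an event. */
    return ready == a ? 1 : 2;
}
#endif /* DB_OBJECT_WAIT_EXAMPLE */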