sgx_graphene.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 OSCAR lab, Stony Brook University

   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
#include <pal.h>
#include <pal_error.h>
#include <linux_list.h>
#include <atomic.h>
#include <linux/futex.h>
#include <errno.h>

#include "sgx_internal.h"

#define MUTEX_SPINLOCK_TIMES 20
/* Futex-based mutex.  mut->value encodes the lock state: 1 means unlocked,
   0 means locked, and a negative value means locked with waiters queued on
   the futex. */
static int _DkMutexLock (struct mutex_handle * mut)
{
    int i, c = 0;
    int ret;
    struct atomic_int * m = &mut->value;

    /* Spin and try to take the lock */
    for (i = 0; i < MUTEX_SPINLOCK_TIMES; i++) {
        c = atomic_dec_and_test(m);
        if (c)
            goto success;
        cpu_relax();
    }

    /* The lock is now contended */
    while (!c) {
        int val = atomic_read(m);
        if (val == 1)
            goto again;

        ret = INLINE_SYSCALL(futex, 6, m, FUTEX_WAIT, val, NULL, NULL, 0);

        if (IS_ERR(ret) &&
            ERRNO(ret) != EWOULDBLOCK &&
            ERRNO(ret) != EINTR) {
            ret = -PAL_ERROR_DENIED;
            goto out;
        }

again:
        /* Upon wakeup, we still need to check whether the mutex is unlocked
         * or someone else took it.  If atomic_dec_and_test() returns true
         * (i.e., the old value of m was 1), we have taken the lock and exit
         * the loop.  Otherwise, we sleep again through another futex call.
         */
        c = atomic_dec_and_test(m);
    }

success:
    ret = 0;
out:
    return ret;
}
static int _DkMutexUnlock (struct mutex_handle * mut)
{
    int ret = 0;
    int must_wake = 0;
    struct atomic_int * m = &mut->value;

    /* A negative value means other threads are blocked on the futex and
       must be woken after we release the lock. */
    if (atomic_read(m) < 0)
        must_wake = 1;

    /* Release the lock (1 == unlocked). */
    atomic_set(m, 1);

    if (must_wake) {
        /* Wake one waiter; it will retry the decrement in _DkMutexLock. */
        ret = INLINE_SYSCALL(futex, 6, m, FUTEX_WAKE, 1, NULL, NULL, 0);
        if (IS_ERR(ret))
            return -PAL_ERROR_TRYAGAIN;
    }

    return 0;
}
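
/* Illustrative usage sketch, not part of the original file: a minimal
 * critical section built on the futex mutex above.  `example_lock` and
 * `example_counter` are hypothetical names used only for this example;
 * the lock must be set to 1 (unlocked) before first use. */
static struct mutex_handle example_lock;
static int example_counter;

static void example_init (void)
{
    atomic_set(&example_lock.value, 1);   /* 1 == unlocked */
}

static void example_increment (void)
{
    if (_DkMutexLock(&example_lock) == 0) {
        example_counter++;                /* protected by example_lock */
        _DkMutexUnlock(&example_lock);
    }
}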
static struct mutex_handle slabmgr_lock;
static void * untrusted_slabmgr = NULL;

#define system_lock()   _DkMutexLock(&slabmgr_lock)
#define system_unlock() _DkMutexUnlock(&slabmgr_lock)

#define PAGE_SIZE       (pagesize)
#define STARTUP_SIZE    8

static inline void * __malloc (int size)
{
    void * addr = NULL;

    addr = (void *) INLINE_SYSCALL(mmap, 6, NULL, size,
                                   PROT_READ | PROT_WRITE,
                                   MAP_PRIVATE | MAP_ANONYMOUS,
                                   -1, 0);

    if (IS_ERR_P(addr))
        return NULL;

    return addr;
}

#define system_malloc(size) __malloc(size)

static inline void __free (void * addr, int size)
{
    INLINE_SYSCALL(munmap, 2, addr, size);
}

#define system_free(addr, size) __free(addr, size)
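
/* slabmgr.h is a header-only slab allocator; including it here instantiates
 * it on top of the system_lock()/system_unlock() and
 * system_malloc()/system_free() hooks defined above, so the slab manager's
 * backing pages come from untrusted (host) mmap. */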
#include "slabmgr.h"
int init_untrusted_allocator (struct pal_sec * pal_sec)
{
    if (!untrusted_slabmgr) {
        untrusted_slabmgr = create_slab_mgr();
        if (!untrusted_slabmgr)
            return -PAL_ERROR_NOMEM;
    }

    pal_sec->untrusted_allocator.alignment = pagesize;
    pal_sec->untrusted_allocator.slabmgr = untrusted_slabmgr;
    pal_sec->untrusted_allocator.lock = &slabmgr_lock;
    return 0;
}
void * malloc_untrusted (int size)
{
    void * ptr = slab_alloc((SLAB_MGR) untrusted_slabmgr, size);

    /* The slab manager always leaves at least one byte of padding before
       the returned pointer, so we are free to store an offset in the byte
       just prior to the pointer. */
    if (ptr)
        *(((unsigned char *) ptr) - 1) = 0;

    return ptr;
}

void free_untrusted (void * ptr)
{
    /* Undo any offset recorded in the padding byte before freeing. */
    ptr -= *(((unsigned char *) ptr) - 1);
    slab_free((SLAB_MGR) untrusted_slabmgr, ptr);
}
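
/* Illustrative usage sketch, not part of the original file: a typical
 * round trip through the untrusted allocator.  `example_untrusted_buf`
 * is a hypothetical helper used only for this example. */
static int example_untrusted_buf (void)
{
    char * buf = malloc_untrusted(64);
    if (!buf)
        return -PAL_ERROR_NOMEM;

    /* ... exchange data with the untrusted host through buf ... */

    free_untrusted(buf);
    return 0;
}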
int _DkEventSet (PAL_HANDLE event, int wakeup)
{
    int ret = 0;

    if (event->event.isnotification) {
        // Leave it signaled, wake all
        if (atomic_cmpxchg(&event->event.signaled, 0, 1) == 0) {
            int nwaiters = atomic_read(&event->event.nwaiters);
            if (nwaiters) {
                if (wakeup != -1 && nwaiters > wakeup)
                    nwaiters = wakeup;

                ret = INLINE_SYSCALL(futex, 6, &event->event.signaled,
                                     FUTEX_WAKE, nwaiters, NULL, NULL, 0);
                if (IS_ERR(ret))
                    atomic_set(&event->event.signaled, 0);
            }
        }
    } else {
        // Only one thread wakes up, leave unsignaled
        ret = INLINE_SYSCALL(futex, 6, &event->event.signaled, FUTEX_WAKE, 1,
                             NULL, NULL, 0);
    }

    return IS_ERR(ret) ? -PAL_ERROR_TRYAGAIN : ret;
}
int _DkEventWait (PAL_HANDLE event)
{
    int ret = 0;

    if (!event->event.isnotification || !atomic_read(&event->event.signaled)) {
        atomic_inc(&event->event.nwaiters);

        do {
            ret = INLINE_SYSCALL(futex, 6, &event->event.signaled, FUTEX_WAIT,
                                 0, NULL, NULL, 0);

            if (IS_ERR(ret)) {
                if (ERRNO(ret) == EWOULDBLOCK) {
                    ret = 0;
                } else {
                    ret = -PAL_ERROR_DENIED;
                    break;
                }
            }
        } while (event->event.isnotification &&
                 !atomic_read(&event->event.signaled));

        atomic_dec(&event->event.nwaiters);
    }

    return ret;
}
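
/* Illustrative usage sketch, not part of the original file: with a
 * notification event (isnotification == 1, created by the PAL's event
 * setup path, which is not shown in this file), one thread can release
 * every waiter at once.  `example_broadcast` and `example_wait` are
 * hypothetical helpers used only for this example. */
static void example_broadcast (PAL_HANDLE event)
{
    /* wakeup == -1 places no cap on how many waiters are woken. */
    _DkEventSet(event, -1);
}

static int example_wait (PAL_HANDLE event)
{
    /* Blocks until the event is signaled; returns 0 on success. */
    return _DkEventWait(event);
}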
#define PRINTBUF_SIZE 256

struct printbuf {
    int idx;    // current buffer index
    int cnt;    // total bytes printed so far
    char buf[PRINTBUF_SIZE];
};

static void
fputch(void * f, int ch, struct printbuf * b)
{
    b->buf[b->idx++] = ch;
    if (b->idx == PRINTBUF_SIZE - 1) {
        INLINE_SYSCALL(write, 3, 2, b->buf, b->idx);
        b->idx = 0;
    }
    b->cnt++;
}

static int
vprintf(const char * fmt, va_list *ap)
{
    struct printbuf b;

    b.idx = 0;
    b.cnt = 0;
    vfprintfmt((void *) &fputch, NULL, &b, fmt, ap);
    /* Flush whatever remains in the buffer to stderr (fd 2). */
    INLINE_SYSCALL(write, 3, 2, b.buf, b.idx);

    return b.cnt;
}
int
pal_printf(const char * fmt, ...)
{
    va_list ap;
    int cnt;

    va_start(ap, fmt);
    cnt = vprintf(fmt, &ap);
    va_end(ap);

    return cnt;
}
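
/* Illustrative usage sketch, not part of the original file: pal_printf
 * emits formatted diagnostics to stderr (fd 2) through the buffered
 * fputch path above.  `example_report` is a hypothetical helper used
 * only for this example. */
static void example_report (int status)
{
    pal_printf("sgx: operation finished with status %d\n", status);
}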