enclave_pages.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
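
/*
 * enclave_pages.c
 *
 * Bookkeeping for the enclave heap: reserving ranges of heap pages and
 * returning them, while tracking which parts of the heap are currently in
 * use.
 */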

#include <pal_linux.h>
#include <pal_internal.h>
#include <pal_security.h>
#include <api.h>
#include "enclave_pages.h"

#include <list.h>
#include <stdint.h>

static unsigned long pgsz = PRESET_PAGESIZE;
void * heap_base;
static uint64_t heap_size;

/* This list keeps heap_vma structures of the heap regions that are currently
 * reserved (in use), organized in DESCENDING address order. The gaps between
 * the entries are free. */
DEFINE_LIST(heap_vma);
struct heap_vma {
    LIST_TYPE(heap_vma) list;
    void * top;
    void * bottom;
};
DEFINE_LISTP(heap_vma);
static LISTP_TYPE(heap_vma) heap_vma_list = LISTP_INIT;
PAL_LOCK heap_vma_lock = LOCK_INIT;

struct atomic_int alloced_pages, max_alloced_pages;
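
/* Record the heap range provided in pal_sec and, if an executable is mapped
 * inside that range, mark its pages as already reserved so the allocator
 * never hands them out. */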
void init_pages (void)
{
    heap_base = pal_sec.heap_min;
    heap_size = pal_sec.heap_max - pal_sec.heap_min;

    SGX_DBG(DBG_M, "available heap size: %llu M\n",
            (heap_size - pal_sec.exec_size) / 1024 / 1024);

    if (pal_sec.exec_size) {
        struct heap_vma * vma = malloc(sizeof(struct heap_vma));
        vma->top = pal_sec.exec_addr + pal_sec.exec_size;
        vma->bottom = pal_sec.exec_addr;
        INIT_LIST_HEAD(vma, list);
        listp_add(vma, &heap_vma_list, list);
    }
}

#define ASSERT_VMA 0

static void assert_vma_list (void)
{
#if ASSERT_VMA == 1
    void * last_addr = heap_base + heap_size;
    struct heap_vma * vma;

    listp_for_each_entry(vma, &heap_vma_list, list) {
        SGX_DBG(DBG_M, "[%d] %p - %p\n", pal_sec.pid, vma->bottom, vma->top);
        if (last_addr < vma->top || vma->top <= vma->bottom) {
            SGX_DBG(DBG_E, "*** [%d] corrupted heap vma: %p - %p (last = %p) ***\n",
                    pal_sec.pid, vma->bottom, vma->top, last_addr);
#ifdef DEBUG
            if (pal_sec.in_gdb)
                asm volatile ("int $3" ::: "memory");
#endif
            ocall_exit();
        }
        last_addr = vma->bottom;
    }
#endif
}
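
/* Reserve `size` bytes of enclave heap. If `addr` is non-NULL, the request is
 * for that specific (page-aligned) address; otherwise the highest free gap
 * large enough for the rounded-up size is chosen. Returns the base address of
 * the reservation, or NULL on failure. */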
// TODO: This function should be fixed to always either return exactly `addr`
// or fail.
void * get_reserved_pages(void * addr, uint64_t size)
{
    if (!size)
        return NULL;

    SGX_DBG(DBG_M, "*** get_reserved_pages: heap_base %p, heap_size %llu, limit %p ***\n",
            heap_base, heap_size, heap_base + heap_size);
    if (addr >= heap_base + heap_size) {
        SGX_DBG(DBG_E, "*** allocating out of heap: %p ***\n", addr);
        return NULL;
    }

    if (size & (pgsz - 1))
        size = ((size + pgsz - 1) & ~(pgsz - 1));
    if ((uintptr_t) addr & (pgsz - 1))
        addr = (void *) ((uintptr_t) addr & ~(pgsz - 1));

    SGX_DBG(DBG_M, "allocate %llu bytes at %p\n", size, addr);

    _DkInternalLock(&heap_vma_lock);

    struct heap_vma * prev = NULL, * next;
    struct heap_vma * vma;

    /* Allocating in the heap region. This loop searches the vma list to
     * find the first vma with a starting address lower than the requested
     * address. Recall that vmas are in descending order.
     *
     * If the very first vma matches, prev will be null.
     */
    if (addr && addr >= heap_base &&
        addr + size <= heap_base + heap_size) {
        listp_for_each_entry(vma, &heap_vma_list, list) {
            if (vma->bottom < addr)
                break;
            prev = vma;
        }
        goto allocated;
    }

    if (addr) {
        _DkInternalUnlock(&heap_vma_lock);
        return NULL;
    }
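
    /* No specific address was requested: walk the reserved regions from the
     * top of the heap downward and take the highest gap that can hold the
     * request. */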
    void * avail_top = heap_base + heap_size;

    listp_for_each_entry(vma, &heap_vma_list, list) {
        if (avail_top - vma->top > size) {
            addr = avail_top - size;
            goto allocated;
        }
        prev = vma;
        avail_top = prev->bottom;
    }

    if (avail_top >= heap_base + size) {
        addr = avail_top - size;
        goto allocated;
    }

    _DkInternalUnlock(&heap_vma_lock);
    SGX_DBG(DBG_E, "*** Not enough space on the heap (requested = %llu) ***\n", size);
    asm volatile("int $3");
    return NULL;

allocated:
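    /* Record the reservation: find the neighboring reserved regions and merge
     * [addr, addr + size) with every region it overlaps, first walking upward
     * from `prev` and then downward from `next`. */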
    if (prev) {
        // If this is the last entry, don't wrap around
        if (prev->list.next == listp_first_entry(&heap_vma_list, struct heap_vma, list))
            next = NULL;
        else
            next = prev->list.next;
    } else {
        /* In this case, the list is empty, or
         * first vma starts at or below the allocation site.
         *
         * The next field will be used to merge vmas with the allocation, if
         * they overlap, until the vmas drop below the requested addr
         * (traversing in decreasing virtual address order)
         */
        next = listp_empty(&heap_vma_list) ? NULL :
            listp_first_entry(&heap_vma_list, struct heap_vma, list);
    }

    if (prev && next)
        SGX_DBG(DBG_M, "insert vma between %p-%p and %p-%p\n",
                next->bottom, next->top, prev->bottom, prev->top);
    else if (prev)
        SGX_DBG(DBG_M, "insert vma below %p-%p\n", prev->bottom, prev->top);
    else if (next)
        SGX_DBG(DBG_M, "insert vma above %p-%p\n", next->bottom, next->top);

    vma = NULL;

    while (prev) {
        struct heap_vma * prev_prev = NULL;

        if (prev->bottom > addr + size)
            break;

        /* This appears to be doing a reverse search; we should stop before we
         * wrap back to the last entry */
        if (prev->list.prev != listp_last_entry(&heap_vma_list, struct heap_vma, list))
            prev_prev = list_entry(prev->list.prev, struct heap_vma, list);

        if (!vma) {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
                    prev->bottom, prev->top);

            vma = prev;
            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
            vma->bottom = addr;
        } else {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                    prev->bottom, prev->top);

            vma->top = (prev->top > vma->top) ? prev->top : vma->top;
            listp_del(prev, &heap_vma_list, list);
            free(prev);
        }

        prev = prev_prev;
    }
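
    /* Merge with the reserved regions below the allocation, stopping once a
     * region no longer reaches up to `addr`. */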
    while (next) {
        struct heap_vma * next_next = NULL;

        if (next->top < addr)
            break;

        if (next->list.next != listp_first_entry(&heap_vma_list, struct heap_vma, list))
            next_next = list_entry(next->list.next, struct heap_vma, list);

        if (!vma) {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
                    next->bottom, next->top);

            vma = next;
            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
        } else {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                    next->bottom, next->top);

            vma->bottom = next->bottom;
            listp_del(next, &heap_vma_list, list);
            free(next);
        }

        next = next_next;
    }
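
    /* The request did not overlap any existing region: record it in a freshly
     * allocated vma, inserted after `prev` to keep the list in descending
     * order. */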
    if (!vma) {
        vma = malloc(sizeof(struct heap_vma));
        if (!vma) {
            _DkInternalUnlock(&heap_vma_lock);
            return NULL;
        }
        vma->top = addr + size;
        vma->bottom = addr;
        INIT_LIST_HEAD(vma, list);
        listp_add_after(vma, prev, &heap_vma_list, list);
    }

    if (vma->bottom >= vma->top) {
        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n",
                vma->bottom, vma->top);
#ifdef DEBUG
        if (pal_sec.in_gdb)
            asm volatile ("int $3" ::: "memory");
#endif
    }

    assert_vma_list();

    _DkInternalUnlock(&heap_vma_lock);

    atomic_add(size / pgsz, &alloced_pages);
    return addr;
}
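
/* Release the pages in [addr, addr + size): the range is rounded out to page
 * boundaries, clipped to the heap, carved out of any reserved regions it
 * overlaps, and subtracted from the allocated-page count. */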
void free_pages(void * addr, uint64_t size)
{
    void * addr_top = addr + size;

    SGX_DBG(DBG_M, "free_pages: trying to free %p %llu\n", addr, size);

    if (!addr || !size)
        return;

    if ((uintptr_t) addr_top & (pgsz - 1))
        addr_top = (void *) (((uintptr_t) addr_top + pgsz - 1) & ~(pgsz - 1));
    if ((uintptr_t) addr & (pgsz - 1))
        addr = (void *) ((uintptr_t) addr & ~(pgsz - 1));

    if (addr >= heap_base + heap_size)
        return;
    if (addr_top <= heap_base)
        return;
    if (addr_top > heap_base + heap_size)
        addr_top = heap_base + heap_size;
    if (addr < heap_base)
        addr = heap_base;

    SGX_DBG(DBG_M, "free %llu bytes at %p\n", size, addr);

    _DkInternalLock(&heap_vma_lock);

    struct heap_vma * vma, * p;
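
    /* Walk the reserved regions (in descending order) that overlap
     * [addr, addr_top): keep the part of each region below `addr` as a new
     * vma, trim the region so it starts at `addr_top`, and drop it entirely
     * if nothing remains. */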
    listp_for_each_entry_safe(vma, p, &heap_vma_list, list) {
        if (vma->bottom >= addr_top)
            continue;
        if (vma->top <= addr)
            break;

        if (vma->bottom < addr) {
            struct heap_vma * new = malloc(sizeof(struct heap_vma));
            new->top = addr;
            new->bottom = vma->bottom;
            INIT_LIST_HEAD(new, list);
            list_add(new, vma, list);
        }

        vma->bottom = addr_top;
        if (vma->top <= vma->bottom) {
            listp_del(vma, &heap_vma_list, list);
            free(vma);
        }
    }

    assert_vma_list();

    _DkInternalUnlock(&heap_vma_lock);

    unsigned int val = atomic_read(&alloced_pages);
    atomic_sub(size / pgsz, &alloced_pages);
    if (val > atomic_read(&max_alloced_pages))
        atomic_set(&max_alloced_pages, val);
}
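
/* Report the peak number of reserved heap pages, out of the total number of
 * pages in the enclave heap. */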
void print_alloced_pages (void)
{
    unsigned int val = atomic_read(&alloced_pages);
    unsigned int max = atomic_read(&max_alloced_pages);

    printf(" >>>>>>>> "
           "Enclave heap size = %10u pages / %10llu pages\n",
           val > max ? val : max, heap_size / pgsz);
}