enclave_pages.c

#include <pal_linux.h>
#include <pal_internal.h>
#include <pal_security.h>
#include <api.h>
#include "enclave_pages.h"

#include <list.h>
#include <stdint.h>

static size_t g_page_size = PRESET_PAGESIZE;

void * heap_base;
static uint64_t heap_size;

/* This list keeps heap_vma structures of used/reserved regions organized in DESCENDING order. */
DEFINE_LIST(heap_vma);
struct heap_vma {
    LIST_TYPE(heap_vma) list;
    void * top;
    void * bottom;
};
DEFINE_LISTP(heap_vma);

static LISTP_TYPE(heap_vma) heap_vma_list = LISTP_INIT;
PAL_LOCK heap_vma_lock = LOCK_INIT;

struct atomic_int alloced_pages, max_alloced_pages;
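
/* init_pages(): record the enclave heap boundaries provided in pal_sec
 * (heap_min/heap_max) and, if an executable is preloaded inside the enclave,
 * pre-reserve its address range (padded by MEMORY_GAP on both sides) so the
 * heap allocator never hands it out. */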
void init_pages (void)
{
    uint64_t reserved_for_exec = 0;

    heap_base = pal_sec.heap_min;
    heap_size = pal_sec.heap_max - pal_sec.heap_min;

    if (pal_sec.exec_size) {
        struct heap_vma * vma = malloc(sizeof(struct heap_vma));
        if (!vma) {
            SGX_DBG(DBG_E, "*** Cannot allocate VMA for preloaded executable ***\n");
            ocall_exit(1, /*is_exitgroup=*/true);
        }
        vma->bottom = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, pal_sec.heap_min);
        vma->top = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP,
                                   pal_sec.heap_max);
        reserved_for_exec = vma->top - vma->bottom;
        INIT_LIST_HEAD(vma, list);
        LISTP_ADD(vma, &heap_vma_list, list);
    }

    SGX_DBG(DBG_M, "available heap size: %lu M\n",
            (heap_size - reserved_for_exec) / 1024 / 1024);
}
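
/* Debug-only self-check: walk the VMA list and verify that it stays in strictly
 * descending, non-overlapping order within the heap. Compiled out unless
 * ASSERT_VMA is set to 1 below. */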
#define ASSERT_VMA 0

static void assert_vma_list (void)
{
#if ASSERT_VMA == 1
    void * last_addr = heap_base + heap_size;
    struct heap_vma * vma;

    LISTP_FOR_EACH_ENTRY(vma, &heap_vma_list, list) {
        SGX_DBG(DBG_M, "[%d] %p - %p\n", pal_sec.pid, vma->bottom, vma->top);

        if (last_addr < vma->top || vma->top <= vma->bottom) {
            SGX_DBG(DBG_E, "*** [%d] corrupted heap vma: %p - %p (last = %p) ***\n",
                    pal_sec.pid, vma->bottom, vma->top, last_addr);
#ifdef DEBUG
            if (pal_sec.in_gdb)
                __asm__ volatile ("int $3" ::: "memory");
#endif
            ocall_exit(1, /*is_exitgroup=*/true);
        }

        last_addr = vma->bottom;
    }
#endif
}
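
/* reserve_area(): record [addr, addr + size) as used. `prev` is the lowest VMA
 * whose bottom is at or above `addr` (NULL if the list is empty or every VMA
 * starts below the request). Existing VMAs that overlap the new range are
 * merged into a single entry. Returns the requested address on success, or
 * NULL if a new VMA could not be allocated. */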
static void * reserve_area(void * addr, size_t size, struct heap_vma * prev)
{
    struct heap_vma * next;

    if (prev) {
        // If this is the last entry, don't wrap around
        if (prev->list.next == LISTP_FIRST_ENTRY(&heap_vma_list, struct heap_vma, list))
            next = NULL;
        else
            next = prev->list.next;
    } else {
        /* In this case, the list is empty, or the
         * first vma starts at or below the allocation site.
         *
         * The next field will be used to merge vmas with the allocation, if
         * they overlap, until the vmas drop below the requested addr
         * (traversing in decreasing virtual address order).
         */
        next = LISTP_EMPTY(&heap_vma_list) ? NULL :
               LISTP_FIRST_ENTRY(&heap_vma_list, struct heap_vma, list);
    }

    if (prev && next)
        SGX_DBG(DBG_M, "insert vma between %p-%p and %p-%p\n",
                next->bottom, next->top, prev->bottom, prev->top);
    else if (prev)
        SGX_DBG(DBG_M, "insert vma below %p-%p\n", prev->bottom, prev->top);
    else if (next)
        SGX_DBG(DBG_M, "insert vma above %p-%p\n", next->bottom, next->top);

    struct heap_vma * vma = NULL;

    /* Merge with VMAs at or above `addr` that overlap the new range. */
    while (prev) {
        struct heap_vma * prev_prev = NULL;

        if (prev->bottom > addr + size)
            break;

        /* This appears to be doing a reverse search; we should stop before we
         * wrap back to the last entry. */
        if (prev->list.prev != LISTP_LAST_ENTRY(&heap_vma_list, struct heap_vma, list))
            prev_prev = LIST_ENTRY(prev->list.prev, struct heap_vma, list);

        if (!vma) {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
                    prev->bottom, prev->top);

            vma = prev;
            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
            vma->bottom = addr;
        } else {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                    prev->bottom, prev->top);

            vma->top = (prev->top > vma->top) ? prev->top : vma->top;
            LISTP_DEL(prev, &heap_vma_list, list);
            free(prev);
        }

        prev = prev_prev;
    }

    /* Merge with VMAs below the request that still overlap it. */
    while (next) {
        struct heap_vma * next_next = NULL;

        if (next->top < addr)
            break;

        if (next->list.next != LISTP_FIRST_ENTRY(&heap_vma_list, struct heap_vma, list))
            next_next = LIST_ENTRY(next->list.next, struct heap_vma, list);

        if (!vma) {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
                    next->bottom, next->top);

            vma = next;
            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
        } else {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                    next->bottom, next->top);

            vma->bottom = next->bottom;
            LISTP_DEL(next, &heap_vma_list, list);
            free(next);
        }

        next = next_next;
    }

    /* Nothing overlapped: create a fresh VMA for the requested range. */
    if (!vma) {
        vma = malloc(sizeof(struct heap_vma));
        if (!vma) {
            return NULL;
        }
        vma->top = addr + size;
        vma->bottom = addr;
        INIT_LIST_HEAD(vma, list);
        LISTP_ADD_AFTER(vma, prev, &heap_vma_list, list);
    }

    if (vma->bottom >= vma->top) {
        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n",
                vma->bottom, vma->top);
#ifdef DEBUG
        if (pal_sec.in_gdb)
            __asm__ volatile ("int $3" ::: "memory");
#endif
    }

    assert_vma_list();

    atomic_add(size / g_page_size, &alloced_pages);
    return addr;
}
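
/* get_reserved_pages(): page-align the request and reserve it in the VMA list.
 * If `addr` is non-NULL, the region [addr, addr + size) must lie inside the
 * enclave heap and is reserved in place; if `addr` is NULL, the allocator
 * searches downward from the top of the heap for the first gap large enough to
 * hold `size` bytes. Returns the reserved address or NULL on failure. */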
// TODO: This function should be fixed to always either return exactly `addr` or
// fail.
void * get_reserved_pages(void * addr, size_t size)
{
    if (!size)
        return NULL;

    SGX_DBG(DBG_M, "*** get_reserved_pages: heap_base %p, heap_size %lu, limit %p ***\n",
            heap_base, heap_size, heap_base + heap_size);
    if (addr >= heap_base + heap_size) {
        SGX_DBG(DBG_E, "*** allocating out of heap: %p ***\n", addr);
        return NULL;
    }

    size = ALIGN_UP(size, g_page_size);
    addr = ALIGN_DOWN_PTR(addr, g_page_size);

    SGX_DBG(DBG_M, "allocate %lu bytes at %p\n", size, addr);

    _DkInternalLock(&heap_vma_lock);

    struct heap_vma * prev = NULL;
    struct heap_vma * vma;

    /* Allocating in the heap region. This loop searches the vma list to
     * find the first vma with a starting address lower than the requested
     * address. Recall that vmas are in descending order.
     *
     * If the very first vma matches, prev will be null.
     */
    if (addr && addr >= heap_base &&
        addr + size <= heap_base + heap_size) {
        LISTP_FOR_EACH_ENTRY(vma, &heap_vma_list, list) {
            if (vma->bottom < addr)
                break;
            prev = vma;
        }
        void * ret = reserve_area(addr, size, prev);
        _DkInternalUnlock(&heap_vma_lock);
        return ret;
    }

    /* A specific address was requested but it does not fit inside the heap. */
    if (addr) {
        _DkInternalUnlock(&heap_vma_lock);
        return NULL;
    }

    /* No address requested: scan downward for the first gap big enough. */
    void * avail_top = heap_base + heap_size;

    LISTP_FOR_EACH_ENTRY(vma, &heap_vma_list, list) {
        if ((size_t)(avail_top - vma->top) > size) {
            addr = avail_top - size;
            void * ret = reserve_area(addr, size, prev);
            _DkInternalUnlock(&heap_vma_lock);
            return ret;
        }
        prev = vma;
        avail_top = prev->bottom;
    }

    /* Finally, check the gap between the lowest vma (or the heap top if the
     * list is empty) and the heap base. */
    if (avail_top >= heap_base + size) {
        addr = avail_top - size;
        void * ret = reserve_area(addr, size, prev);
        _DkInternalUnlock(&heap_vma_lock);
        return ret;
    }

    _DkInternalUnlock(&heap_vma_lock);

    SGX_DBG(DBG_E, "*** Not enough space on the heap (requested = %lu) ***\n", size);
    __asm__ volatile("int $3");
    return NULL;
}
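
/* free_pages(): release [addr, addr + size) back to the allocator. The range is
 * page-aligned and clipped to the heap boundaries; any VMA that overlaps it is
 * shrunk, split in two, or removed entirely. */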
void free_pages(void * addr, size_t size)
{
    void * addr_top = addr + size;

    SGX_DBG(DBG_M, "free_pages: trying to free %p %lu\n", addr, size);

    if (!addr || !size)
        return;

    addr = ALIGN_DOWN_PTR(addr, g_page_size);
    addr_top = ALIGN_UP_PTR(addr_top, g_page_size);

    if (addr >= heap_base + heap_size)
        return;
    if (addr_top <= heap_base)
        return;
    if (addr_top > heap_base + heap_size)
        addr_top = heap_base + heap_size;
    if (addr < heap_base)
        addr = heap_base;

    SGX_DBG(DBG_M, "free %lu bytes at %p\n", size, addr);

    _DkInternalLock(&heap_vma_lock);

    struct heap_vma * vma, * p;

    LISTP_FOR_EACH_ENTRY_SAFE(vma, p, &heap_vma_list, list) {
        if (vma->bottom >= addr_top)
            continue;
        if (vma->top <= addr)
            break;

        /* The freed range cuts this vma in two: keep the part below `addr` as a
         * new entry, and let the existing entry keep the part above. */
        if (vma->bottom < addr) {
            struct heap_vma * new = malloc(sizeof(struct heap_vma));
            if (!new) {
                SGX_DBG(DBG_E, "*** Cannot split VMA on free (out of memory) ***\n");
                ocall_exit(1, /*is_exitgroup=*/true);
            }
            new->top = addr;
            new->bottom = vma->bottom;
            INIT_LIST_HEAD(new, list);
            LIST_ADD(new, vma, list);
        }

        vma->bottom = addr_top;
        if (vma->top <= vma->bottom) {
            LISTP_DEL(vma, &heap_vma_list, list);
            free(vma);
        }
    }

    assert_vma_list();

    _DkInternalUnlock(&heap_vma_lock);

    unsigned int val = atomic_read(&alloced_pages);
    atomic_sub(size / g_page_size, &alloced_pages);
    if (val > atomic_read(&max_alloced_pages))
        atomic_set(&max_alloced_pages, val);
}
void print_alloced_pages (void)
{
    unsigned int val = atomic_read(&alloced_pages);
    unsigned int max = atomic_read(&max_alloced_pages);

    printf(" >>>>>>>> "
           "Enclave heap size = %10u pages / %10lu pages\n",
           val > max ? val : max, heap_size / g_page_size);
}
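
/* Usage sketch (illustration only, not part of this file): a hypothetical
 * PAL-level caller would typically pair get_reserved_pages() with free_pages()
 * as below. The byte count and the NULL first argument (meaning "pick any
 * address from the top of the heap") are assumptions made for the example. */
#if 0
static void example_alloc_and_free(void)
{
    size_t len = 4 * PRESET_PAGESIZE;            /* four enclave pages */
    void * mem = get_reserved_pages(NULL, len);  /* allocator chooses the address */
    if (!mem)
        return;                                  /* heap exhausted */

    /* ... use the reserved enclave pages ... */

    free_pages(mem, len);                        /* return them to the VMA list */
}
#endif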