enclave_pages.c

/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

#include <pal_linux.h>
#include <pal_internal.h>
#include <pal_security.h>
#include <api.h>

#include "enclave_pages.h"

#include <linux_list.h>
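
/*
 * Bookkeeping for enclave heap pages. Reserved ranges are tracked as
 * "heap VMAs" on a linked list protected by heap_vma_lock; the counters
 * below record the current and peak number of allocated pages.
 */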
static unsigned long pgsz = PRESET_PAGESIZE;

void * heap_base;
static uint64_t heap_size;
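
/*
 * A heap_vma describes one reserved range [bottom, top). The list is
 * kept sorted by address in descending order (highest range first),
 * which is the order the top-down allocator below walks it in.
 */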
struct heap_vma {
    struct list_head list;
    void * top;
    void * bottom;
};

static LIST_HEAD(heap_vma_list);
PAL_LOCK heap_vma_lock = LOCK_INIT;

struct atomic_int alloced_pages, max_alloced_pages;
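
/*
 * init_pages() initializes the allocator from the enclave layout: the
 * usable heap is [pal_sec.heap_min, pal_sec.heap_max). If an executable
 * is preloaded inside that range, its pages are pre-registered as a VMA
 * so the allocator never hands them out.
 */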
void init_pages (void)
{
    heap_base = pal_sec.heap_min;
    heap_size = pal_sec.heap_max - pal_sec.heap_min;

    SGX_DBG(DBG_M, "available heap size: %llu M\n",
            (heap_size - pal_sec.exec_size) / 1024 / 1024);

    if (pal_sec.exec_size) {
        /* carve the preloaded executable out of the heap */
        struct heap_vma * vma = malloc(sizeof(struct heap_vma));
        vma->top    = pal_sec.exec_addr + pal_sec.exec_size;
        vma->bottom = pal_sec.exec_addr;
        INIT_LIST_HEAD(&vma->list);
        list_add(&vma->list, &heap_vma_list);
    }
}
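
/*
 * Debug-only sanity check: walk the VMA list and verify it is sorted in
 * descending order with no overlaps. Compiled out unless ASSERT_VMA is
 * set to 1.
 */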
#define ASSERT_VMA 0

static void assert_vma_list (void)
{
#if ASSERT_VMA == 1
    void * last_addr = heap_base + heap_size;
    struct heap_vma * vma;

    list_for_each_entry(vma, &heap_vma_list, list) {
        SGX_DBG(DBG_M, "[%d] %p - %p\n", pal_sec.pid, vma->bottom, vma->top);
        if (last_addr < vma->top || vma->top <= vma->bottom) {
            SGX_DBG(DBG_E, "*** [%d] corrupted heap vma: %p - %p (last = %p) ***\n",
                    pal_sec.pid, vma->bottom, vma->top, last_addr);
#ifdef DEBUG
            if (pal_sec.in_gdb)
                asm volatile ("int $3" ::: "memory");
#endif
            ocall_exit();
        }
        last_addr = vma->bottom;
    }
#endif
}
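
/*
 * get_reserved_pages() reserves [size] bytes of enclave heap. If [addr]
 * is non-NULL, the exact (page-aligned) range is claimed; otherwise the
 * first hole large enough is taken, scanning from the top of the heap
 * downward. The claimed range is then merged with any overlapping or
 * adjacent VMAs so the list stays sorted and non-overlapping.
 */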
void * get_reserved_pages(void * addr, uint64_t size)
{
    if (!size)
        return NULL;

    if (addr >= heap_base + heap_size) {
        SGX_DBG(DBG_M, "*** allocating out of heap: %p ***\n", addr);
        return NULL;
    }

    /* round the size up and the address down to page boundaries */
    if (size & (pgsz - 1))
        size = ((size + pgsz - 1) & ~(pgsz - 1));

    if ((unsigned long) addr & (pgsz - 1))
        addr = (void *) ((unsigned long) addr & ~(pgsz - 1));

    SGX_DBG(DBG_M, "allocate %llu bytes at %p\n", size, addr);

    _DkInternalLock(&heap_vma_lock);

    struct heap_vma * prev = NULL, * next;
    struct heap_vma * vma;

    /* fixed-address request: find the closest VMA above the requested
       range (the list is sorted in descending address order) */
    if (addr && addr >= heap_base &&
        addr + size <= heap_base + heap_size) {
        list_for_each_entry(vma, &heap_vma_list, list) {
            if (vma->bottom < addr)
                break;
            prev = vma;
        }
        goto allocated;
    }

    if (addr) {
        _DkInternalUnlock(&heap_vma_lock);
        return NULL;
    }

    /* no address given: scan the holes between VMAs from the top of the
       heap downward and take the first one large enough */
    void * avail_top = heap_base + heap_size;

    list_for_each_entry(vma, &heap_vma_list, list) {
        if (vma->top < heap_base)
            break;
        if (avail_top >= vma->top + size) {
            addr = avail_top - size;
            goto allocated;
        }
        prev = vma;
        avail_top = prev->bottom;
    }

    if (avail_top >= heap_base + size) {
        addr = avail_top - size;
        goto allocated;
    }

    _DkInternalUnlock(&heap_vma_lock);
    SGX_DBG(DBG_E, "*** Not enough space on the heap (requested = %llu) ***\n", size);
    asm volatile("int $3"); /* debugger breakpoint on out-of-memory */
    return NULL;

allocated:
    /* [prev] is the closest VMA above the new range; [next] is the one
       right below it */
    if (prev) {
        next = (prev->list.next == &heap_vma_list) ? NULL :
               list_entry(prev->list.next, struct heap_vma, list);
    } else {
        next = list_empty(&heap_vma_list) ? NULL :
               list_first_entry(&heap_vma_list, struct heap_vma, list);
    }

    if (prev && next)
        SGX_DBG(DBG_M, "insert vma between %p-%p and %p-%p\n",
                next->bottom, next->top, prev->bottom, prev->top);
    else if (prev)
        SGX_DBG(DBG_M, "insert vma below %p-%p\n", prev->bottom, prev->top);
    else if (next)
        SGX_DBG(DBG_M, "insert vma above %p-%p\n", next->bottom, next->top);

    vma = NULL;

    /* absorb every VMA above that overlaps or touches the new range */
    while (prev) {
        struct heap_vma * prev_prev = NULL;

        if (prev->bottom > addr + size)
            break;

        if (prev->list.prev != &heap_vma_list)
            prev_prev = list_entry(prev->list.prev, struct heap_vma, list);

        if (!vma) {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
                    prev->bottom, prev->top);

            vma = prev;
            vma->top    = (addr + size > vma->top) ? addr + size : vma->top;
            vma->bottom = addr;
        } else {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                    prev->bottom, prev->top);

            vma->top = (prev->top > vma->top) ? prev->top : vma->top;
            list_del(&prev->list);
            free(prev);
        }

        prev = prev_prev;
    }

    /* absorb every VMA below that overlaps or touches the new range */
    while (next) {
        struct heap_vma * next_next = NULL;

        if (next->top < addr)
            break;

        if (next->list.next != &heap_vma_list)
            next_next = list_entry(next->list.next, struct heap_vma, list);

        if (!vma) {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
                    next->bottom, next->top);

            vma = next;
            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
        } else {
            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                    next->bottom, next->top);

            vma->bottom = next->bottom;
            list_del(&next->list);
            free(next);
        }

        next = next_next;
    }

    /* nothing to merge with: insert a fresh VMA in sorted position */
    if (!vma) {
        vma = malloc(sizeof(struct heap_vma));
        vma->top    = addr + size;
        vma->bottom = addr;
        INIT_LIST_HEAD(&vma->list);
        list_add(&vma->list, prev ? &prev->list : &heap_vma_list);
    }

    if (vma->bottom >= vma->top) {
        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n",
                vma->bottom, vma->top);
#ifdef DEBUG
        if (pal_sec.in_gdb)
            asm volatile ("int $3" ::: "memory");
#endif
    }

    assert_vma_list();

    _DkInternalUnlock(&heap_vma_lock);

    atomic_add(size / pgsz, &alloced_pages);
    return addr;
}
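
/*
 * free_pages() releases [addr, addr + size). The range is rounded
 * outward to page boundaries and clamped to the heap; every VMA it
 * intersects is split, shrunk, or deleted accordingly.
 */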
void free_pages(void * addr, uint64_t size)
{
    void * addr_top = addr + size;

    if (!addr || !size)
        return;

    /* round the top up and the bottom down to page boundaries */
    if ((unsigned long) addr_top & (pgsz - 1))
        addr_top = (void *) (((unsigned long) addr_top + pgsz - 1) & ~(pgsz - 1));

    if ((unsigned long) addr & (pgsz - 1))
        addr = (void *) ((unsigned long) addr & ~(pgsz - 1));

    /* clamp the range to the heap */
    if (addr >= heap_base + heap_size)
        return;
    if (addr_top <= heap_base)
        return;
    if (addr_top > heap_base + heap_size)
        addr_top = heap_base + heap_size;
    if (addr < heap_base)
        addr = heap_base;

    SGX_DBG(DBG_M, "free %llu bytes at %p\n", size, addr);

    _DkInternalLock(&heap_vma_lock);

    struct heap_vma * vma, * p;

    list_for_each_entry_safe(vma, p, &heap_vma_list, list) {
        if (vma->bottom >= addr_top)
            continue;   /* entirely above the freed range; keep scanning */
        if (vma->top <= addr)
            break;      /* entirely below the freed range; done */

        /* the VMA extends below the freed range: split off the low part */
        if (vma->bottom < addr) {
            struct heap_vma * new = malloc(sizeof(struct heap_vma));
            new->top    = addr;
            new->bottom = vma->bottom;
            INIT_LIST_HEAD(&new->list);
            list_add(&new->list, &vma->list);
        }

        /* shrink the VMA to whatever is left above the freed range */
        vma->bottom = addr_top;
        if (vma->top <= vma->bottom) {
            list_del(&vma->list);
            free(vma);
        }
    }

    assert_vma_list();

    _DkInternalUnlock(&heap_vma_lock);

    unsigned int val = atomic_read(&alloced_pages);
    atomic_sub(size / pgsz, &alloced_pages);
    if (val > atomic_read(&max_alloced_pages))
        atomic_set(&max_alloced_pages, val);
}
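
/* report the peak heap usage against the total heap size */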
void print_alloced_pages (void)
{
    unsigned int val = atomic_read(&alloced_pages);
    unsigned int max = atomic_read(&max_alloced_pages);

    printf(" >>>>>>>> "
           "Enclave heap size = %10u pages / %10llu pages\n",
           val > max ? val : max, heap_size / pgsz);
}