enclave_pages.c

#include "api.h"
#include "enclave_pages.h"
#include "list.h"
#include "pal_error.h"
#include "pal_internal.h"
#include "pal_linux.h"
#include "pal_security.h"

struct atomic_int g_alloced_pages;

static size_t g_page_size = PRESET_PAGESIZE;
static void* g_heap_bottom;
static void* g_heap_top;

/* list of VMAs of used memory areas kept in DESCENDING order */
/* TODO: rewrite the logic so that this list keeps VMAs in ascending order */
DEFINE_LIST(heap_vma);
struct heap_vma {
    LIST_TYPE(heap_vma) list;
    void* bottom;
    void* top;
    bool is_pal_internal;
};
DEFINE_LISTP(heap_vma);

static LISTP_TYPE(heap_vma) g_heap_vma_list = LISTP_INIT;
static PAL_LOCK g_heap_vma_lock = LOCK_INIT;
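
/* Record the enclave heap boundaries and, if the application executable is mapped inside the
 * heap range (e.g. a non-PIE executable loaded at a fixed address), reserve a VMA covering it
 * (padded by MEMORY_GAP on both sides) so that later allocations cannot overlap with it. */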
int init_enclave_pages(void) {
    g_heap_bottom = pal_sec.heap_min;
    g_heap_top = pal_sec.heap_max;

    size_t reserved_size = 0;
    struct heap_vma* exec_vma = NULL;

    if (pal_sec.exec_addr < g_heap_top && pal_sec.exec_addr + pal_sec.exec_size > g_heap_bottom) {
        /* there is an executable mapped inside the heap, carve a VMA for its area; this can happen
         * in case of non-PIE executables that start at a predefined address (typically 0x400000) */
        exec_vma = malloc(sizeof(*exec_vma));
        if (!exec_vma) {
            SGX_DBG(DBG_E, "*** Cannot initialize VMA for executable ***\n");
            return -PAL_ERROR_NOMEM;
        }
        exec_vma->bottom = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, g_heap_bottom);
        exec_vma->top = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP, g_heap_top);
        exec_vma->is_pal_internal = false;
        INIT_LIST_HEAD(exec_vma, list);
        LISTP_ADD(exec_vma, &g_heap_vma_list, list);

        reserved_size += exec_vma->top - exec_vma->bottom;
    }

    atomic_add(reserved_size / g_page_size, &g_alloced_pages);

    SGX_DBG(DBG_M, "Heap size: %luM\n", (g_heap_top - g_heap_bottom - reserved_size) / 1024 / 1024);
    return 0;
}
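
/* Create a VMA spanning [addr, addr + size) and merge it with any overlapping or adjacent VMAs
 * of the same type (normal vs. pal-internal); the absorbed VMAs are unlinked and freed.
 * Illustrative example (addresses are made up; the list is kept in descending order):
 *
 *     before:  [0x30000, 0x40000)   [0x10000, 0x20000)
 *     insert:  [0x20000, 0x30000)
 *     after:   [0x10000, 0x40000)
 *
 * `vma_above` must be the VMA with the lowest bottom address that is still >= `addr`, or NULL if
 * no such VMA exists. Must be called with `g_heap_vma_lock` held. Returns `addr` on success and
 * NULL if `addr` lies below the heap bottom, if the range overlaps a VMA of the other type, or if
 * bookkeeping memory cannot be allocated. */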
static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_internal,
                                    struct heap_vma* vma_above) {
    assert(_DkInternalIsLocked(&g_heap_vma_lock));
    assert(addr && size);

    if (addr < g_heap_bottom)
        return NULL;

    /* find enclosing VMAs and check that pal-internal VMAs do not overlap with normal VMAs */
    struct heap_vma* vma_below;
    if (vma_above) {
        vma_below = LISTP_NEXT_ENTRY(vma_above, &g_heap_vma_list, list);
    } else {
        /* no VMA above `addr`; VMA right below `addr` must be the first (highest-address) in list */
        vma_below = LISTP_FIRST_ENTRY(&g_heap_vma_list, struct heap_vma, list);
    }

    /* check whether [addr, addr + size) overlaps with above VMAs of different type */
    struct heap_vma* check_vma_above = vma_above;
    while (check_vma_above && addr + size > check_vma_above->bottom) {
        if (check_vma_above->is_pal_internal != is_pal_internal) {
            SGX_DBG(DBG_M, "VMA %p-%p (internal=%d) overlaps with %p-%p (internal=%d)\n",
                    addr, addr + size, is_pal_internal, check_vma_above->bottom,
                    check_vma_above->top, check_vma_above->is_pal_internal);
            return NULL;
        }
        check_vma_above = LISTP_PREV_ENTRY(check_vma_above, &g_heap_vma_list, list);
    }

    /* check whether [addr, addr + size) overlaps with below VMAs of different type */
    struct heap_vma* check_vma_below = vma_below;
    while (check_vma_below && addr < check_vma_below->top) {
        if (check_vma_below->is_pal_internal != is_pal_internal) {
            SGX_DBG(DBG_M, "VMA %p-%p (internal=%d) overlaps with %p-%p (internal=%d)\n",
                    addr, addr + size, is_pal_internal, check_vma_below->bottom,
                    check_vma_below->top, check_vma_below->is_pal_internal);
            return NULL;
        }
        check_vma_below = LISTP_NEXT_ENTRY(check_vma_below, &g_heap_vma_list, list);
    }

    /* create VMA with [addr, addr + size); in case of existing overlapping VMAs, the created VMA
     * is merged with them and the old VMAs are discarded, similar to mmap(MAP_FIXED) */
    struct heap_vma* vma = malloc(sizeof(*vma));
    if (!vma)
        return NULL;
    vma->bottom = addr;
    vma->top = addr + size;
    vma->is_pal_internal = is_pal_internal;

    /* how much memory was freed because [addr, addr + size) overlapped with VMAs */
    size_t freed = 0;

    /* Try to merge VMAs as an optimization:
     *   (1) start from `vma_above` and iterate through VMAs with higher-addresses for merges
     *   (2) start from `vma_below` and iterate through VMAs with lower-addresses for merges.
     * Note that we never merge normal VMAs with pal-internal VMAs. */
    while (vma_above && vma_above->bottom <= vma->top &&
           vma_above->is_pal_internal == vma->is_pal_internal) {
        /* newly created VMA grows into above VMA; expand newly created VMA and free above-VMA */
        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                vma_above->bottom, vma_above->top);

        freed += vma_above->top - vma_above->bottom;
        struct heap_vma* vma_above_above = LISTP_PREV_ENTRY(vma_above, &g_heap_vma_list, list);

        vma->bottom = MIN(vma_above->bottom, vma->bottom);
        vma->top = MAX(vma_above->top, vma->top);
        LISTP_DEL(vma_above, &g_heap_vma_list, list);

        free(vma_above);
        vma_above = vma_above_above;
    }

    while (vma_below && vma_below->top >= vma->bottom &&
           vma_below->is_pal_internal == vma->is_pal_internal) {
        /* newly created VMA grows into below VMA; expand newly created VMA and free below-VMA */
        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                vma_below->bottom, vma_below->top);

        freed += vma_below->top - vma_below->bottom;
        struct heap_vma* vma_below_below = LISTP_NEXT_ENTRY(vma_below, &g_heap_vma_list, list);

        vma->bottom = MIN(vma_below->bottom, vma->bottom);
        vma->top = MAX(vma_below->top, vma->top);
        LISTP_DEL(vma_below, &g_heap_vma_list, list);

        free(vma_below);
        vma_below = vma_below_below;
    }

    INIT_LIST_HEAD(vma, list);
    LISTP_ADD_AFTER(vma, vma_above, &g_heap_vma_list, list);
    SGX_DBG(DBG_M, "Created vma %p-%p\n", vma->bottom, vma->top);

    if (vma->bottom >= vma->top) {
        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n", vma->bottom, vma->top);
        ocall_exit(/*exitcode=*/1, /*is_exitgroup=*/true);
    }

    assert(vma->top - vma->bottom >= (ptrdiff_t)freed);
    size_t allocated = vma->top - vma->bottom - freed;
    atomic_add(allocated / g_page_size, &g_alloced_pages);

    return addr;
}
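
/* Allocate `size` bytes of enclave heap memory. If `addr` is non-NULL, the allocation is placed
 * exactly at the page-aligned-down `addr`; otherwise the highest-address free slot that fits is
 * chosen. Returns the start address of the allocation or NULL on failure.
 *
 * Minimal usage sketch (the caller code below is hypothetical; only get_enclave_pages() and
 * free_enclave_pages() are defined in this file):
 *
 *     void* buf = get_enclave_pages(NULL, 4 * PRESET_PAGESIZE, false);
 *     if (buf) {
 *         // ... use the 4-page region at buf ...
 *         free_enclave_pages(buf, 4 * PRESET_PAGESIZE);
 *     }
 */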
void* get_enclave_pages(void* addr, size_t size, bool is_pal_internal) {
    void* ret = NULL;

    if (!size)
        return NULL;

    size = ALIGN_UP(size, g_page_size);
    addr = ALIGN_DOWN_PTR(addr, g_page_size);

    assert(access_ok(addr, size));

    SGX_DBG(DBG_M, "Allocating %lu bytes in enclave memory at %p (%s)\n", size, addr,
            is_pal_internal ? "PAL internal" : "normal");

    struct heap_vma* vma_above = NULL;
    struct heap_vma* vma;

    _DkInternalLock(&g_heap_vma_lock);

    if (addr) {
        /* caller specified concrete address; find VMA right-above this address */
        if (addr < g_heap_bottom || addr + size > g_heap_top)
            goto out;

        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
            if (vma->bottom < addr) {
                /* current VMA is not above `addr`, thus vma_above is VMA right-above `addr` */
                break;
            }
            vma_above = vma;
        }
        ret = __create_vma_and_merge(addr, size, is_pal_internal, vma_above);
    } else {
        /* caller did not specify address; find first (highest-address) empty slot that fits */
        void* vma_above_bottom = g_heap_top;

        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
            if (vma->top < vma_above_bottom - size) {
                ret = __create_vma_and_merge(vma_above_bottom - size, size, is_pal_internal,
                                             vma_above);
                goto out;
            }
            vma_above = vma;
            vma_above_bottom = vma_above->bottom;
        }

        /* corner case: there may be enough space between heap bottom and the lowest-address VMA */
        if (g_heap_bottom < vma_above_bottom - size)
            ret = __create_vma_and_merge(vma_above_bottom - size, size, is_pal_internal, vma_above);
    }

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return ret;
}
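
/* Free `size` bytes of enclave heap memory starting at `addr`. The range may partially overlap
 * existing VMAs: overlapping VMAs are shrunk or split accordingly. Fails if the range spans both
 * normal and pal-internal VMAs. */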
int free_enclave_pages(void* addr, size_t size) {
    int ret = 0;

    if (!size)
        return -PAL_ERROR_NOMEM;

    size = ALIGN_UP(size, g_page_size);

    if (!access_ok(addr, size) || !IS_ALIGNED_PTR(addr, g_page_size) ||
        addr < g_heap_bottom || addr + size > g_heap_top) {
        return -PAL_ERROR_INVAL;
    }

    SGX_DBG(DBG_M, "Freeing %lu bytes in enclave memory at %p\n", size, addr);

    _DkInternalLock(&g_heap_vma_lock);

    /* VMA list contains both normal and pal-internal VMAs; it is impossible to free an area
     * that overlaps with VMAs of two types at the same time, so we fail in such cases */
    bool is_pal_internal_set = false;
    bool is_pal_internal;

    /* how much memory was actually freed, since [addr, addr + size) can overlap with VMAs */
    size_t freed = 0;

    struct heap_vma* vma;
    struct heap_vma* p;
    LISTP_FOR_EACH_ENTRY_SAFE(vma, p, &g_heap_vma_list, list) {
        if (vma->bottom >= addr + size)
            continue;
        if (vma->top <= addr)
            break;

        /* found VMA overlapping with area to free; check it is either normal or pal-internal */
        if (!is_pal_internal_set) {
            is_pal_internal = vma->is_pal_internal;
            is_pal_internal_set = true;
        }

        if (is_pal_internal != vma->is_pal_internal) {
            SGX_DBG(DBG_E, "*** Area to free (address %p, size %lu) overlaps with both normal and "
                    "pal-internal VMAs ***\n", addr, size);
            ret = -PAL_ERROR_INVAL;
            goto out;
        }

        freed += MIN(vma->top, addr + size) - MAX(vma->bottom, addr);

        if (vma->bottom < addr) {
            /* create VMA [vma->bottom, addr); this may leave VMA [addr + size, vma->top), see below */
            struct heap_vma* new = malloc(sizeof(*new));
            if (!new) {
                SGX_DBG(DBG_E, "*** Cannot create split VMA during free of address %p ***\n", addr);
                ret = -PAL_ERROR_NOMEM;
                goto out;
            }
            new->top = addr;
            new->bottom = vma->bottom;
            new->is_pal_internal = vma->is_pal_internal;
            INIT_LIST_HEAD(new, list);
            LIST_ADD(new, vma, list);
        }

        /* compress overlapping VMA to [addr + size, vma->top) */
        vma->bottom = addr + size;
        if (vma->top <= addr + size) {
            /* memory area to free completely covers/extends above the rest of the VMA */
            LISTP_DEL(vma, &g_heap_vma_list, list);
            free(vma);
        }
    }

    atomic_sub(freed / g_page_size, &g_alloced_pages);

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return ret;
}

/* returns current highest available address on the enclave heap */
void* get_enclave_heap_top(void) {
    _DkInternalLock(&g_heap_vma_lock);

    void* addr = g_heap_top;
    struct heap_vma* vma;
    LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
        if (vma->top < addr) {
            goto out;
        }
        addr = vma->bottom;
    }

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return addr;
}