enclave_pages.c

#include "api.h"
#include "enclave_pages.h"
#include "list.h"
#include "pal_error.h"
#include "pal_internal.h"
#include "pal_linux.h"
#include "pal_security.h"

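/* number of currently allocated heap pages, counted in units of `g_page_size` */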
struct atomic_int g_alloced_pages;

static size_t g_page_size = PRESET_PAGESIZE;

static void* g_heap_bottom;
static void* g_heap_top;

/* list of VMAs of used memory areas kept in DESCENDING order */
/* TODO: rewrite the logic so that this list keeps VMAs in ascending order */
DEFINE_LIST(heap_vma);
struct heap_vma {
    LIST_TYPE(heap_vma) list;
    void* bottom;
    void* top;
};
DEFINE_LISTP(heap_vma);

static LISTP_TYPE(heap_vma) g_heap_vma_list = LISTP_INIT;
static PAL_LOCK g_heap_vma_lock = LOCK_INIT;

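/* Initialize enclave heap bookkeeping: take the heap boundaries from `pal_sec` and, if an
 * executable is mapped inside the heap range, pre-reserve a VMA covering it (padded by
 * MEMORY_GAP) so that later heap allocations cannot clobber it. */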
int init_enclave_pages(void) {
    g_heap_bottom = pal_sec.heap_min;
    g_heap_top    = pal_sec.heap_max;

    size_t reserved_size = 0;
    struct heap_vma* exec_vma = NULL;

    if (pal_sec.exec_addr < g_heap_top && pal_sec.exec_addr + pal_sec.exec_size > g_heap_bottom) {
        /* there is an executable mapped inside the heap, carve a VMA for its area; this can happen
         * in case of non-PIE executables that start at a predefined address (typically 0x400000) */
        exec_vma = malloc(sizeof(*exec_vma));
        if (!exec_vma) {
            SGX_DBG(DBG_E, "*** Cannot initialize VMA for executable ***\n");
            return -PAL_ERROR_NOMEM;
        }

        exec_vma->bottom = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, g_heap_bottom);
        exec_vma->top    = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP,
                                           g_heap_top);
        INIT_LIST_HEAD(exec_vma, list);
        LISTP_ADD(exec_vma, &g_heap_vma_list, list);

        reserved_size += exec_vma->top - exec_vma->bottom;
    }

    atomic_add(reserved_size / g_page_size, &g_alloced_pages);

    SGX_DBG(DBG_M, "Heap size: %luM\n", (g_heap_top - g_heap_bottom - reserved_size) / 1024 / 1024);
    return 0;
}

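/* Insert a VMA covering [addr, addr + size) into `g_heap_vma_list`, merging it with all
 * overlapping or adjacent VMAs. `vma_above` must be the VMA right above `addr`, or NULL if no
 * such VMA exists. Must be called with `g_heap_vma_lock` held. Returns `addr` on success and
 * NULL on failure. */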
static void* __create_vma_and_merge(void* addr, size_t size, struct heap_vma* vma_above) {
    assert(_DkInternalIsLocked(&g_heap_vma_lock));
    assert(addr && size);

    if (addr < g_heap_bottom)
        return NULL;

    /* create VMA with [addr, addr + size); in case of existing overlapping VMAs, the created VMA
     * is merged with them and the old VMAs are discarded, similar to mmap(MAP_FIXED) */
    struct heap_vma* vma = malloc(sizeof(*vma));
    if (!vma)
        return NULL;

    vma->bottom = addr;
    vma->top    = addr + size;

    /* find VMAs to merge:
     *   (1) start from `vma_above` and iterate through VMAs with higher addresses for merges,
     *   (2) start from `vma_below` and iterate through VMAs with lower addresses for merges */
    struct heap_vma* vma_below;
    if (vma_above) {
        vma_below = LISTP_NEXT_ENTRY(vma_above, &g_heap_vma_list, list);
    } else {
        /* no VMA above `addr`; VMA right below `addr` must be the first (highest-address) in list */
        vma_below = LISTP_FIRST_ENTRY(&g_heap_vma_list, struct heap_vma, list);
    }

    while (vma_above && vma_above->bottom <= vma->top) {
        /* newly created VMA grows into above VMA; expand newly created VMA and free above-VMA */
        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                vma_above->bottom, vma_above->top);

        struct heap_vma* vma_above_above = LISTP_PREV_ENTRY(vma_above, &g_heap_vma_list, list);

        vma->bottom = MIN(vma_above->bottom, vma->bottom);
        vma->top    = MAX(vma_above->top, vma->top);
        LISTP_DEL(vma_above, &g_heap_vma_list, list);

        free(vma_above);
        vma_above = vma_above_above;
    }

    while (vma_below && vma_below->top >= vma->bottom) {
        /* newly created VMA grows into below VMA; expand newly created VMA and free below-VMA */
        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                vma_below->bottom, vma_below->top);

        struct heap_vma* vma_below_below = LISTP_NEXT_ENTRY(vma_below, &g_heap_vma_list, list);

        vma->bottom = MIN(vma_below->bottom, vma->bottom);
        vma->top    = MAX(vma_below->top, vma->top);
        LISTP_DEL(vma_below, &g_heap_vma_list, list);

        free(vma_below);
        vma_below = vma_below_below;
    }

    INIT_LIST_HEAD(vma, list);
    LISTP_ADD_AFTER(vma, vma_above, &g_heap_vma_list, list);
    SGX_DBG(DBG_M, "Created vma %p-%p\n", vma->bottom, vma->top);

    if (vma->bottom >= vma->top) {
        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n", vma->bottom, vma->top);
        ocall_exit(/*exitcode=*/1, /*is_exitgroup=*/true);
    }

    atomic_add(size / g_page_size, &g_alloced_pages);
    return addr;
}

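/* Allocate `size` bytes of enclave heap. If `addr` is non-NULL, the area is carved out exactly
 * at `addr` (merging with any overlapping VMAs); otherwise the highest-address free slot large
 * enough for `size` is chosen. Returns the start of the allocated area or NULL on failure. */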
void* get_enclave_pages(void* addr, size_t size) {
    void* ret = NULL;

    if (!size)
        return NULL;

    size = ALIGN_UP(size, g_page_size);
    addr = ALIGN_DOWN_PTR(addr, g_page_size);

    assert(access_ok(addr, size));

    SGX_DBG(DBG_M, "Allocating %lu bytes at %p\n", size, addr);

    struct heap_vma* vma_above = NULL;
    struct heap_vma* vma;

    _DkInternalLock(&g_heap_vma_lock);

    if (addr) {
        /* caller specified concrete address; find VMA right above this address */
        if (addr < g_heap_bottom || addr + size > g_heap_top)
            goto out;

        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
            if (vma->bottom < addr) {
                /* current VMA is not above `addr`, thus `vma_above` is the VMA right above `addr` */
                break;
            }
            vma_above = vma;
        }
        ret = __create_vma_and_merge(addr, size, vma_above);
    } else {
        /* caller did not specify address; find first (highest-address) empty slot that fits */
        void* vma_above_bottom = g_heap_top;

        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
            if (vma->top < vma_above_bottom - size) {
                ret = __create_vma_and_merge(vma_above_bottom - size, size, vma_above);
                goto out;
            }
            vma_above = vma;
            vma_above_bottom = vma_above->bottom;
        }

        /* corner case: there may be enough space between heap bottom and the lowest-address VMA */
        if (g_heap_bottom < vma_above_bottom - size)
            ret = __create_vma_and_merge(vma_above_bottom - size, size, vma_above);
    }

out:
    _DkInternalUnlock(&g_heap_vma_lock);

    if (!ret) {
        SGX_DBG(DBG_E, "*** Cannot allocate %lu bytes on the heap (at address %p) ***\n", size,
                addr);
    }
    return ret;
}

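/* Free `size` bytes of enclave heap at `addr`: VMAs overlapping the freed area are split or
 * removed as needed. Returns 0 on success, negative PAL error code on failure. */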
int free_enclave_pages(void* addr, size_t size) {
    int ret = 0;

    if (!size)
        return -PAL_ERROR_NOMEM;

    size = ALIGN_UP(size, g_page_size);

    if (!access_ok(addr, size) || !IS_ALIGNED_PTR(addr, g_page_size) || addr < g_heap_bottom ||
            addr + size > g_heap_top) {
        return -PAL_ERROR_INVAL;
    }

    SGX_DBG(DBG_M, "Freeing %lu bytes at %p\n", size, addr);

    _DkInternalLock(&g_heap_vma_lock);

    struct heap_vma* vma;
    struct heap_vma* p;
    /* VMA list is kept in descending order: skip VMAs entirely above the freed area and stop at
     * the first VMA entirely below it */
    LISTP_FOR_EACH_ENTRY_SAFE(vma, p, &g_heap_vma_list, list) {
        if (vma->bottom >= addr + size)
            continue;
        if (vma->top <= addr)
            break;

        /* found VMA overlapping with the memory area to free */
        if (vma->bottom < addr) {
            /* create VMA [vma->bottom, addr); this may leave VMA [addr + size, vma->top),
             * see below */
            struct heap_vma* new = malloc(sizeof(*new));
            if (!new) {
                SGX_DBG(DBG_E, "*** Cannot create split VMA during free of address %p ***\n", addr);
                ret = -PAL_ERROR_NOMEM;
                goto out;
            }
            new->top    = addr;
            new->bottom = vma->bottom;
            INIT_LIST_HEAD(new, list);
            LIST_ADD(new, vma, list);
        }

        /* compress overlapping VMA to [addr + size, vma->top) */
        vma->bottom = addr + size;
        if (vma->top <= addr + size) {
            /* memory area to free completely covers/extends above the rest of the VMA */
            LISTP_DEL(vma, &g_heap_vma_list, list);
            free(vma);
        }
    }

    atomic_sub(size / g_page_size, &g_alloced_pages);

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return ret;
}