enclave_pages.c

#include "api.h"
#include "enclave_pages.h"
#include "list.h"
#include "pal_error.h"
#include "pal_internal.h"
#include "pal_linux.h"
#include "pal_security.h"
struct atomic_int g_alloced_pages;

static size_t g_page_size = PRESET_PAGESIZE;
static void* g_heap_bottom;
static void* g_heap_top;

/* list of VMAs of used memory areas kept in DESCENDING order */
/* TODO: rewrite the logic so that this list keeps VMAs in ascending order */
DEFINE_LIST(heap_vma);
struct heap_vma {
    LIST_TYPE(heap_vma) list;
    void* bottom;
    void* top;
    bool is_pal_internal;
};
DEFINE_LISTP(heap_vma);

static LISTP_TYPE(heap_vma) g_heap_vma_list = LISTP_INIT;
static PAL_LOCK g_heap_vma_lock = LOCK_INIT;

/* heap_vma objects are taken from a pre-allocated pool to avoid recursive mallocs */
#define MAX_HEAP_VMAS 100000
static struct heap_vma g_heap_vma_pool[MAX_HEAP_VMAS];
static size_t g_heap_vma_num = 0;
static struct heap_vma* g_free_vma = NULL;
/* returns uninitialized heap_vma; the caller is responsible for setting at least bottom/top */
static struct heap_vma* __alloc_vma(void) {
    assert(_DkInternalIsLocked(&g_heap_vma_lock));

    if (g_free_vma) {
        /* simple optimization: if there is a cached free vma object, use it */
        assert((uintptr_t)g_free_vma >= (uintptr_t)&g_heap_vma_pool[0]);
        assert((uintptr_t)g_free_vma <= (uintptr_t)&g_heap_vma_pool[MAX_HEAP_VMAS - 1]);
        struct heap_vma* ret = g_free_vma;
        g_free_vma = NULL;
        g_heap_vma_num++;
        return ret;
    }

    /* FIXME: this loop may become a perf bottleneck on a large number of vma objects; however,
     * experiments show that this number typically does not exceed 20 (thanks to VMA merging) */
    for (size_t i = 0; i < MAX_HEAP_VMAS; i++) {
        if (!g_heap_vma_pool[i].bottom && !g_heap_vma_pool[i].top) {
            /* found empty slot in the pool, use it */
            g_heap_vma_num++;
            return &g_heap_vma_pool[i];
        }
    }

    return NULL;
}
static void __free_vma(struct heap_vma* vma) {
    assert(_DkInternalIsLocked(&g_heap_vma_lock));
    assert((uintptr_t)vma >= (uintptr_t)&g_heap_vma_pool[0]);
    assert((uintptr_t)vma <= (uintptr_t)&g_heap_vma_pool[MAX_HEAP_VMAS - 1]);

    g_free_vma  = vma;
    vma->top    = 0;
    vma->bottom = 0;
    g_heap_vma_num--;
}
int init_enclave_pages(void) {
    int ret;

    g_heap_bottom = pal_sec.heap_min;
    g_heap_top    = pal_sec.heap_max;

    size_t reserved_size = 0;
    struct heap_vma* exec_vma = NULL;

    _DkInternalLock(&g_heap_vma_lock);

    if (pal_sec.exec_addr < g_heap_top && pal_sec.exec_addr + pal_sec.exec_size > g_heap_bottom) {
        /* there is an executable mapped inside the heap, carve a VMA for its area; this can happen
         * in case of non-PIE executables that start at a predefined address (typically 0x400000) */
        exec_vma = __alloc_vma();
        if (!exec_vma) {
            SGX_DBG(DBG_E, "*** Cannot initialize VMA for executable ***\n");
            ret = -PAL_ERROR_NOMEM;
            goto out;
        }

        exec_vma->bottom = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, g_heap_bottom);
        exec_vma->top = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP, g_heap_top);
        exec_vma->is_pal_internal = false;
        INIT_LIST_HEAD(exec_vma, list);
        LISTP_ADD(exec_vma, &g_heap_vma_list, list);

        reserved_size += exec_vma->top - exec_vma->bottom;
    }

    atomic_add(reserved_size / g_page_size, &g_alloced_pages);

    SGX_DBG(DBG_M, "Heap size: %luM\n", (g_heap_top - g_heap_bottom - reserved_size) / 1024 / 1024);
    ret = 0;

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return ret;
}
static void* __create_vma_and_merge(void* addr, size_t size, bool is_pal_internal,
                                    struct heap_vma* vma_above) {
    assert(_DkInternalIsLocked(&g_heap_vma_lock));
    assert(addr && size);

    if (addr < g_heap_bottom)
        return NULL;

    /* find enclosing VMAs and check that pal-internal VMAs do not overlap with normal VMAs */
    struct heap_vma* vma_below;
    if (vma_above) {
        vma_below = LISTP_NEXT_ENTRY(vma_above, &g_heap_vma_list, list);
    } else {
        /* no VMA above `addr`; VMA right below `addr` must be the first (highest-address) in list */
        vma_below = LISTP_FIRST_ENTRY(&g_heap_vma_list, struct heap_vma, list);
    }

    /* check whether [addr, addr + size) overlaps with above VMAs of a different type */
    struct heap_vma* check_vma_above = vma_above;
    while (check_vma_above && addr + size > check_vma_above->bottom) {
        if (check_vma_above->is_pal_internal != is_pal_internal) {
            SGX_DBG(DBG_M, "VMA %p-%p (internal=%d) overlaps with %p-%p (internal=%d)\n",
                    addr, addr + size, is_pal_internal, check_vma_above->bottom,
                    check_vma_above->top, check_vma_above->is_pal_internal);
            return NULL;
        }
        check_vma_above = LISTP_PREV_ENTRY(check_vma_above, &g_heap_vma_list, list);
    }

    /* check whether [addr, addr + size) overlaps with below VMAs of a different type */
    struct heap_vma* check_vma_below = vma_below;
    while (check_vma_below && addr < check_vma_below->top) {
        if (check_vma_below->is_pal_internal != is_pal_internal) {
            SGX_DBG(DBG_M, "VMA %p-%p (internal=%d) overlaps with %p-%p (internal=%d)\n",
                    addr, addr + size, is_pal_internal, check_vma_below->bottom,
                    check_vma_below->top, check_vma_below->is_pal_internal);
            return NULL;
        }
        check_vma_below = LISTP_NEXT_ENTRY(check_vma_below, &g_heap_vma_list, list);
    }

    /* create VMA with [addr, addr + size); in case of existing overlapping VMAs, the created VMA
     * is merged with them and the old VMAs are discarded, similar to mmap(MAP_FIXED) */
    struct heap_vma* vma = __alloc_vma();
    if (!vma)
        return NULL;
    vma->bottom          = addr;
    vma->top             = addr + size;
    vma->is_pal_internal = is_pal_internal;

    /* how much memory was freed because [addr, addr + size) overlapped with existing VMAs */
    size_t freed = 0;

    /* Try to merge VMAs as an optimization:
     *   (1) start from `vma_above` and iterate through VMAs with higher addresses for merges,
     *   (2) start from `vma_below` and iterate through VMAs with lower addresses for merges.
     * Note that we never merge normal VMAs with pal-internal VMAs. */
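    /* Worked example of the merges below (illustrative): with same-type VMAs [0x20000, 0x30000)
     * and [0x38000, 0x40000) already in the list, creating a VMA for [0x2F000, 0x39000) overlaps
     * both neighbors, so both merge loops fire and the result is a single VMA [0x20000, 0x40000);
     * `freed` then accounts for the 0x18000 bytes already covered by the two old VMAs, so only
     * 0x8000 new bytes are added to g_alloced_pages. */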
    while (vma_above && vma_above->bottom <= vma->top &&
           vma_above->is_pal_internal == vma->is_pal_internal) {
        /* newly created VMA grows into above VMA; expand newly created VMA and free above-VMA */
        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                vma_above->bottom, vma_above->top);

        freed += vma_above->top - vma_above->bottom;
        struct heap_vma* vma_above_above = LISTP_PREV_ENTRY(vma_above, &g_heap_vma_list, list);

        vma->bottom = MIN(vma_above->bottom, vma->bottom);
        vma->top    = MAX(vma_above->top, vma->top);
        LISTP_DEL(vma_above, &g_heap_vma_list, list);

        __free_vma(vma_above);
        vma_above = vma_above_above;
    }

    while (vma_below && vma_below->top >= vma->bottom &&
           vma_below->is_pal_internal == vma->is_pal_internal) {
        /* newly created VMA grows into below VMA; expand newly created VMA and free below-VMA */
        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
                vma_below->bottom, vma_below->top);

        freed += vma_below->top - vma_below->bottom;
        struct heap_vma* vma_below_below = LISTP_NEXT_ENTRY(vma_below, &g_heap_vma_list, list);

        vma->bottom = MIN(vma_below->bottom, vma->bottom);
        vma->top    = MAX(vma_below->top, vma->top);
        LISTP_DEL(vma_below, &g_heap_vma_list, list);

        __free_vma(vma_below);
        vma_below = vma_below_below;
    }

    INIT_LIST_HEAD(vma, list);
    LISTP_ADD_AFTER(vma, vma_above, &g_heap_vma_list, list);
    SGX_DBG(DBG_M, "Created vma %p-%p\n", vma->bottom, vma->top);

    if (vma->bottom >= vma->top) {
        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n", vma->bottom, vma->top);
        ocall_exit(/*exitcode=*/1, /*is_exitgroup=*/true);
    }

    assert(vma->top - vma->bottom >= (ptrdiff_t)freed);
    size_t allocated = vma->top - vma->bottom - freed;
    atomic_add(allocated / g_page_size, &g_alloced_pages);

    return addr;
}
void* get_enclave_pages(void* addr, size_t size, bool is_pal_internal) {
    void* ret = NULL;

    if (!size)
        return NULL;

    size = ALIGN_UP(size, g_page_size);
    addr = ALIGN_DOWN_PTR(addr, g_page_size);

    assert(access_ok(addr, size));

    SGX_DBG(DBG_M, "Allocating %lu bytes in enclave memory at %p (%s)\n", size, addr,
            is_pal_internal ? "PAL internal" : "normal");

    struct heap_vma* vma_above = NULL;
    struct heap_vma* vma;

    _DkInternalLock(&g_heap_vma_lock);

    if (addr) {
        /* caller specified concrete address; find VMA right-above this address */
        if (addr < g_heap_bottom || addr + size > g_heap_top)
            goto out;

        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
            if (vma->bottom < addr) {
                /* current VMA is not above `addr`, thus vma_above is VMA right-above `addr` */
                break;
            }
            vma_above = vma;
        }
        ret = __create_vma_and_merge(addr, size, is_pal_internal, vma_above);
    } else {
        /* caller did not specify address; find first (highest-address) empty slot that fits */
        void* vma_above_bottom = g_heap_top;

        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
            if (vma->top < vma_above_bottom - size) {
                ret = __create_vma_and_merge(vma_above_bottom - size, size, is_pal_internal,
                                             vma_above);
                goto out;
            }
            vma_above = vma;
            vma_above_bottom = vma_above->bottom;
        }

        /* corner case: there may be enough space between heap bottom and the lowest-address VMA */
        if (g_heap_bottom < vma_above_bottom - size)
            ret = __create_vma_and_merge(vma_above_bottom - size, size, is_pal_internal, vma_above);
    }

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return ret;
}
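
/* Illustrative example of the `addr == NULL` search above: with heap [0x10000, 0x50000) and a
 * single normal VMA [0x40000, 0x50000), a request for 0x2000 bytes of normal memory is placed at
 * the highest free slot, [0x3E000, 0x40000); since it abuts a VMA of the same type right above,
 * __create_vma_and_merge() merges the two into one VMA [0x3E000, 0x50000). */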
int free_enclave_pages(void* addr, size_t size) {
    int ret = 0;

    if (!size)
        return -PAL_ERROR_NOMEM;

    size = ALIGN_UP(size, g_page_size);

    if (!access_ok(addr, size) || !IS_ALIGNED_PTR(addr, g_page_size) ||
            addr < g_heap_bottom || addr + size > g_heap_top) {
        return -PAL_ERROR_INVAL;
    }

    SGX_DBG(DBG_M, "Freeing %lu bytes in enclave memory at %p\n", size, addr);

    _DkInternalLock(&g_heap_vma_lock);

    /* VMA list contains both normal and pal-internal VMAs; it is impossible to free an area
     * that overlaps with VMAs of two types at the same time, so we fail in such cases */
    bool is_pal_internal_set = false;
    bool is_pal_internal;

    /* how much memory was actually freed, since [addr, addr + size) can overlap with VMAs */
    size_t freed = 0;

    struct heap_vma* vma;
    struct heap_vma* p;
    LISTP_FOR_EACH_ENTRY_SAFE(vma, p, &g_heap_vma_list, list) {
        if (vma->bottom >= addr + size)
            continue;
        if (vma->top <= addr)
            break;

        /* found VMA overlapping with area to free; check it is either normal or pal-internal */
        if (!is_pal_internal_set) {
            is_pal_internal = vma->is_pal_internal;
            is_pal_internal_set = true;
        }

        if (is_pal_internal != vma->is_pal_internal) {
            SGX_DBG(DBG_E, "*** Area to free (address %p, size %lu) overlaps with both normal and "
                    "pal-internal VMAs ***\n", addr, size);
            ret = -PAL_ERROR_INVAL;
            goto out;
        }

        freed += MIN(vma->top, addr + size) - MAX(vma->bottom, addr);

        if (vma->bottom < addr) {
            /* create VMA [vma->bottom, addr); this may leave VMA [addr + size, vma->top), see below */
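            /* e.g. (illustrative): freeing [0x3000, 0x5000) out of a single VMA [0x2000, 0x6000)
             * splits it into the new VMA [0x2000, 0x3000) created here and the compressed
             * VMA [0x5000, 0x6000) left behind below */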
            struct heap_vma* new = __alloc_vma();
            if (!new) {
                SGX_DBG(DBG_E, "*** Cannot create split VMA during free of address %p ***\n", addr);
                ret = -PAL_ERROR_NOMEM;
                goto out;
            }
            new->top             = addr;
            new->bottom          = vma->bottom;
            new->is_pal_internal = vma->is_pal_internal;
            INIT_LIST_HEAD(new, list);
            LIST_ADD(new, vma, list);
        }

        /* compress overlapping VMA to [addr + size, vma->top) */
        vma->bottom = addr + size;
        if (vma->top <= addr + size) {
            /* memory area to free completely covers/extends above the rest of the VMA */
            LISTP_DEL(vma, &g_heap_vma_list, list);
            __free_vma(vma);
        }
    }

    atomic_sub(freed / g_page_size, &g_alloced_pages);

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return ret;
}
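
/* Usage sketch (illustrative, not part of the original file): a caller typically allocates
 * page-aligned enclave memory and later releases it with the matching free, e.g.:
 *
 *     void* mem = get_enclave_pages(NULL, 3 * PRESET_PAGESIZE, true);  // any address, pal-internal
 *     if (mem)
 *         free_enclave_pages(mem, 3 * PRESET_PAGESIZE);
 *
 * get_enclave_pages() returns NULL on failure; free_enclave_pages() returns 0 on success or a
 * negated PAL_ERROR_* code. */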
/* returns current highest available address on the enclave heap */
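/* e.g. (illustrative): with heap top at 0x50000 and VMAs [0x40000, 0x50000) and [0x20000, 0x30000),
 * this returns 0x40000: the topmost VMA is flush with the heap top, so the highest free address is
 * that VMA's bottom */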
void* get_enclave_heap_top(void) {
    _DkInternalLock(&g_heap_vma_lock);

    void* addr = g_heap_top;
    struct heap_vma* vma;
    LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
        if (vma->top < addr) {
            goto out;
        }
        addr = vma->bottom;
    }

out:
    _DkInternalUnlock(&g_heap_vma_lock);
    return addr;
}