
[Pal/Linux-SGX] Refactor memory management of enclave pages

Previously, the Linux-SGX logic for allocating/freeing enclave pages was
complicated and hard to read. This commit refactors that code for
readability, without changing functionality.
Dmitrii Kuvaiskii, 4 years ago, commit 95bfd8aeb5
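
In summary, the commit renames the enclave-page allocator API and makes initialization and deallocation report errors instead of returning void. A condensed before/after view of the declarations, reconstructed from the `enclave_pages.h` hunk at the bottom of this diff:

```c
/* before */
void  init_pages(void);
void* get_reserved_pages(void* addr, size_t size);
void  free_pages(void* addr, size_t size);

/* after */
int   init_enclave_pages(void);
void* get_enclave_pages(void* addr, size_t size);
int   free_enclave_pages(void* addr, size_t size);
```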

+ 0 - 15
LibOS/shim/include/shim_internal.h

@@ -823,21 +823,6 @@ static_always_inline void * current_stack(void)
     return _rsp;
 }
 
-static_always_inline bool __range_not_ok(unsigned long addr, unsigned long size) {
-    addr += size;
-    if (addr < size) {
-        /* pointer arithmetic overflow, this check is x86-64 specific */
-        return true;
-    }
-    return false;
-}
-
-/* Check if pointer to memory region is valid. Return true if the memory
- * region may be valid, false if it is definitely invalid. */
-static inline bool access_ok(const volatile void* addr, size_t size) {
-    return !__range_not_ok((unsigned long)addr, (unsigned long)size);
-}
-
 #else
 # error "Unsupported architecture"
 #endif /* __x86_64__ */

+ 20 - 0
Pal/include/lib/api.h

@@ -262,4 +262,24 @@ int set_config (struct config_store * cfg, const char * key, const char * val);
 
 #define URI_PREFIX_FILE_LEN     (static_strlen(URI_PREFIX_FILE))
 
+#ifdef __x86_64__
+static inline bool __range_not_ok(uintptr_t addr, size_t size) {
+    addr += size;
+    if (addr < size) {
+        /* pointer arithmetic overflow, this check is x86-64 specific */
+        return true;
+    }
+    return false;
+}
+
+/* Check if pointer to memory region is valid. Return true if the memory
+ * region may be valid, false if it is definitely invalid. */
+static inline bool access_ok(const volatile void* addr, size_t size) {
+    return !__range_not_ok((uintptr_t)addr, size);
+}
+
+#else
+# error "Unsupported architecture"
+#endif /* __x86_64__ */
+
 #endif /* API_H */
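
The moved `access_ok()` helper relies on unsigned wraparound to detect ranges that cross the top of the address space. A minimal standalone illustration of that check (the test values are hypothetical, not from the commit):

```c
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* same wraparound check as __range_not_ok() above */
static bool range_not_ok(uintptr_t addr, size_t size) {
    addr += size;       /* may wrap modulo 2^64 */
    return addr < size; /* wrapped iff the sum is smaller than size */
}

int main(void) {
    /* an ordinary range is accepted */
    assert(!range_not_ok(0x7f0000000000, 0x1000));
    /* a range that wraps past the end of the address space is rejected */
    assert(range_not_ok(UINTPTR_MAX - 0x7ff, 0x1000));
    return 0;
}
```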

+ 1 - 1
Pal/src/host/Linux-SGX/db_files.c

@@ -199,7 +199,7 @@ static int file_map(PAL_HANDLE handle, void** addr, int prot, uint64_t offset, u
         return -PAL_ERROR_DENIED;
     }
 
-    mem = get_reserved_pages(mem, size);
+    mem = get_enclave_pages(mem, size);
     if (!mem)
         return -PAL_ERROR_NOMEM;
 

+ 2 - 2
Pal/src/host/Linux-SGX/db_main.c

@@ -62,7 +62,7 @@ void _DkGetAvailableUserAddressRange (PAL_PTR * start, PAL_PTR * end,
                                       PAL_PTR * hole_start, PAL_PTR * hole_end)
 {
     *start = (PAL_PTR) pal_sec.heap_min;
-    *end = (PAL_PTR) get_reserved_pages(NULL, g_page_size);
+    *end = (PAL_PTR) get_enclave_pages(NULL, g_page_size);
     *hole_start = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, *start);
     *hole_end = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP, *end);
 }
@@ -307,7 +307,7 @@ void pal_linux_main(char * uptr_args, uint64_t args_size,
     /* set up page allocator and slab manager */
     init_slab_mgr(g_page_size);
     init_untrusted_slab_mgr();
-    init_pages();
+    init_enclave_pages();
     init_enclave_key();
 
     init_cpuid();

+ 8 - 11
Pal/src/host/Linux-SGX/db_memory.c

@@ -77,15 +77,9 @@ int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int pro
     if (size == 0)
         __asm__ volatile ("int $3");
 
-    mem = get_reserved_pages(addr, size);
+    mem = get_enclave_pages(addr, size);
     if (!mem)
         return addr ? -PAL_ERROR_DENIED : -PAL_ERROR_NOMEM;
-    if (addr && mem != addr) {
-        // TODO: This case should be made impossible by fixing
-        // `get_reserved_pages` semantics.
-        free_pages(mem, size);
-        return -PAL_ERROR_INVAL; // `addr` was unaligned.
-    }
 
     if (alloc_type & PAL_ALLOC_INTERNAL) {
         spinlock_lock(&pal_vma_lock);
@@ -93,7 +87,7 @@ int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int pro
             spinlock_unlock(&pal_vma_lock);
             SGX_DBG(DBG_E, "Pal is out of VMAs (current limit on VMAs PAL_VMA_MAX = %d)!\n",
                     PAL_VMA_MAX);
-            free_pages(mem, size);
+            free_enclave_pages(mem, size);
             return -PAL_ERROR_NOMEM;
         }
 
@@ -114,7 +108,10 @@ int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int pro
 int _DkVirtualMemoryFree (void * addr, uint64_t size)
 {
     if (sgx_is_completely_within_enclave(addr, size)) {
-        free_pages(addr, size);
+        int ret = free_enclave_pages(addr, size);
+        if (ret < 0) {
+            return ret;
+        }
 
         /* check if it is internal PAL memory and remove this VMA from pal_vmas if yes */
         spinlock_lock(&pal_vma_lock);
@@ -157,11 +154,11 @@ unsigned long _DkMemoryQuota (void)
     return pal_sec.heap_max - pal_sec.heap_min;
 }
 
-extern struct atomic_int alloced_pages;
+extern struct atomic_int g_alloced_pages;
 extern unsigned int g_page_size;
 
 unsigned long _DkMemoryAvailableQuota (void)
 {
     return (pal_sec.heap_max - pal_sec.heap_min) -
-        atomic_read(&alloced_pages) * g_page_size;
+        atomic_read(&g_alloced_pages) * g_page_size;
 }
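
The available-quota formula above is simply the heap span minus the pages currently allocated. A worked example with hypothetical numbers (a 256 MiB heap and 1000 allocated 4 KiB pages):

```c
#include <stdio.h>

int main(void) {
    unsigned long heap_span  = 256UL * 1024 * 1024; /* pal_sec.heap_max - pal_sec.heap_min */
    unsigned long page_size  = 4096;                /* g_page_size */
    unsigned long used_pages = 1000;                /* atomic_read(&g_alloced_pages) */

    /* same formula as _DkMemoryAvailableQuota() */
    unsigned long avail = heap_span - used_pages * page_size;
    printf("%lu bytes available\n", avail); /* 268435456 - 4096000 = 264339456 */
    return 0;
}
```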

+ 1 - 3
Pal/src/host/Linux-SGX/db_streams.c

@@ -73,9 +73,7 @@ out:
 /* _DkStreamUnmap for internal use. Unmap stream at certain memory address.
    The memory is unmapped as a whole.*/
 int _DkStreamUnmap(void* addr, uint64_t size) {
-    /* Just let the kernel tell us if the mapping isn't good. */
-    free_pages(addr, size);
-    return 0;
+    return free_enclave_pages(addr, size);
 }
 
 static size_t addr_size(const struct sockaddr* addr) {

+ 159 - 243
Pal/src/host/Linux-SGX/enclave_pages.c

@@ -1,323 +1,239 @@
-#include <pal_linux.h>
-#include <pal_internal.h>
-#include <pal_security.h>
-#include <api.h>
+#include "api.h"
 #include "enclave_pages.h"
+#include "list.h"
+#include "pal_error.h"
+#include "pal_internal.h"
+#include "pal_linux.h"
+#include "pal_security.h"
 
-#include <list.h>
-
-#include <stdint.h>
+struct atomic_int g_alloced_pages;
 
 static size_t g_page_size = PRESET_PAGESIZE;
-void * heap_base;
-static uint64_t heap_size;
+static void* g_heap_bottom;
+static void* g_heap_top;
 
-/* This list keeps heap_vma structures of used/reserved regions organized in DESCENDING order.*/
+/* list of VMAs of used memory areas kept in DESCENDING order */
+/* TODO: rewrite the logic so that this list keeps VMAs in ascending order */
 DEFINE_LIST(heap_vma);
 struct heap_vma {
     LIST_TYPE(heap_vma) list;
-    void * top;
-    void * bottom;
+    void* bottom;
+    void* top;
 };
-
 DEFINE_LISTP(heap_vma);
-static LISTP_TYPE(heap_vma) heap_vma_list = LISTP_INIT;
-PAL_LOCK heap_vma_lock = LOCK_INIT;
-
-struct atomic_int alloced_pages, max_alloced_pages;
 
-void init_pages (void)
-{
-    uint64_t reserved_for_exec = 0;
+static LISTP_TYPE(heap_vma) g_heap_vma_list = LISTP_INIT;
+static PAL_LOCK g_heap_vma_lock = LOCK_INIT;
 
-    heap_base = pal_sec.heap_min;
-    heap_size = pal_sec.heap_max - pal_sec.heap_min;
+int init_enclave_pages(void) {
+    g_heap_bottom = pal_sec.heap_min;
+    g_heap_top    = pal_sec.heap_max;
 
-    if (pal_sec.exec_size) {
-        struct heap_vma * vma = malloc(sizeof(struct heap_vma));
-        vma->bottom = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, pal_sec.heap_min);
-        vma->top = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP, pal_sec.heap_max);
-        reserved_for_exec = vma->top - vma->bottom;
-        INIT_LIST_HEAD(vma, list);
-        LISTP_ADD(vma, &heap_vma_list, list);
-    }
-
-    SGX_DBG(DBG_M, "available heap size: %lu M\n",
-           (heap_size - reserved_for_exec) / 1024 / 1024);
-}
+    size_t reserved_size = 0;
+    struct heap_vma* exec_vma = NULL;
 
-#define ASSERT_VMA          0
-
-static void assert_vma_list (void)
-{
-#if ASSERT_VMA == 1
-    void * last_addr = heap_base + heap_size;
-    struct heap_vma * vma;
-
-    LISTP_FOR_EACH_ENTRY(vma, &heap_vma_list, list) {
-        SGX_DBG(DBG_M, "[%d] %p - %p\n", pal_sec.pid, vma->bottom, vma->top);
-        if (last_addr < vma->top || vma->top <= vma->bottom) {
-            SGX_DBG(DBG_E, "*** [%d] corrupted heap vma: %p - %p (last = %p) ***\n", pal_sec.pid, vma->bottom, vma->top, last_addr);
-#ifdef DEBUG
-            if (pal_sec.in_gdb)
-                __asm__ volatile ("int $3" ::: "memory");
-#endif
-            ocall_exit(1, /*is_exitgroup=*/true);
+    if (pal_sec.exec_addr < g_heap_top && pal_sec.exec_addr + pal_sec.exec_size > g_heap_bottom) {
+        /* there is an executable mapped inside the heap, carve a VMA for its area; this can happen
+         * in case of non-PIE executables that start at a predefined address (typically 0x400000) */
+        exec_vma = malloc(sizeof(*exec_vma));
+        if (!exec_vma) {
+            SGX_DBG(DBG_E, "*** Cannot initialize VMA for executable ***\n");
+            return -PAL_ERROR_NOMEM;
         }
-        last_addr = vma->bottom;
+        exec_vma->bottom = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, g_heap_bottom);
+        exec_vma->top = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP, g_heap_top);
+        INIT_LIST_HEAD(exec_vma, list);
+        LISTP_ADD(exec_vma, &g_heap_vma_list, list);
+
+        reserved_size += exec_vma->top - exec_vma->bottom;
     }
-#endif
-}
 
-static void * reserve_area(void * addr, size_t size, struct heap_vma * prev)
-{
-    struct heap_vma * next;
+    atomic_add(reserved_size / g_page_size, &g_alloced_pages);
 
-    if (prev) {
-        // If this is the last entry, don't wrap around
-        if (prev->list.next == LISTP_FIRST_ENTRY(&heap_vma_list, struct heap_vma, list))
-            next = NULL;
-        else
-            next = prev->list.next;
-    } else {
-        /* In this case, the list is empty, or
-         * first vma starts at or below the allocation site.
-         *
-         * The next field will be used to merge vmas with the allocation, if
-         * they overlap, until the vmas drop below the requested addr
-         * (traversing in decreasing virtual address order)
-         */
-        next = LISTP_EMPTY(&heap_vma_list) ? NULL :
-            LISTP_FIRST_ENTRY(&heap_vma_list, struct heap_vma, list);
-    }
+    SGX_DBG(DBG_M, "Heap size: %luM\n", (g_heap_top - g_heap_bottom - reserved_size) / 1024 / 1024);
+    return 0;
+}
 
-    if (prev && next)
-        SGX_DBG(DBG_M, "insert vma between %p-%p and %p-%p\n",
-                next->bottom, next->top, prev->bottom, prev->top);
-    else if (prev)
-        SGX_DBG(DBG_M, "insert vma below %p-%p\n", prev->bottom, prev->top);
-    else if (next)
-        SGX_DBG(DBG_M, "insert vma above %p-%p\n", next->bottom, next->top);
+static void* __create_vma_and_merge(void* addr, size_t size, struct heap_vma* vma_above) {
+    assert(_DkInternalIsLocked(&g_heap_vma_lock));
+    assert(addr && size);
 
-    struct heap_vma * vma = NULL;
-    while (prev) {
-        struct heap_vma * prev_prev = NULL;
+    if (addr < g_heap_bottom)
+        return NULL;
 
-        if (prev->bottom > addr + size)
-            break;
+    /* create VMA with [addr, addr+size); in case of existing overlapping VMAs, the created VMA is
+     * merged with them and the old VMAs are discarded, similar to mmap(MAP_FIXED) */
+    struct heap_vma* vma = malloc(sizeof(*vma));
+    if (!vma)
+        return NULL;
 
-        /* This appears to be doing a reverse search; we should stop before we
-         * wrap back to the last entry */
-        if (prev->list.prev != LISTP_LAST_ENTRY(&heap_vma_list, struct heap_vma, list))
-            prev_prev = LIST_ENTRY(prev->list.prev, struct heap_vma, list);
-
-        if (!vma) {
-            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
-                    prev->bottom, prev->top);
-
-            vma = prev;
-            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
-            vma->bottom = addr;
-        } else {
-            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
-                    prev->bottom, prev->top);
-
-            vma->top = (prev->top > vma->top) ? prev->top : vma->top;
-            LISTP_DEL(prev, &heap_vma_list,list);
-            free(prev);
-        }
+    vma->bottom = addr;
+    vma->top    = addr + size;
 
-        prev = prev_prev;
+    /* find VMAs to merge:
+     *   (1) start from `vma_above` and iterate through VMAs with higher addresses for merges
+     *   (2) start from `vma_below` and iterate through VMAs with lower addresses for merges */
+    struct heap_vma* vma_below;
+    if (vma_above) {
+        vma_below = LISTP_NEXT_ENTRY(vma_above, &g_heap_vma_list, list);
+    } else {
+        /* no VMA above `addr`; VMA right below `addr` must be the first (highest-address) in list */
+        vma_below = LISTP_FIRST_ENTRY(&g_heap_vma_list, struct heap_vma, list);
     }
 
-    while (next) {
-        struct heap_vma * next_next = NULL;
+    while (vma_above && vma_above->bottom <= vma->top) {
+        /* newly created VMA grows into above VMA; expand newly created VMA and free above-VMA */
+        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
+                vma_above->bottom, vma_above->top);
 
-        if (next->top < addr)
-            break;
+        struct heap_vma* vma_above_above = LISTP_PREV_ENTRY(vma_above, &g_heap_vma_list, list);
+
+        vma->bottom = MIN(vma_above->bottom, vma->bottom);
+        vma->top    = MAX(vma_above->top, vma->top);
+        LISTP_DEL(vma_above, &g_heap_vma_list, list);
 
-        if (next->list.next != LISTP_FIRST_ENTRY(&heap_vma_list, struct heap_vma, list))
-            next_next = LIST_ENTRY(next->list.next, struct heap_vma, list);
+        free(vma_above);
+        vma_above = vma_above_above;
+    }
 
-        if (!vma) {
-            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", addr, addr + size,
-                    next->bottom, next->top);
+    while (vma_below && vma_below->top >= vma->bottom) {
+        /* newly created VMA grows into below VMA; expand newly created VMA and free below-VMA */
+        SGX_DBG(DBG_M, "Merge %p-%p and %p-%p\n", vma->bottom, vma->top,
+                vma_below->bottom, vma_below->top);
 
-            vma = next;
-            vma->top = (addr + size > vma->top) ? addr + size : vma->top;
-        } else {
-            SGX_DBG(DBG_M, "merge %p-%p and %p-%p\n", vma->bottom, vma->top,
-                    next->bottom, next->top);
+        struct heap_vma* vma_below_below = LISTP_NEXT_ENTRY(vma_below, &g_heap_vma_list, list);
 
-            vma->bottom = next->bottom;
-            LISTP_DEL(next, &heap_vma_list, list);
-            free(next);
-        }
+        vma->bottom = MIN(vma_below->bottom, vma->bottom);
+        vma->top    = MAX(vma_below->top, vma->top);
+        LISTP_DEL(vma_below, &g_heap_vma_list, list);
 
-        next = next_next;
+        free(vma_below);
+        vma_below = vma_below_below;
     }
 
-    if (!vma) {
-        vma = malloc(sizeof(struct heap_vma));
-        if (!vma) {
-            return NULL;
-        }
-        vma->top = addr + size;
-        vma->bottom = addr;
-        INIT_LIST_HEAD(vma, list);
-        LISTP_ADD_AFTER(vma, prev, &heap_vma_list, list);
-    }
+    INIT_LIST_HEAD(vma, list);
+    LISTP_ADD_AFTER(vma, vma_above, &g_heap_vma_list, list);
+    SGX_DBG(DBG_M, "Created vma %p-%p\n", vma->bottom, vma->top);
 
     if (vma->bottom >= vma->top) {
-        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n",
-                vma->bottom, vma->top);
-#ifdef DEBUG
-        if (pal_sec.in_gdb)
-            __asm__ volatile ("int $3" ::: "memory");
-#endif
+        SGX_DBG(DBG_E, "*** Bad memory bookkeeping: %p - %p ***\n", vma->bottom, vma->top);
+        ocall_exit(/*exitcode=*/1, /*is_exitgroup=*/true);
     }
-    assert_vma_list();
 
-    atomic_add(size / g_page_size, &alloced_pages);
+    atomic_add(size / g_page_size, &g_alloced_pages);
     return addr;
 }
 
+void* get_enclave_pages(void* addr, size_t size) {
+    void* ret = NULL;
 
-// TODO: This function should be fixed to always either return exactly `addr` or
-// fail.
-void * get_reserved_pages(void * addr, size_t size)
-{
     if (!size)
         return NULL;
 
-    SGX_DBG(DBG_M, "*** get_reserved_pages: heap_base %p, heap_size %lu, limit %p ***\n", heap_base, heap_size, heap_base + heap_size);
-    if (addr >= heap_base + heap_size) {
-        SGX_DBG(DBG_E, "*** allocating out of heap: %p ***\n", addr);
-        return NULL;
-    }
-
     size = ALIGN_UP(size, g_page_size);
     addr = ALIGN_DOWN_PTR(addr, g_page_size);
 
-    SGX_DBG(DBG_M, "allocate %ld bytes at %p\n", size, addr);
+    assert(access_ok(addr, size));
 
-    _DkInternalLock(&heap_vma_lock);
+    SGX_DBG(DBG_M, "Allocating %ld bytes at %p\n", size, addr);
 
-    struct heap_vma * prev = NULL;
-    struct heap_vma * vma;
+    struct heap_vma* vma_above = NULL;
+    struct heap_vma* vma;
 
-    /* Allocating in the heap region.  This loop searches the vma list to
-     * find the first vma with a starting address lower than the requested
-     * address.  Recall that vmas are in descending order.
-     *
-     * If the very first vma matches, prev will be null.
-     */
-    if (addr && addr >= heap_base &&
-        addr + size <= heap_base + heap_size) {
-        LISTP_FOR_EACH_ENTRY(vma, &heap_vma_list, list) {
-            if (vma->bottom < addr)
-                break;
-            prev = vma;
-        }
-        void * ret = reserve_area(addr, size, prev);
-        _DkInternalUnlock(&heap_vma_lock);
-        return ret;
-    }
+    _DkInternalLock(&g_heap_vma_lock);
 
     if (addr) {
-        _DkInternalUnlock(&heap_vma_lock);
-        return NULL;
-    }
+        /* caller specified concrete address; find VMA right-above this address */
+        if (addr < g_heap_bottom || addr + size > g_heap_top)
+            goto out;
 
-    void * avail_top = heap_base + heap_size;
-
-    LISTP_FOR_EACH_ENTRY(vma, &heap_vma_list, list) {
-        if ((size_t)(avail_top - vma->top) > size) {
-            addr = avail_top - size;
-            void * ret = reserve_area(addr, size, prev);
-            _DkInternalUnlock(&heap_vma_lock);
-            return ret;
+        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
+            if (vma->bottom < addr) {
+                /* current VMA is not above `addr`, thus vma_above is VMA right-above `addr` */
+                break;
+            }
+            vma_above = vma;
+        }
+        ret = __create_vma_and_merge(addr, size, vma_above);
+    } else {
+        /* caller did not specify address; find first (highest-address) empty slot that fits */
+        void* vma_above_bottom = g_heap_top;
+
+        LISTP_FOR_EACH_ENTRY(vma, &g_heap_vma_list, list) {
+            if (vma->top < vma_above_bottom - size) {
+                ret = __create_vma_and_merge(vma_above_bottom - size, size, vma_above);
+                goto out;
+            }
+            vma_above = vma;
+            vma_above_bottom = vma_above->bottom;
         }
-        prev = vma;
-        avail_top = prev->bottom;
-    }
 
-    if (avail_top >= heap_base + size) {
-        addr = avail_top - size;
-        void * ret = reserve_area(addr, size, prev);
-        _DkInternalUnlock(&heap_vma_lock);
-        return ret;
+        /* corner case: there may be enough space between heap bottom and the lowest-address VMA */
+        if (g_heap_bottom < vma_above_bottom - size)
+            ret = __create_vma_and_merge(vma_above_bottom - size, size, vma_above);
     }
 
-    _DkInternalUnlock(&heap_vma_lock);
+out:
+    _DkInternalUnlock(&g_heap_vma_lock);
 
-    SGX_DBG(DBG_E, "*** Not enough space on the heap (requested = %lu) ***\n", size);
-    __asm__ volatile("int $3");
-    return NULL;
+    if (!ret) {
+        SGX_DBG(DBG_E, "*** Cannot allocate %lu bytes on the heap (at address %p) ***\n", size, addr);
+    }
+    return ret;
 }
 
-void free_pages(void * addr, size_t size)
-{
-    void * addr_top = addr + size;
+int free_enclave_pages(void* addr, size_t size) {
+    int ret = 0;
 
-    SGX_DBG(DBG_M, "free_pages: trying to free %p %lu\n", addr, size);
-
-    if (!addr || !size)
-        return;
-
-    addr = ALIGN_DOWN_PTR(addr, g_page_size);
-    addr_top = ALIGN_UP_PTR(addr_top, g_page_size);
+    if (!size)
+        return -PAL_ERROR_NOMEM;
 
-    if (addr >= heap_base + heap_size)
-        return;
-    if (addr_top <= heap_base)
-        return;
-    if (addr_top > heap_base + heap_size)
-        addr_top = heap_base + heap_size;
-    if (addr < heap_base)
-        addr = heap_base;
+    size = ALIGN_UP(size, g_page_size);
 
-    SGX_DBG(DBG_M, "free %ld bytes at %p\n", size, addr);
+    if (!access_ok(addr, size) || !IS_ALIGNED_PTR(addr, g_page_size) ||
+        addr < g_heap_bottom || addr + size > g_heap_top) {
+        return -PAL_ERROR_INVAL;
+    }
 
-    _DkInternalLock(&heap_vma_lock);
+    SGX_DBG(DBG_M, "Freeing %ld bytes at %p\n", size, addr);
 
-    struct heap_vma * vma, * p;
+    _DkInternalLock(&g_heap_vma_lock);
 
-    LISTP_FOR_EACH_ENTRY_SAFE(vma, p, &heap_vma_list, list) {
-        if (vma->bottom >= addr_top)
+    struct heap_vma* vma;
+    struct heap_vma* p;
+    LISTP_FOR_EACH_ENTRY_SAFE(vma, p, &g_heap_vma_list, list) {
+        if (vma->bottom >= addr + size)
             continue;
         if (vma->top <= addr)
             break;
+
+        /* found VMA overlapping with memory area to free */
         if (vma->bottom < addr) {
-            struct heap_vma * new = malloc(sizeof(struct heap_vma));
-            new->top = addr;
+            /* create VMA [vma->bottom, addr); this may leave VMA [addr + size, vma->top), see below */
+            struct heap_vma* new = malloc(sizeof(*new));
+            if (!new) {
+                SGX_DBG(DBG_E, "*** Cannot create split VMA during free of address %p ***\n", addr);
+                ret = -PAL_ERROR_NOMEM;
+                goto out;
+            }
+            new->top    = addr;
             new->bottom = vma->bottom;
             INIT_LIST_HEAD(new, list);
             LIST_ADD(new, vma, list);
         }
 
-        vma->bottom = addr_top;
-        if (vma->top <= vma->bottom) {
-            LISTP_DEL(vma, &heap_vma_list, list); free(vma);
+        /* compress overlapping VMA to [addr + size, vma->top) */
+        vma->bottom = addr + size;
+        if (vma->top <= addr + size) {
+            /* memory area to free completely covers/extends above the rest of the VMA */
+            LISTP_DEL(vma, &g_heap_vma_list, list);
+            free(vma);
         }
     }
 
-    assert_vma_list();
-
-    _DkInternalUnlock(&heap_vma_lock);
-
-    unsigned int val = atomic_read(&alloced_pages);
-    atomic_sub(size / g_page_size, &alloced_pages);
-    if (val > atomic_read(&max_alloced_pages))
-        atomic_set(&max_alloced_pages, val);
-}
-
-void print_alloced_pages (void)
-{
-    unsigned int val = atomic_read(&alloced_pages);
-    unsigned int max = atomic_read(&max_alloced_pages);
+    atomic_sub(size / g_page_size, &g_alloced_pages);
 
-    printf("                >>>>>>>> "
-           "Enclave heap size =         %10d pages / %10ld pages\n",
-           val > max ? val : max, heap_size / g_page_size);
+out:
+    _DkInternalUnlock(&g_heap_vma_lock);
+    return ret;
 }
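
To see the core merge idea of `__create_vma_and_merge()` in isolation: the allocator keeps `[bottom, top)` intervals in descending address order, and a newly created interval absorbs every neighbor it overlaps or touches. A self-contained sketch of that invariant using a plain singly linked list instead of Graphene's LISTP macros (illustrative only; error handling elided):

```c
#include <stdio.h>
#include <stdlib.h>

/* intervals kept in DESCENDING address order, like g_heap_vma_list */
struct vma {
    struct vma* next;          /* next entry = lower addresses */
    unsigned long bottom, top; /* the interval [bottom, top) */
};

/* insert [bottom, top) and absorb every neighbor it overlaps or touches */
static struct vma* insert_and_merge(struct vma* head, unsigned long bottom,
                                    unsigned long top) {
    struct vma* vma = malloc(sizeof(*vma)); /* error handling elided */
    vma->bottom = bottom;
    vma->top    = top;

    struct vma** link = &head;
    while (*link) {
        struct vma* cur = *link;
        if (cur->bottom > vma->top) {
            link = &cur->next; /* strictly above: keep walking down */
        } else if (cur->top < vma->bottom) {
            break;             /* strictly below: insertion point found */
        } else {
            /* overlapping or adjacent: widen the new interval, drop the old one */
            if (cur->bottom < vma->bottom) vma->bottom = cur->bottom;
            if (cur->top    > vma->top)    vma->top    = cur->top;
            *link = cur->next;
            free(cur);
        }
    }
    vma->next = *link;
    *link = vma;
    return head;
}

int main(void) {
    struct vma* head = NULL;
    head = insert_and_merge(head, 0x3000, 0x4000);
    head = insert_and_merge(head, 0x1000, 0x2000);
    head = insert_and_merge(head, 0x2000, 0x3000); /* bridges the two above */
    for (struct vma* v = head; v; v = v->next)
        printf("[%#lx, %#lx)\n", v->bottom, v->top); /* prints: [0x1000, 0x4000) */
    return 0;
}
```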

+ 5 - 4
Pal/src/host/Linux-SGX/enclave_pages.h

@@ -1,4 +1,5 @@
-extern void* heap_base;
-void init_pages(void);
-void* get_reserved_pages(void* addr, size_t size);
-void free_pages(void* addr, size_t size);
+#include <stddef.h>
+
+int init_enclave_pages(void);
+void* get_enclave_pages(void* addr, size_t size);
+int free_enclave_pages(void* addr, size_t size);
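
A minimal sketch of a call site for this API after the rename (the function names and error constants come from the diff above; the wrapper itself is illustrative):

```c
#include "enclave_pages.h"
#include "pal_error.h"

/* illustrative helper: allocate a scratch region anywhere on the enclave heap,
 * use it, then free it, propagating the new error code from deallocation */
static int with_scratch_region(size_t size) {
    void* mem = get_enclave_pages(/*addr=*/NULL, size); /* NULL: allocator picks the address */
    if (!mem)
        return -PAL_ERROR_NOMEM;

    /* ... use the region ... */

    return free_enclave_pages(mem, size); /* 0 on success, -PAL_ERROR_* on failure */
}
```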