
Fixes for the allocator/VMA code (#180)

* Simplify code in slab.c

* Remove unused (and broken?) function

* Don't use alloca for large buffers

* Various allocator code simplifications and fixes

* Fixup for daf845972f3ece9783edda9adbd2f6670f035e30

(should be squashed with it before merge)

* Add documentation to get_reserved_pages and cp_alloc

* Fixup for lookup_overlap_vma, broken by one of the previous commits

* Mark LTP test futex_wait03 as FLAKY
Michał Kowalczyk, 6 years ago
commit d83536ff0e
+ 2 - 2
LibOS/shim/include/shim_checkpoint.h

@@ -50,7 +50,7 @@ extern char __migratable_end;
 /* TSAI 7/11/2012:
    The checkpoint scheme we are expecting is to support an easy syntax to
    implement migration procedure. A migration procedure can be written
-   in teh following syntax:
+   in the following syntax:
 
    BEGIN_CP_DEFINITION(exec)
    {
@@ -85,7 +85,7 @@ struct shim_cp_entry
     union
     {
         ptr_t cp_val;   /* interger value */
-        /* orignally there is a pointer, now we don't need them */
+        /* originally there is a pointer, now we don't need them */
     } cp_un;
 };
 

+ 6 - 4
LibOS/shim/include/shim_internal.h

@@ -657,9 +657,11 @@ static inline int __ref_dec (REFTYPE * ref)
     register int _c;
     do {
         _c = atomic_read(ref);
-        assert(_c > 0);
-        if (!_c)
+        if (!_c) {
+            debug("Fail: Trying to drop reference count below 0\n");
+            bug();
             return 0;
+        }
     } while (atomic_cmpxchg(ref, _c, _c - 1) != _c);
     return _c - 1;
 }
@@ -708,8 +710,8 @@ extern void * migrated_memory_start;
 extern void * migrated_memory_end;
 
 #define MEMORY_MIGRATED(mem)                                    \
-        ((void *) mem >= migrated_memory_start &&               \
-         (void *) mem < migrated_memory_end)
+        ((void *) (mem) >= migrated_memory_start &&             \
+         (void *) (mem) < migrated_memory_end)
 
 extern void * __load_address, * __load_address_end;
 extern void * __code_address, * __code_address_end;
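
Note (illustrative, not part of the patch): why the extra parentheses around (mem) matter; `p` and `off` are hypothetical names.

    extern void * migrated_memory_start;
    extern void * migrated_memory_end;

    /* The old, unparenthesized form: */
    #define MEMORY_MIGRATED_OLD(mem)                        \
            ((void *) mem >= migrated_memory_start &&       \
             (void *) mem < migrated_memory_end)

    static int example (int * p, int off)
    {
        /* Expands to `(void *) p + off`: the cast binds only to `p`, so the
         * offset is applied in bytes (GNU void-pointer arithmetic) instead of
         * in sizeof(int) units, and the range check tests the wrong address.
         * The parenthesized version expands to `(void *) (p + off)`, which is
         * the address the caller actually meant. */
        return MEMORY_MIGRATED_OLD(p + off);
    }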

+ 5 - 5
LibOS/shim/include/shim_vma.h

@@ -94,15 +94,18 @@ int bkeep_mmap (void * addr, uint64_t length, int prot, int flags,
                 struct shim_handle * file, uint64_t offset, const char * comment);
 
 /* Bookkeeping munmap() system call */
-int bkeep_munmap (void * addr, uint64_t length, const int * flags);
+int bkeep_munmap (void * addr, uint64_t length, int flags);
 
 /* Bookkeeping mprotect() system call */
-int bkeep_mprotect (void * addr, uint64_t length, int prot, const int * flags);
+int bkeep_mprotect (void * addr, uint64_t length, int prot, int flags);
 
 /* Get vma bookkeeping handle */
 void get_vma (struct shim_vma * vma);
 void put_vma (struct shim_vma * vma);
 
+/* Returns 0 on success, -E* on failure.
+   Calls `get_vma` on the result before returning it.
+*/
 int lookup_supervma (const void * addr, uint64_t len, struct shim_vma ** vma);
 int lookup_overlap_vma (const void * addr, uint64_t len, struct shim_vma ** vma);
 
@@ -118,9 +121,6 @@ void unmap_all_vmas (void);
 /* Debugging */
 void debug_print_vma_list (void);
 
-void print_vma_hash (struct shim_vma * vma, void * addr, uint64_t len,
-                     bool force_protect);
-
 /* Constants */
 extern unsigned long mem_max_npages;
 extern unsigned long brk_max_size;
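
Note (illustrative, not part of the patch): a minimal caller following the contract documented above, where `addr` and `len` are hypothetical inputs. A 0 return means `*vma` holds a reference taken with `get_vma`, which the caller must drop.

    struct shim_vma * vma = NULL;
    if (!lookup_overlap_vma(addr, len, &vma)) {
        /* ... use vma->addr / vma->length while holding the reference ... */
        put_vma(vma);   /* balance the get_vma() taken inside the lookup */
    }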

+ 39 - 71
LibOS/shim/src/bookkeep/shim_vma.c

@@ -35,6 +35,7 @@
 
 #include <asm/mman.h>
 #include <errno.h>
+#include <stdbool.h>
 
 unsigned long mem_max_npages __attribute_migratable = DEFAULT_MEM_MAX_NPAGES;
 
@@ -190,7 +191,7 @@ void put_vma (struct shim_vma * vma)
             put_handle(vma->file);
 
         if (MEMORY_MIGRATED(vma))
-            memset(vma, 0, sizeof(struct shim_vma));
+            memset(vma, 0, sizeof(*vma));
         else
             free_mem_obj_to_mgr(vma_mgr, vma);
     }
@@ -209,7 +210,7 @@ static int __bkeep_mmap (void * addr, uint64_t length, int prot, int flags,
                          const char * comment);
 
 static int __bkeep_mprotect (void * addr, uint64_t length, int prot,
-                             const int * flags);
+                             int flags);
 
 static void __check_delayed_bkeep (void)
 {
@@ -244,16 +245,13 @@ static struct shim_vma * get_new_vma (void)
     return tmp;
 }
 
-static bool check_vma_flags (const struct shim_vma * vma, const int * flags)
+static bool check_vma_flags (const struct shim_vma * vma, int flags)
 {
-    if (!flags)
-        return true;
-
     if (vma->flags & VMA_UNMAPPED)
         return true;
 
-    if ((vma->flags & VMA_INTERNAL) != ((*flags) & VMA_INTERNAL)) {
-        debug("Check vma flag failure: vma flags %x, checked flags %x\n", vma->flags, *flags);
+    if ((vma->flags & VMA_INTERNAL) != (flags & VMA_INTERNAL)) {
+        debug("Check vma flag failure: vma flags %x, checked flags %x\n", vma->flags, flags);
         bug();
         return false;
     }
@@ -294,13 +292,13 @@ static int __bkeep_mmap (void * addr, uint64_t length,
     if (tmp) { /* the range is included in a vma */
         if (tmp->addr != addr || tmp->length != length) {
             /* we are inside some unmapped area, do a split case */
-            ret = __bkeep_mprotect(addr, length, prot, &flags);
+            ret = __bkeep_mprotect(addr, length, prot, flags);
             if (ret < 0)
                 goto err;
             /* now we get the exact vma handle */
             tmp = __lookup_vma(addr, length);
             assert(tmp);
-            assert(check_vma_flags(tmp, &flags));
+            assert(check_vma_flags(tmp, flags));
         }
     } else {
         struct shim_vma * cont = NULL, * n; /* cont: continue to scan vmas */
@@ -315,7 +313,7 @@ static int __bkeep_mmap (void * addr, uint64_t length,
 
             if (prev) { /* has a precendent vma */
                 if (test_vma_endin(prev, addr, length)) {
-                    if (!check_vma_flags(prev, &flags)) {
+                    if (!check_vma_flags(prev, flags)) {
                         ret = -EACCES;
                         goto err;
                     }
@@ -340,7 +338,7 @@ static int __bkeep_mmap (void * addr, uint64_t length,
                 if (!test_vma_startin(cont, addr, length))
                     break;
 
-                if (!check_vma_flags(cont, &flags)) {
+                if (!check_vma_flags(cont, flags)) {
                     ret = -EACCES;
                     goto err;
                 }
@@ -400,7 +398,7 @@ int bkeep_mmap (void * addr, uint64_t length, int prot, int flags,
  * We need to split the area aur reduce the size
  * Check the address falls between alread allocated area or not
  */
-static int __bkeep_munmap (void * addr, uint64_t length, const int * flags)
+static int __bkeep_munmap (void * addr, uint64_t length, int flags)
 {
     struct shim_vma * tmp, * n;
 
@@ -472,7 +470,7 @@ static int __bkeep_munmap (void * addr, uint64_t length, const int * flags)
     return 0;
 }
 
-int bkeep_munmap (void * addr, uint64_t length, const int * flags)
+int bkeep_munmap (void * addr, uint64_t length, int flags)
 {
     if (!addr || !length)
         return -EINVAL;
@@ -487,7 +485,7 @@ int bkeep_munmap (void * addr, uint64_t length, const int * flags)
 }
 
 static int __bkeep_mprotect (void * addr, uint64_t length, int prot,
-                             const int * flags)
+                             int flags)
 {
     struct shim_vma * tmp = __lookup_vma(addr, length);
     int ret;
@@ -632,7 +630,7 @@ static int __bkeep_mprotect (void * addr, uint64_t length, int prot,
             if (!candidate) {
                 /* no more vmas, protect the whole area */
                 ret = __bkeep_mmap((void *) addr, length, prot,
-                                   VMA_UNMAPPED|(flags ? *flags : 0),
+                                   VMA_UNMAPPED | flags,
                                    NULL, 0, NULL);
                 if (ret < 0)
                     return ret;
@@ -654,7 +652,7 @@ static int __bkeep_mprotect (void * addr, uint64_t length, int prot,
     return 0;
 }
 
-int bkeep_mprotect (void * addr, uint64_t length, int prot, const int * flags)
+int bkeep_mprotect (void * addr, uint64_t length, int prot, int flags)
 {
     if (!addr || !length)
         return -EINVAL;
@@ -705,7 +703,7 @@ void * get_unmapped_vma (uint64_t length, int flags)
     debug("find unmapped vma between %p-%p\n", heap_bottom, heap_top);
 
     do {
-        int found = 0;
+        bool found = false;
         new->addr   = heap_top - length;
         new->length = length;
         new->flags  = flags|VMA_UNMAPPED;
@@ -713,12 +711,12 @@ void * get_unmapped_vma (uint64_t length, int flags)
 
         listp_for_each_entry_reverse(prev, &vma_list, list) {
             if (new->addr >= prev->addr + prev->length) {
-                found = 1;
+                found = true;
                 break;
             }
 
             if (new->addr < heap_bottom) {
-                found = 1;
+                found = true;
                 break;
             }
 
@@ -833,7 +831,7 @@ static struct shim_vma * __lookup_overlap_vma (const void * addr, uint64_t lengt
 
         /* Assert we are really sorted */
         assert(!prev || prev->addr < tmp->addr);
-        /* Insert in order; break once we are past the appropriate point  */
+        /* Insert in order; break once we are past the appropriate point */
         if (tmp->addr > addr)
             break;
         prev = tmp;
@@ -845,29 +843,30 @@ static struct shim_vma * __lookup_overlap_vma (const void * addr, uint64_t lengt
 }
 
 int lookup_overlap_vma (const void * addr, uint64_t length,
-                        struct shim_vma ** vma)
+                        struct shim_vma ** res_vma)
 {
-    struct shim_vma * tmp = NULL;
-    void * tmp_addr = NULL;
-    uint64_t tmp_length;
     lock(vma_list_lock);
 
-    if ((tmp = __lookup_overlap_vma(addr, length, NULL)) && vma)
-        get_vma((tmp));
-
-    if (tmp) {
-        tmp_addr = tmp->addr;
-        tmp_length = tmp->length;
+    struct shim_vma * vma = __lookup_overlap_vma(addr, length, NULL);
+    if (!vma) {
+        unlock(vma_list_lock);
+        if (res_vma)
+            *res_vma = NULL;
+        return -ENOENT;
     }
 
-    unlock(vma_list_lock);
+    if (res_vma)
+        get_vma(vma);
 
-    if (tmp)
-        debug("vma overlapped at %p-%p\n", tmp_addr, tmp_addr + tmp_length);
+    void * tmp_addr = vma->addr;
+    uint64_t tmp_length = vma->length;
 
-    if (vma)
-        *vma = tmp;
-    return tmp ? 0: -ENOENT;
+    unlock(vma_list_lock);
+
+    debug("vma overlapped at %p-%p\n", tmp_addr, tmp_addr + tmp_length);
+    if (res_vma)
+        *res_vma = vma;
+    return 0;
 }
 
 static struct shim_vma * __lookup_vma (const void * addr, uint64_t length)
@@ -918,7 +917,7 @@ static struct shim_vma * __lookup_supervma (const void * addr, uint64_t length,
                 warn("prev addr is %llx, len is %llx\n", prev->addr, prev->length);
         }
         assert(!prev || prev->addr + prev->length <= tmp->addr);
-        /* Insert in order; break once we are past the appropriate point  */
+        /* Insert in order; break once we are past the appropriate point */
         if (tmp->addr > addr)
             break;
         prev = tmp;
@@ -936,7 +935,7 @@ int lookup_supervma (const void * addr, uint64_t length, struct shim_vma ** vma)
     lock(vma_list_lock);
 
     if ((tmp = __lookup_supervma(addr, length, NULL)) && vma)
-        get_vma((tmp));
+        get_vma(tmp);
 
     unlock(vma_list_lock);
 
@@ -1264,7 +1263,7 @@ BEGIN_RS_FUNC(vma)
     SAVE_PROFILE_INTERVAL(vma_lookup_overlap);
 
     if (tmp) {
-        if ((ret = __bkeep_munmap(vma->addr, vma->length, &vma->flags)) < 0)
+        if ((ret = __bkeep_munmap(vma->addr, vma->length, vma->flags)) < 0)
             return ret;
 
         if (prev->list.next == tmp &&
@@ -1411,34 +1410,3 @@ void debug_print_vma_list (void)
                    vma->comment[0] ? vma->comment : "");
     }
 }
-
-void print_vma_hash (struct shim_vma * vma, void * addr, uint64_t len,
-                     bool force_protect)
-{
-    if (!addr)
-        addr = vma->addr;
-    if (!len)
-        len = vma->length - (addr - vma->addr);
-
-    if (addr < vma->addr || addr + len > vma->addr + vma->length)
-        return;
-
-    if (!(vma->prot & PROT_READ)) {
-        if (!force_protect)
-            return;
-        DkVirtualMemoryProtect(vma->addr, vma->length, PAL_PROT_READ);
-    }
-
-    for (uint64_t p = (uint64_t) addr ;
-         p < (uint64_t) addr + len ; p += allocsize) {
-            uint64_t hash = 0;
-            struct shim_md5_ctx ctx;
-            md5_init(&ctx);
-            md5_update(&ctx, (void *) p, allocsize);
-            md5_final(&ctx);
-            memcpy(&hash, ctx.digest, sizeof(uint64_t));
-        }
-
-    if (!(vma->prot & PROT_READ))
-        DkVirtualMemoryProtect(vma->addr, vma->length, vma->prot);
-}

+ 1 - 1
LibOS/shim/src/elf/shim_rtld.c

@@ -575,7 +575,7 @@ map_error:
 #endif
                 bkeep_mprotect((void *) RELOCATE(l, c->mapend),
                                l->loadcmds[l->nloadcmds - 1].mapstart -
-                               c->mapend, PROT_NONE, &flags);
+                               c->mapend, PROT_NONE, flags);
             }
         }
 

+ 2 - 4
LibOS/shim/src/fs/chroot/fs.c

@@ -623,8 +623,7 @@ static int chroot_flush (struct shim_handle * hdl)
 
         if (mapbuf) {
             DkStreamUnmap(mapbuf, mapsize);
-            int flags = VMA_INTERNAL;
-            bkeep_munmap(mapbuf, mapsize, &flags);
+            bkeep_munmap(mapbuf, mapsize, VMA_INTERNAL);
         }
     }
 
@@ -641,8 +640,7 @@ static inline int __map_buffer (struct shim_handle * hdl, int size)
             return 0;
 
         DkStreamUnmap(file->mapbuf, file->mapsize);
-        int flags = VMA_INTERNAL;
-        bkeep_munmap(file->mapbuf, file->mapsize, &flags);
+        bkeep_munmap(file->mapbuf, file->mapsize, VMA_INTERNAL);
 
         file->mapbuf    = NULL;
         file->mapoffset = 0;

+ 1 - 1
LibOS/shim/src/ipc/shim_ipc_child.c

@@ -192,7 +192,7 @@ DEFINE_PROFILE_INTERVAL(ipc_cld_exit_callback, ipc);
 
 int ipc_cld_exit_send (IDTYPE ppid, IDTYPE tid, unsigned int exitcode, unsigned int term_signal)
 {
-    unsigned long send_time = GET_PROFILE_INTERVAL();
+    __attribute__((unused)) unsigned long send_time = GET_PROFILE_INTERVAL();
     BEGIN_PROFILE_INTERVAL_SET(send_time);
     int ret = 0;
 

+ 23 - 34
LibOS/shim/src/shim_checkpoint.c

@@ -801,47 +801,36 @@ int receive_handles_on_stream (struct palhdl_header * hdr, ptr_t base,
     return 0;
 }
 
-#define NTRIES      4
-
 static void * cp_alloc (struct shim_cp_store * store, void * addr, int size)
 {
-    void * requested = addr;
-    struct shim_vma * vma;
-    int ret, n = 0;
-
-    if (!requested) {
-again:
-        if (n == NTRIES)
-            return NULL;
-        if (!(addr = get_unmapped_vma_for_cp(size)))
-            return NULL;
-    } else {
-        ret = lookup_overlap_vma(addr, size, &vma);
-
-        if (!ret) {
-            if (vma->addr != addr || vma->length != size ||
-                !(vma->flags & VMA_UNMAPPED)) {
+    if (addr) {
+        // Caller specified an exact region to alloc.
+        struct shim_vma * vma;
+        bool found = !lookup_overlap_vma(addr, size, &vma);
+        if (found) {
+            bool allocable = vma->addr == addr && vma->length == size
+                             && (vma->flags & VMA_UNMAPPED);
+            if (!allocable) {
                 put_vma(vma);
                 return NULL;
             }
         }
+        return DkVirtualMemoryAlloc(addr, size, 0,
+                                    PAL_PROT_READ|PAL_PROT_WRITE);
+    } else {
+        // Alloc on any address, with specified size.
+        // We need to retry because `get_unmapped_vma_for_cp` is randomized.
+        // TODO: Fix this to remove the need for retrying.
+        while (true) {
+            addr = get_unmapped_vma_for_cp(size);
+            if (!addr)
+                return NULL;
+            addr = (void *) DkVirtualMemoryAlloc(addr, size, 0,
+                                                 PAL_PROT_READ|PAL_PROT_WRITE);
+            if (addr)
+                return addr;
+        }
     }
-
-    addr = (void *) DkVirtualMemoryAlloc(addr, size, 0,
-                                         PAL_PROT_READ|PAL_PROT_WRITE);
-
-    if (!addr) {
-        if (!requested)
-            goto again;
-        return NULL;
-    }
-
-    if (requested && addr != requested) {
-        DkVirtualMemoryFree(addr, size);
-        return NULL;
-    }
-
-    return addr;
 }
 
 DEFINE_PROFILE_CATAGORY(migrate_proc, migrate);

+ 14 - 4
LibOS/shim/src/shim_malloc.c

@@ -129,7 +129,7 @@ static struct shim_heap * __alloc_enough_heap (size_t size)
             DkVirtualMemoryFree(heap->current, heap->end - heap->current);
             int flags = VMA_INTERNAL;
             unlock(shim_heap_lock);
-            bkeep_munmap(heap->current, heap->end - heap->current, &flags);
+            bkeep_munmap(heap->current, heap->end - heap->current, flags);
             lock(shim_heap_lock);
         }
 
@@ -162,10 +162,17 @@ void * __system_malloc (size_t size)
          */
         int flags = MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL;
         addr = get_unmapped_vma(alloc_size, flags);
-        if (!addr) return NULL;
+        if (!addr) {
+            unlock(shim_heap_lock);
+            return NULL;
+        }
         addr_new = (void *) DkVirtualMemoryAlloc(addr, alloc_size, 0,
                                                  PAL_PROT_WRITE|PAL_PROT_READ);
-        if (!addr_new) return NULL;
+        if (!addr_new) {
+            bkeep_munmap(addr, alloc_size, flags);
+            unlock(shim_heap_lock);
+            return NULL;
+        }
         assert (addr == addr_new);
         bkeep_mmap(addr, alloc_size, PROT_READ|PROT_WRITE,
                    flags, NULL, 0, NULL);
@@ -204,7 +211,7 @@ void __system_free (void * addr, size_t size)
         }
     
     if (! in_reserved_area)
-        bkeep_munmap(addr, ALIGN_UP(size), &flags);
+        bkeep_munmap(addr, ALIGN_UP(size), flags);
 }
 
 int init_heap (void)
@@ -340,7 +347,10 @@ extern_alias(malloc);
 
 void * calloc (size_t nmemb, size_t size)
 {
+    // This overflow checking is not a UB, because the operands are unsigned.
     size_t total = nmemb * size;
+    if (total / size != nmemb)
+        return NULL;
     void *ptr = malloc(total);
     if (ptr)
         memset(ptr, 0, total);
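
Note (illustrative, not part of the patch): the same unsigned-overflow check written as a standalone helper; `mul_overflows` is a hypothetical name. Unsigned multiplication wraps rather than invoking undefined behavior, so the product can be validated by dividing it back.

    #include <stdbool.h>
    #include <stddef.h>

    static bool mul_overflows (size_t nmemb, size_t size, size_t * total)
    {
        *total = nmemb * size;                      /* wraps on overflow */
        return size != 0 && *total / size != nmemb;
    }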

+ 2 - 3
LibOS/shim/src/sys/shim_exec.c

@@ -121,9 +121,8 @@ int shim_do_execve_rtld (struct shim_handle * hdl, const char ** argv,
 
     DkVirtualMemoryFree(old_stack, old_stack_top - old_stack);
     DkVirtualMemoryFree(old_stack_red, old_stack - old_stack_red);
-    int flags = 0;
-    bkeep_munmap(old_stack, old_stack_top - old_stack, &flags);
-    bkeep_munmap(old_stack_red, old_stack - old_stack_red, &flags);
+    bkeep_munmap(old_stack, old_stack_top - old_stack, /*flags=*/0);
+    bkeep_munmap(old_stack_red, old_stack - old_stack_red, /*flags=*/0);
 
     remove_loaded_libraries();
     clean_link_map_list();

+ 3 - 7
LibOS/shim/src/sys/shim_mmap.c

@@ -134,7 +134,7 @@ void * shim_do_mmap (void * addr, size_t length, int prot, int flags, int fd,
 
 free_reserved:
     if (reserved)
-        bkeep_munmap((void *) mapped, mapped_end - mapped, &flags);
+        bkeep_munmap((void *) mapped, mapped_end - mapped, flags);
     return (void *) ret;
 }
 
@@ -142,9 +142,7 @@ int shim_do_mprotect (void * addr, size_t len, int prot)
 {
     uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
     uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
-    int flags = 0;
-
-    if (bkeep_mprotect((void *) mapped, mapped_end - mapped, prot, &flags) < 0)
+    if (bkeep_mprotect((void *) mapped, mapped_end - mapped, prot, /*flags=*/0) < 0)
         return -EACCES;
 
     if (!DkVirtualMemoryProtect((void *) mapped, mapped_end - mapped, prot))
@@ -167,9 +165,7 @@ int shim_do_munmap (void * addr, size_t len)
 
     uintptr_t mapped = ALIGN_DOWN((uintptr_t) addr);
     uintptr_t mapped_end = ALIGN_UP((uintptr_t) addr + len);
-    int flags = 0;
-
-    if (bkeep_munmap((void *) mapped, mapped_end - mapped, &flags) < 0)
+    if (bkeep_munmap((void *) mapped, mapped_end - mapped, /*flags=*/0) < 0)
         return -EACCES;
 
     DkVirtualMemoryFree((void *) mapped, mapped_end - mapped);

+ 2 - 0
LibOS/shim/test/apps/ltp/FLAKY

@@ -23,6 +23,8 @@ Prone to hanging - I think a memory corruption issue that may have a pending fix
 recvfrom01,1
 recvfrom01,2
 
+futex_wait03,1 (see https://github.com/oscarlab/graphene/pull/180#issuecomment-368970338)
+
 Intermittent seg fault
 kill03,1
 

+ 0 - 1
LibOS/shim/test/apps/ltp/PASSED

@@ -212,7 +212,6 @@ futex_wait01,1
 futex_wait01,2
 futex_wait01,3
 futex_wait01,4
-futex_wait03,1
 futex_wait04,1
 futex_wait05,1
 futex_wait_bitset01,1

+ 6 - 0
Pal/src/host/Linux-SGX/db_memory.c

@@ -85,6 +85,12 @@ int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int pro
     mem = get_reserved_pages(addr, size);
     if (!mem)
         return addr ? -PAL_ERROR_DENIED : -PAL_ERROR_NOMEM;
+    if (addr && mem != addr) {
+        // TODO: This case should be made impossible by fixing
+        // `get_reserved_pages` semantics.
+        free_pages(mem, size);
+        return -PAL_ERROR_INVAL; // `addr` was unaligned.
+    }
 
     memset(mem, 0, size);
 

+ 30 - 8
Pal/src/host/Linux-SGX/enclave_framework.c

@@ -433,7 +433,7 @@ static int init_trusted_file (const char * key, const char * uri)
 int init_trusted_files (void)
 {
     struct config_store * store = pal_state.root_config;
-    char * cfgbuf;
+    char * cfgbuf = NULL;
     ssize_t cfgsize;
     int nuris, ret;
 
@@ -443,7 +443,12 @@ int init_trusted_files (void)
             goto out;
     }
 
-    cfgbuf = __alloca(CONFIG_MAX);
+    cfgbuf = malloc(CONFIG_MAX);
+    if (!cfgbuf) {
+        ret = -PAL_ERROR_NOMEM;
+        goto out;
+    }
+
     ssize_t len = get_config(store, "loader.preload", cfgbuf, CONFIG_MAX);
     if (len > 0) {
         int npreload = 0;
@@ -469,7 +474,14 @@ int init_trusted_files (void)
     if (cfgsize <= 0)
         goto no_trusted;
 
-    cfgbuf = __alloca(cfgsize);
+    free(cfgbuf);
+    cfgbuf = malloc(cfgsize);
+    if (!cfgbuf) {
+        ret = -PAL_ERROR_NOMEM;
+        goto out;
+    }
+
+
     nuris = get_config_entries(store, "sgx.trusted_files", cfgbuf, cfgsize);
     if (nuris <= 0)
         goto no_trusted;
@@ -499,7 +511,13 @@ no_trusted:
     if (cfgsize <= 0)
         goto no_allowed;
 
-    cfgbuf = __alloca(cfgsize);
+    free(cfgbuf);
+    cfgbuf = malloc(cfgsize);
+    if (!cfgbuf) {
+        ret = -PAL_ERROR_NOMEM;
+        goto out;
+    }
+
     nuris = get_config_entries(store, "sgx.allowed_files", cfgbuf, cfgsize);
     if (nuris <= 0)
         goto no_allowed;
@@ -523,6 +541,7 @@ no_trusted:
 no_allowed:
     ret = 0;
 out:
+    free(cfgbuf);
     return ret;
 }
 
@@ -540,7 +559,10 @@ int init_trusted_children (void)
     if (cfgsize <= 0)
         return 0;
 
-    char * cfgbuf = __alloca(cfgsize);
+    char * cfgbuf = malloc(cfgsize);
+    if (!cfgbuf)
+        return -PAL_ERROR_NOMEM;
+
     int nuris = get_config_entries(store, "sgx.trusted_mrenclave",
                                    cfgbuf, cfgsize);
     if (nuris > 0) {
@@ -560,7 +582,7 @@ int init_trusted_children (void)
                 register_trusted_child(uri, mrenclave);
         }
     }
-
+    free(cfgbuf);
     return 0;
 }
 
@@ -921,13 +943,13 @@ int _DkStreamAttestationRespond (PAL_HANDLE stream, void * data,
     }
 
     if (ret == 1) {
-        SGX_DBG(DBG_S, "Not an allowed encalve (mrenclave = %s)\n",
+        SGX_DBG(DBG_S, "Not an allowed enclave (mrenclave = %s)\n",
                 hex2str(att.mrenclave));
         ret = -PAL_ERROR_DENIED;
         goto out;
     }
 
-    SGX_DBG(DBG_S, "Remote attestation succeed!\n");
+    SGX_DBG(DBG_S, "Remote attestation succeeded!\n");
     return 0;
 
 out:

+ 2 - 0
Pal/src/host/Linux-SGX/enclave_pages.c

@@ -68,6 +68,8 @@ static void assert_vma_list (void)
 #endif
 }
 
+// TODO: This function should be fixed to always either return exactly `addr` or
+// fail.
 void * get_reserved_pages(void * addr, uint64_t size)
 {
     if (!size)

+ 2 - 1
Pal/src/pal.h

@@ -30,7 +30,7 @@
 #include <stddef.h>
 #include <stdint.h>
 
-typedef uint64_t PAL_NUM;
+typedef uint64_t      PAL_NUM;
 typedef const char *  PAL_STR;
 typedef void *        PAL_PTR;
 typedef uint32_t      PAL_FLG;
@@ -217,6 +217,7 @@ PAL_CONTROL * pal_control_addr (void);
 #define PAL_PROT_WRITECOPY  0x8     /* 0x8 Copy on write */
 
 
+// If addr != NULL, then the returned region is always exactly at addr.
 PAL_PTR
 DkVirtualMemoryAlloc (PAL_PTR addr, PAL_NUM size, PAL_FLG alloc_type,
                       PAL_FLG prot);
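
Note (illustrative, not part of the patch): how a caller can rely on the contract above; `want` and `size` are hypothetical values.

    PAL_PTR mem = DkVirtualMemoryAlloc(want, size, 0,
                                       PAL_PROT_READ|PAL_PROT_WRITE);
    /* When `want` is non-NULL, a non-NULL `mem` is guaranteed to equal `want`;
     * there is no "allocation moved elsewhere" case to handle. */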

+ 3 - 3
Pal/src/slab.c

@@ -41,8 +41,8 @@ static PAL_LOCK slab_mgr_lock = LOCK_INIT;
 #if STATIC_SLAB == 1
 # define POOL_SIZE 64 * 1024 * 1024 /* 64MB by default */
 static char mem_pool[POOL_SIZE];
-static char *bump = mem_pool;
-static char *mem_pool_end = &mem_pool[POOL_SIZE];
+static void *bump = mem_pool;
+static void *mem_pool_end = &mem_pool[POOL_SIZE];
 #else
 # define PAGE_SIZE (slab_alignment)
 #endif
@@ -71,7 +71,7 @@ static inline void * __malloc (int size)
 static inline void __free (void * addr, int size)
 {
 #if STATIC_SLAB == 1
-    if ((char *) addr >= (char *) mem_pool && (char *) addr + size <= (char *) mem_pool_end)
+    if (addr >= (void *)mem_pool && addr < mem_pool_end)
         return;
 #endif
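
Note (illustrative, not part of the patch): the membership test __free now uses, written as a standalone predicate. Checking only the start address suffices, presumably because an object handed out from the static pool never extends past mem_pool_end.

    static inline int addr_in_static_pool (const void * addr)
    {
        return addr >= (const void *) mem_pool
            && addr <  (const void *) mem_pool_end;
    }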