Browse Source

[LibOS] Merge page size and allocation alignment

These two values are currently indistinguishable from each other from the
LibOS perspective, and we don't expect this to ever change.
Michał Kowalczyk 4 years ago
parent
commit
bae8baa2dd

+ 6 - 2
Documentation/oldwiki/PAL-Host-ABI.md

@@ -130,8 +130,12 @@ The fields of the Graphene control block are defined as follows:
         PAL_PTR_RANGE manifest_preload;
 
         /***** Host information *****/
-        /* host page size / allocation alignment */
-        PAL_NUM pagesize, alloc_align;
+        /* Host allocation alignment.
+         * This currently is (and most likely will always be) indistinguishable from the page size,
+         * looking from the LibOS perspective. The two values can be different on the PAL level though,
+         * see e.g. SYSTEM_INFO::dwAllocationGranularity on Windows.
+         */
+        PAL_NUM alloc_align;
         /* CPU information */
         PAL_CPU_INFO cpu_info;
         /* Memory information */

+ 7 - 7
LibOS/shim/include/shim_internal.h

@@ -743,13 +743,13 @@ static inline uint64_t hash64 (uint64_t key)
 #endif
 
 extern size_t g_pal_alloc_align;
-#define PAGE_SIZE g_pal_alloc_align
-#define IS_PAGE_ALIGNED(x) IS_ALIGNED_POW2(x, g_pal_alloc_align)
-#define IS_PAGE_ALIGNED_PTR(x) IS_ALIGNED_PTR_POW2(x, g_pal_alloc_align)
-#define PAGE_ALIGN_DOWN(x) ALIGN_DOWN_POW2(x, g_pal_alloc_align)
-#define PAGE_ALIGN_UP(x) ALIGN_UP_POW2(x, g_pal_alloc_align)
-#define PAGE_ALIGN_DOWN_PTR(x) ALIGN_DOWN_PTR_POW2(x, g_pal_alloc_align)
-#define PAGE_ALIGN_UP_PTR(x) ALIGN_UP_PTR_POW2(x, g_pal_alloc_align)
+#define ALLOC_ALIGNMENT         g_pal_alloc_align
+#define IS_ALLOC_ALIGNED(x)     IS_ALIGNED_POW2(x, g_pal_alloc_align)
+#define IS_ALLOC_ALIGNED_PTR(x) IS_ALIGNED_PTR_POW2(x, g_pal_alloc_align)
+#define ALLOC_ALIGN_DOWN(x)     ALIGN_DOWN_POW2(x, g_pal_alloc_align)
+#define ALLOC_ALIGN_UP(x)       ALIGN_UP_POW2(x, g_pal_alloc_align)
+#define ALLOC_ALIGN_DOWN_PTR(x) ALIGN_DOWN_PTR_POW2(x, g_pal_alloc_align)
+#define ALLOC_ALIGN_UP_PTR(x)   ALIGN_UP_PTR_POW2(x, g_pal_alloc_align)
 
 void * __system_malloc (size_t size);
 void __system_free (void * addr, size_t size);

+ 4 - 4
LibOS/shim/src/bookkeep/shim_signal.c

@@ -384,7 +384,7 @@ bool test_user_memory (void * addr, size_t size, bool write)
         } else {
             *(volatile char *) tmp;
         }
-        tmp = PAGE_ALIGN_UP_PTR(tmp + 1);
+        tmp = ALLOC_ALIGN_UP_PTR(tmp + 1);
     }
 
 ret_fault:
@@ -411,7 +411,7 @@ bool test_user_string (const char * addr)
         return true;
 
     size_t size, maxlen;
-    const char* next = PAGE_ALIGN_UP_PTR(addr + 1);
+    const char* next = ALLOC_ALIGN_UP_PTR(addr + 1);
 
     /* SGX path: check if [addr, addr+size) is addressable (in some VMA). */
     if (is_sgx_pal()) {
@@ -425,7 +425,7 @@ bool test_user_string (const char * addr)
 
             size = strnlen(addr, maxlen);
             addr = next;
-            next = PAGE_ALIGN_UP_PTR(addr + 1);
+            next = ALLOC_ALIGN_UP_PTR(addr + 1);
         } while (size == maxlen);
 
         return false;
@@ -457,7 +457,7 @@ bool test_user_string (const char * addr)
 
         size = strnlen(addr, maxlen);
         addr = next;
-        next = PAGE_ALIGN_UP_PTR(addr + 1);
+        next = ALLOC_ALIGN_UP_PTR(addr + 1);
     } while (size == maxlen);
 
 ret_fault:

+ 4 - 4
LibOS/shim/src/bookkeep/shim_vma.c

@@ -72,7 +72,7 @@ static void * __bkeep_unmapped (void * top_addr, void * bottom_addr,
 static inline void * __malloc (size_t size)
 {
     void * addr;
-    size = PAGE_ALIGN_UP(size);
+    size = ALLOC_ALIGN_UP(size);
 
     /*
      * Chia-Che 3/3/18: We must enforce the policy that all VMAs have to
@@ -318,7 +318,7 @@ int init_vma (void)
 
     /* Keep track of LibOS code itself so nothing overwrites it */
     ret = __bkeep_preloaded(&__load_address,
-                            PAGE_ALIGN_UP_PTR(&__load_address_end),
+                            ALLOC_ALIGN_UP_PTR(&__load_address_end),
                             PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL,
                             "LibOS");
     if (ret < 0)
@@ -365,7 +365,7 @@ int init_vma (void)
     ret = DkRandomBitsRead(&rand, sizeof(rand));
     if (ret < 0)
         return -convert_pal_errno(-ret);
-    current_heap_top -= PAGE_ALIGN_DOWN(rand % addr_rand_size);
+    current_heap_top -= ALLOC_ALIGN_DOWN(rand % addr_rand_size);
 #endif
 
     debug("heap top adjusted to %p\n", current_heap_top);
@@ -1091,7 +1091,7 @@ BEGIN_CP_FUNC(vma)
                     (off_t)(vma->offset + vma->length) > file_len) {
                     send_size = file_len > vma->offset ?
                                 file_len - vma->offset : 0;
-                    send_size = PAGE_ALIGN_UP(send_size);
+                    send_size = ALLOC_ALIGN_UP(send_size);
                 }
             }
             if (send_size > 0) {

+ 21 - 21
LibOS/shim/src/elf/shim_rtld.c

@@ -197,8 +197,8 @@ static int protect_page(struct link_map* l, void* addr, size_t size) {
             return 0;
     }
 
-    void* start = PAGE_ALIGN_DOWN_PTR(addr);
-    void* end   = PAGE_ALIGN_UP_PTR(addr + size);
+    void* start = ALLOC_ALIGN_DOWN_PTR(addr);
+    void* end   = ALLOC_ALIGN_UP_PTR(addr + size);
 
     if (!DkVirtualMemoryProtect(start, end - start, PAL_PROT_READ | PAL_PROT_WRITE | prot))
         return -PAL_ERRNO;
@@ -430,7 +430,7 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
             case PT_LOAD:
                 /* A load command tells us to map in part of the file.
                    We record the load commands and process them all later.  */
-                if (!IS_PAGE_ALIGNED(ph->p_align)) {
+                if (!IS_ALLOC_ALIGNED(ph->p_align)) {
                     errstring = "ELF load command alignment not page-aligned";
                     goto call_lose;
                 }
@@ -446,11 +446,11 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
                 }
 
                 c           = &l->loadcmds[l->nloadcmds++];
-                c->mapstart = PAGE_ALIGN_DOWN(ph->p_vaddr);
-                c->mapend   = PAGE_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
+                c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
+                c->mapend   = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
                 c->dataend  = ph->p_vaddr + ph->p_filesz;
                 c->allocend = ph->p_vaddr + ph->p_memsz;
-                c->mapoff   = PAGE_ALIGN_DOWN(ph->p_offset);
+                c->mapoff   = ALLOC_ALIGN_DOWN(ph->p_offset);
 
                 /* Determine whether there is a gap between the last segment
                    and this one.  */
@@ -510,12 +510,12 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
                 mappref = (ElfW(Addr))c->mapstart + (ElfW(Addr))addr;
             else
                 mappref = (ElfW(Addr))bkeep_unmapped_heap(
-                    PAGE_ALIGN_UP(maplength), c->prot,
+                    ALLOC_ALIGN_UP(maplength), c->prot,
                     c->flags | MAP_PRIVATE | (type == OBJECT_INTERNAL ? VMA_INTERNAL : 0), file,
                     c->mapoff, NULL);
 
             /* Remember which part of the address space this object uses.  */
-            ret = (*mmap)(file, (void**)&mappref, PAGE_ALIGN_UP(maplength), c->prot,
+            ret = (*mmap)(file, (void**)&mappref, ALLOC_ALIGN_UP(maplength), c->prot,
                           c->flags | MAP_PRIVATE, c->mapoff);
 
             if (ret < 0) {
@@ -600,8 +600,8 @@ do_remap:
             ElfW(Addr) zero, zeroend, zeropage;
 
             zero     = (ElfW(Addr))RELOCATE(l, c->dataend);
-            zeroend  = PAGE_ALIGN_UP((ElfW(Addr))RELOCATE(l, c->allocend));
-            zeropage = PAGE_ALIGN_UP(zero);
+            zeroend  = ALLOC_ALIGN_UP((ElfW(Addr))RELOCATE(l, c->allocend));
+            zeropage = ALLOC_ALIGN_UP(zero);
 
             if (zeroend < zeropage)
                 /* All the extra data is in the last page of the segment.
@@ -613,13 +613,13 @@ do_remap:
                 /* Zero the final part of the last page of the segment.  */
                 if ((c->prot & PROT_WRITE) == 0) {
                     /* Dag nab it.  */
-                    if (!DkVirtualMemoryProtect((caddr_t)PAGE_ALIGN_DOWN(zero), g_pal_alloc_align,
+                    if (!DkVirtualMemoryProtect((caddr_t)ALLOC_ALIGN_DOWN(zero), g_pal_alloc_align,
                                                 c->prot | PAL_PROT_WRITE)) {
                         errstring = "cannot change memory protections";
                         goto call_lose;
                     }
                     memset((void*)zero, '\0', zeropage - zero);
-                    if (!DkVirtualMemoryProtect((caddr_t)PAGE_ALIGN_DOWN(zero), g_pal_alloc_align,
+                    if (!DkVirtualMemoryProtect((caddr_t)ALLOC_ALIGN_DOWN(zero), g_pal_alloc_align,
                                                 c->prot)) {
                         errstring = "cannot change memory protections";
                         goto call_lose;
@@ -786,7 +786,7 @@ static int __free_elf_object(struct link_map* l) {
 
             zero     = l->l_addr + c->dataend;
             zeroend  = l->l_addr + c->allocend;
-            zeropage = PAGE_ALIGN_UP(zero);
+            zeropage = ALLOC_ALIGN_UP(zero);
 
             if (zeroend < zeropage)
                 /* All the extra data is in the last page of the segment.
@@ -1426,20 +1426,20 @@ static int vdso_map_init(void) {
      * When LibOS is loaded at different address, it may overlap with the old vDSO
      * area.
      */
-    void* addr = bkeep_unmapped_heap(PAGE_ALIGN_UP(vdso_so_size), PROT_READ | PROT_EXEC, 0, NULL, 0,
+    void* addr = bkeep_unmapped_heap(ALLOC_ALIGN_UP(vdso_so_size), PROT_READ | PROT_EXEC, 0, NULL, 0,
                                      "linux-vdso.so.1");
     if (addr == NULL)
         return -ENOMEM;
-    assert(addr == PAGE_ALIGN_UP_PTR(addr));
+    assert(addr == ALLOC_ALIGN_UP_PTR(addr));
 
-    void* ret_addr = (void*)DkVirtualMemoryAlloc(addr, PAGE_ALIGN_UP(vdso_so_size), 0,
+    void* ret_addr = (void*)DkVirtualMemoryAlloc(addr, ALLOC_ALIGN_UP(vdso_so_size), 0,
                                                  PAL_PROT_READ | PAL_PROT_WRITE);
     if (!ret_addr)
         return -PAL_ERRNO;
     assert(addr == ret_addr);
 
     memcpy(addr, &vdso_so, vdso_so_size);
-    memset(addr + vdso_so_size, 0, PAGE_ALIGN_UP(vdso_so_size) - vdso_so_size);
+    memset(addr + vdso_so_size, 0, ALLOC_ALIGN_UP(vdso_so_size) - vdso_so_size);
     __load_elf_object(NULL, addr, OBJECT_VDSO, NULL);
     vdso_map->l_name = "vDSO";
 
@@ -1453,7 +1453,7 @@ static int vdso_map_init(void) {
         **vsyms[i].func = vsyms[i].value;
     }
 
-    if (!DkVirtualMemoryProtect(addr, PAGE_ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
+    if (!DkVirtualMemoryProtect(addr, ALLOC_ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
         return -PAL_ERRNO;
 
     vdso_addr = addr;
@@ -1464,7 +1464,7 @@ int vdso_map_migrate(void) {
     if (!vdso_addr)
         return 0;
 
-    if (!DkVirtualMemoryProtect(vdso_addr, PAGE_ALIGN_UP(vdso_so_size),
+    if (!DkVirtualMemoryProtect(vdso_addr, ALLOC_ALIGN_UP(vdso_so_size),
                                 PAL_PROT_READ | PAL_PROT_WRITE))
         return -PAL_ERRNO;
 
@@ -1473,7 +1473,7 @@ int vdso_map_migrate(void) {
         **vsyms[i].func = vsyms[i].value;
     }
 
-    if (!DkVirtualMemoryProtect(vdso_addr, PAGE_ALIGN_UP(vdso_so_size),
+    if (!DkVirtualMemoryProtect(vdso_addr, ALLOC_ALIGN_UP(vdso_so_size),
                                 PAL_PROT_READ | PAL_PROT_EXEC))
         return -PAL_ERRNO;
     return 0;
@@ -1532,7 +1532,7 @@ int init_brk_from_executable(struct shim_handle* exec) {
             if (!(c->prot & PROT_EXEC))
                 data_segment_size += c->allocend - c->mapstart;
 
-        return init_brk_region((void*)PAGE_ALIGN_UP(exec_map->l_map_end), data_segment_size);
+        return init_brk_region((void*)ALLOC_ALIGN_UP(exec_map->l_map_end), data_segment_size);
     }
     return 0;
 }

+ 2 - 2
LibOS/shim/src/fs/chroot/fs.c

@@ -45,8 +45,8 @@
 
 #define TTY_FILE_MODE   0666
 
-#define FILE_BUFMAP_SIZE (PAL_CB(pagesize) * 4)
-#define FILE_BUF_SIZE (PAL_CB(pagesize))
+#define FILE_BUFMAP_SIZE (PAL_CB(alloc_align) * 4)
+#define FILE_BUF_SIZE (PAL_CB(alloc_align))
 
 struct mount_data {
     size_t              data_size;

+ 12 - 12
LibOS/shim/src/shim_checkpoint.c

@@ -339,8 +339,8 @@ BEGIN_CP_FUNC(gipc)
 {
     ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_gipc_entry));
 
-    void* send_addr  = (void*)PAGE_ALIGN_DOWN_PTR(obj);
-    size_t send_size = (void*)PAGE_ALIGN_UP_PTR(obj + size) - send_addr;
+    void* send_addr  = (void*)ALLOC_ALIGN_DOWN_PTR(obj);
+    size_t send_size = (void*)ALLOC_ALIGN_UP_PTR(obj + size) - send_addr;
 
     struct shim_gipc_entry * entry = (void *) (base + off);
 
@@ -399,7 +399,7 @@ static int send_checkpoint_by_gipc (PAL_HANDLE gipc_store,
 {
     PAL_PTR hdr_addr = (PAL_PTR) store->base;
     PAL_NUM hdr_size = (PAL_NUM) store->offset + store->mem_size;
-    assert(IS_PAGE_ALIGNED_PTR(hdr_addr));
+    assert(IS_ALLOC_ALIGNED_PTR(hdr_addr));
 
     int mem_nentries = store->mem_nentries;
 
@@ -428,7 +428,7 @@ static int send_checkpoint_by_gipc (PAL_HANDLE gipc_store,
         }
     }
 
-    hdr_size = PAGE_ALIGN_UP(hdr_size);
+    hdr_size = ALLOC_ALIGN_UP(hdr_size);
     int npages = DkPhysicalMemoryCommit(gipc_store, 1, &hdr_addr, &hdr_size);
     if (!npages)
         return -EPERM;
@@ -620,8 +620,8 @@ int restore_checkpoint (struct cp_header * cphdr, struct mem_header * memhdr,
                 debug("memory entry [%p]: %p-%p\n", entry, entry->addr,
                       entry->addr + entry->size);
 
-                PAL_PTR addr = PAGE_ALIGN_DOWN_PTR(entry->addr);
-                PAL_NUM size = PAGE_ALIGN_UP_PTR(entry->addr + entry->size) - (void*)addr;
+                PAL_PTR addr = ALLOC_ALIGN_DOWN_PTR(entry->addr);
+                PAL_NUM size = ALLOC_ALIGN_UP_PTR(entry->addr + entry->size) - (void*)addr;
                 PAL_FLG prot = entry->prot;
 
                 if (!DkVirtualMemoryAlloc(addr, size, 0, prot|PAL_PROT_WRITE)) {
@@ -752,7 +752,7 @@ int restore_from_file (const char * filename, struct newproc_cp_header * hdr,
         goto out;
 
     void * cpaddr = cphdr.addr;
-    ret = fs->fs_ops->mmap(file, &cpaddr, PAGE_ALIGN_UP(cphdr.size), PROT_READ|PROT_WRITE,
+    ret = fs->fs_ops->mmap(file, &cpaddr, ALLOC_ALIGN_UP(cphdr.size), PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_FILE, 0);
     if (ret < 0)
         goto out;
@@ -866,7 +866,7 @@ static void * cp_alloc (struct shim_cp_store * store, void * addr, size_t size)
          * checkpoint space. The reserved space is half of the size of the
          * checkpoint space, but can be further fine-tuned.
          */
-        size_t reserve_size = PAGE_ALIGN_UP(size >> 1);
+        size_t reserve_size = ALLOC_ALIGN_UP(size >> 1);
 
         debug("try allocate checkpoint store (size = %ld, reserve = %ld)\n",
               size, reserve_size);
@@ -1198,8 +1198,8 @@ int do_migration (struct newproc_cp_header * hdr, void ** cpptr)
 
         /* Try to load the checkpoint at the same address */
         base = hdr->hdr.addr;
-        mapaddr = (PAL_PTR)PAGE_ALIGN_DOWN_PTR(base);
-        mapsize = (PAL_PTR)PAGE_ALIGN_UP_PTR(base + size) - mapaddr;
+        mapaddr = (PAL_PTR)ALLOC_ALIGN_DOWN_PTR(base);
+        mapsize = (PAL_PTR)ALLOC_ALIGN_UP_PTR(base + size) - mapaddr;
 
         /* Need to create VMA before allocation */
         ret = bkeep_mmap((void *) mapaddr, mapsize,
@@ -1211,13 +1211,13 @@ int do_migration (struct newproc_cp_header * hdr, void ** cpptr)
 #endif
 
     if (!base) {
-        base = bkeep_unmapped_any(PAGE_ALIGN_UP(size), PROT_READ|PROT_WRITE, CP_VMA_FLAGS, 0,
+        base = bkeep_unmapped_any(ALLOC_ALIGN_UP(size), PROT_READ|PROT_WRITE, CP_VMA_FLAGS, 0,
                                   "cpstore");
         if (!base)
             return -ENOMEM;
 
         mapaddr = (PAL_PTR)base;
-        mapsize = (PAL_NUM)PAGE_ALIGN_UP(size);
+        mapsize = (PAL_NUM)ALLOC_ALIGN_UP(size);
     }
 
     debug("checkpoint mapped at %p-%p\n", base, base + size);

+ 6 - 6
LibOS/shim/src/shim_init.c

@@ -246,8 +246,8 @@ DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);
 
 void * allocate_stack (size_t size, size_t protect_size, bool user)
 {
-    size = PAGE_ALIGN_UP(size);
-    protect_size = PAGE_ALIGN_UP(protect_size);
+    size = ALLOC_ALIGN_UP(size);
+    protect_size = ALLOC_ALIGN_UP(protect_size);
 
     /* preserve a non-readable, non-writable page below the user
        stack to stop user program to clobber other vmas */
@@ -377,7 +377,7 @@ int init_stack (const char ** argv, const char ** envp,
     if (root_config) {
         char stack_cfg[CONFIG_MAX];
         if (get_config(root_config, "sys.stack.size", stack_cfg, sizeof(stack_cfg)) > 0) {
-            stack_size = PAGE_ALIGN_UP(parse_int(stack_cfg));
+            stack_size = ALLOC_ALIGN_UP(parse_int(stack_cfg));
             set_rlimit_cur(RLIMIT_STACK, stack_size);
         }
     }
@@ -483,13 +483,13 @@ int init_manifest (PAL_HANDLE manifest_handle)
             return -PAL_ERRNO;
 
         size = attr.pending_size;
-        map_size = PAGE_ALIGN_UP(size);
+        map_size = ALLOC_ALIGN_UP(size);
         addr = bkeep_unmapped_any(map_size, PROT_READ, MAP_FLAGS,
                                   0, "manifest");
         if (!addr)
             return -ENOMEM;
 
-        void* ret_addr = DkStreamMap(manifest_handle, addr, PAL_PROT_READ, 0, PAGE_ALIGN_UP(size));
+        void* ret_addr = DkStreamMap(manifest_handle, addr, PAL_PROT_READ, 0, ALLOC_ALIGN_UP(size));
 
         if (!ret_addr) {
             bkeep_munmap(addr, map_size, MAP_FLAGS);
@@ -1067,7 +1067,7 @@ void check_stack_hook (void)
     __asm__ volatile ("movq %%rsp, %0" : "=r"(rsp) :: "memory");
 
     if (rsp <= cur_thread->stack_top && rsp > cur_thread->stack) {
-        if ((uintptr_t) rsp - (uintptr_t) cur_thread->stack < PAL_CB(pagesize))
+        if ((uintptr_t)rsp - (uintptr_t)cur_thread->stack < PAL_CB(alloc_align))
             SYS_PRINTF("*** stack is almost drained (RSP = %p, stack = %p-%p) ***\n",
                        rsp, cur_thread->stack, cur_thread->stack_top);
     } else {

+ 3 - 3
LibOS/shim/src/shim_malloc.c

@@ -53,7 +53,7 @@ DEFINE_PROFILE_CATEGORY(memory, );
 
 /* Returns NULL on failure */
 void* __system_malloc(size_t size) {
-    size_t alloc_size = PAGE_ALIGN_UP(size);
+    size_t alloc_size = ALLOC_ALIGN_UP(size);
     void* addr;
     void* ret_addr;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS | VMA_INTERNAL;
@@ -91,9 +91,9 @@ void* __system_malloc(size_t size) {
 }
 
 void __system_free(void* addr, size_t size) {
-    DkVirtualMemoryFree(addr, PAGE_ALIGN_UP(size));
+    DkVirtualMemoryFree(addr, ALLOC_ALIGN_UP(size));
 
-    if (bkeep_munmap(addr, PAGE_ALIGN_UP(size), VMA_INTERNAL) < 0)
+    if (bkeep_munmap(addr, ALLOC_ALIGN_UP(size), VMA_INTERNAL) < 0)
         BUG();
 }
 

+ 2 - 2
LibOS/shim/src/sys/shim_brk.c

@@ -59,7 +59,7 @@ int init_brk_region(void* brk_region, size_t data_segment_size) {
     if (region.brk_start)
         return 0;
 
-    data_segment_size = PAGE_ALIGN_UP(data_segment_size);
+    data_segment_size = ALLOC_ALIGN_UP(data_segment_size);
     uint64_t brk_max_size = DEFAULT_BRK_MAX_SIZE;
 
     if (root_config) {
@@ -103,7 +103,7 @@ int init_brk_region(void* brk_region, size_t data_segment_size) {
                     return -convert_pal_errno(-ret);
                 rand %= MIN((size_t)0x2000000,
                             (size_t)(PAL_CB(user_address.end) - brk_region - brk_max_size));
-                rand = PAGE_ALIGN_DOWN(rand);
+                rand = ALLOC_ALIGN_DOWN(rand);
 
                 if (brk_region + rand + brk_max_size >= PAL_CB(user_address.end))
                     continue;

+ 2 - 2
LibOS/shim/src/sys/shim_clone.c

@@ -147,7 +147,7 @@ static int clone_implementation_wrapper(struct clone_args * arg)
     void * stack = arg->stack;
 
     struct shim_vma_val vma;
-    lookup_vma(PAGE_ALIGN_DOWN_PTR(stack), &vma);
+    lookup_vma(ALLOC_ALIGN_DOWN_PTR(stack), &vma);
     my_thread->stack_top = vma.addr + vma.length;
     my_thread->stack_red = my_thread->stack = vma.addr;
 
@@ -331,7 +331,7 @@ int shim_do_clone (int flags, void * user_stack_addr, int * parent_tidptr,
 
         if (user_stack_addr) {
             struct shim_vma_val vma;
-            lookup_vma(PAGE_ALIGN_DOWN_PTR(user_stack_addr), &vma);
+            lookup_vma(ALLOC_ALIGN_DOWN_PTR(user_stack_addr), &vma);
             thread->stack_top = vma.addr + vma.length;
             thread->stack_red = thread->stack = vma.addr;
             parent_stack = (void *)self->shim_tcb->context.regs->rsp;

+ 8 - 8
LibOS/shim/src/sys/shim_fs.c

@@ -450,9 +450,9 @@ static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
             expectsize = bufsize = count - bytes;
 
         if (do_mapi && !bufi) {
-            boffi = offi - PAGE_ALIGN_DOWN(offi);
+            boffi = offi - ALLOC_ALIGN_DOWN(offi);
 
-            if (fsi->fs_ops->mmap(hdli, &bufi, PAGE_ALIGN_UP(bufsize + boffi),
+            if (fsi->fs_ops->mmap(hdli, &bufi, ALLOC_ALIGN_UP(bufsize + boffi),
                                   PROT_READ, MAP_FILE, offi - boffi) < 0) {
                 do_mapi = false;
                 boffi = 0;
@@ -470,9 +470,9 @@ static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
         }
 
         if (do_mapo && !bufo) {
-            boffo = offo - PAGE_ALIGN_DOWN(offo);
+            boffo = offo - ALLOC_ALIGN_DOWN(offo);
 
-            if (fso->fs_ops->mmap(hdlo, &bufo, PAGE_ALIGN_UP(bufsize + boffo),
+            if (fso->fs_ops->mmap(hdlo, &bufo, ALLOC_ALIGN_UP(bufsize + boffo),
                                   PROT_WRITE, MAP_FILE, offo - boffo) < 0) {
                 do_mapo = false;
                 boffo = 0;
@@ -493,19 +493,19 @@ static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
             copysize = count - bytes > bufsize ? bufsize :
                        count - bytes;
             memcpy(bufo + boffo, bufi + boffi, copysize);
-            DkVirtualMemoryFree(bufi, PAGE_ALIGN_UP(bufsize + boffi));
+            DkVirtualMemoryFree(bufi, ALLOC_ALIGN_UP(bufsize + boffi));
             bufi = NULL;
-            DkVirtualMemoryFree(bufo, PAGE_ALIGN_UP(bufsize + boffo));
+            DkVirtualMemoryFree(bufo, ALLOC_ALIGN_UP(bufsize + boffo));
             bufo = NULL;
         } else if (do_mapo) {
             copysize = fsi->fs_ops->read(hdli, bufo + boffo, bufsize);
-            DkVirtualMemoryFree(bufo, PAGE_ALIGN_UP(bufsize + boffo));
+            DkVirtualMemoryFree(bufo, ALLOC_ALIGN_UP(bufsize + boffo));
             bufo = NULL;
             if (copysize < 0)
                 break;
         } else if (do_mapi) {
             copysize = fso->fs_ops->write(hdlo, bufi + boffi, bufsize);
-            DkVirtualMemoryFree(bufi, PAGE_ALIGN_UP(bufsize + boffi));
+            DkVirtualMemoryFree(bufi, ALLOC_ALIGN_UP(bufsize + boffi));
             bufi = NULL;
             if (copysize < 0)
                 break;

+ 12 - 12
LibOS/shim/src/sys/shim_mmap.c

@@ -42,17 +42,17 @@ void* shim_do_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t
      * According to the manpage, both addr and offset have to be page-aligned,
      * but not the length. mmap() will automatically round up the length.
      */
-    if (addr && !IS_PAGE_ALIGNED_PTR(addr))
+    if (addr && !IS_ALLOC_ALIGNED_PTR(addr))
         return (void*)-EINVAL;
 
-    if (fd >= 0 && !IS_PAGE_ALIGNED(offset))
+    if (fd >= 0 && !IS_ALLOC_ALIGNED(offset))
         return (void*)-EINVAL;
 
     if (!length || !access_ok(addr, length))
         return (void*)-EINVAL;
 
-    if (!IS_PAGE_ALIGNED(length))
-        length = PAGE_ALIGN_UP(length);
+    if (!IS_ALLOC_ALIGNED(length))
+        length = ALLOC_ALIGN_UP(length);
 
     /* ignore MAP_32BIT when MAP_FIXED is set */
     if ((flags & (MAP_32BIT | MAP_FIXED)) == (MAP_32BIT | MAP_FIXED))
@@ -148,11 +148,11 @@ int shim_do_mprotect(void* addr, size_t length, int prot) {
      * According to the manpage, addr has to be page-aligned, but not the
      * length. mprotect() will automatically round up the length.
      */
-    if (!addr || !IS_PAGE_ALIGNED_PTR(addr))
+    if (!addr || !IS_ALLOC_ALIGNED_PTR(addr))
         return -EINVAL;
 
-    if (!IS_PAGE_ALIGNED(length))
-        length = PAGE_ALIGN_UP(length);
+    if (!IS_ALLOC_ALIGNED(length))
+        length = ALLOC_ALIGN_UP(length);
 
     if (bkeep_mprotect(addr, length, prot, 0) < 0)
         return -EPERM;
@@ -168,14 +168,14 @@ int shim_do_munmap(void* addr, size_t length) {
      * According to the manpage, addr has to be page-aligned, but not the
      * length. munmap() will automatically round up the length.
      */
-    if (!addr || !IS_PAGE_ALIGNED_PTR(addr))
+    if (!addr || !IS_ALLOC_ALIGNED_PTR(addr))
         return -EINVAL;
 
     if (!length || !access_ok(addr, length))
         return -EINVAL;
 
-    if (!IS_PAGE_ALIGNED(length))
-        length = PAGE_ALIGN_UP(length);
+    if (!IS_ALLOC_ALIGNED(length))
+        length = ALLOC_ALIGN_UP(length);
 
     struct shim_vma_val vma;
 
@@ -208,13 +208,13 @@ int shim_do_munmap(void* addr, size_t length) {
  * Possibly it may cause performance(or other) issue due to this lying.
  */
 int shim_do_mincore(void* addr, size_t len, unsigned char* vec) {
-    if (!IS_PAGE_ALIGNED_PTR(addr))
+    if (!IS_ALLOC_ALIGNED_PTR(addr))
         return -EINVAL;
 
     if (test_user_memory(addr, len, false))
         return -ENOMEM;
 
-    unsigned long pages = PAGE_ALIGN_UP(len) / g_pal_alloc_align;
+    unsigned long pages = ALLOC_ALIGN_UP(len) / g_pal_alloc_align;
     if (test_user_memory(vec, pages, true))
         return -EFAULT;
 

+ 4 - 4
LibOS/shim/src/sys/shim_msgget.c

@@ -757,7 +757,7 @@ static int __store_msg_persist(struct shim_msg_handle* msgq) {
 
     void* mem =
         (void*)DkStreamMap(file, NULL, PAL_PROT_READ | PAL_PROT_WRITE, 0,
-                           PAGE_ALIGN_UP(expected_size));
+                           ALLOC_ALIGN_UP(expected_size));
     if (!mem) {
         ret = -EFAULT;
         goto err_file;
@@ -784,7 +784,7 @@ static int __store_msg_persist(struct shim_msg_handle* msgq) {
         mtype->msgs = mtype->msg_tail = NULL;
     }
 
-    DkStreamUnmap(mem, PAGE_ALIGN_UP(expected_size));
+    DkStreamUnmap(mem, ALLOC_ALIGN_UP(expected_size));
 
     if (msgq->owned)
         for (mtype = msgq->types; mtype < &msgq->types[msgq->ntypes]; mtype++) {
@@ -849,7 +849,7 @@ static int __load_msg_persist(struct shim_msg_handle* msgq, bool readmsg) {
     int expected_size = sizeof(struct msg_handle_backup) + sizeof(struct msg_backup) * mback.nmsgs +
                         mback.currentsize;
 
-    void* mem = (void*)DkStreamMap(file, NULL, PAL_PROT_READ, 0, PAGE_ALIGN_UP(expected_size));
+    void* mem = (void*)DkStreamMap(file, NULL, PAL_PROT_READ, 0, ALLOC_ALIGN_UP(expected_size));
 
     if (!mem) {
         ret = -PAL_ERRNO;
@@ -872,7 +872,7 @@ static int __load_msg_persist(struct shim_msg_handle* msgq, bool readmsg) {
             goto out;
     };
 
-    DkStreamUnmap(mem, PAGE_ALIGN_UP(expected_size));
+    DkStreamUnmap(mem, ALLOC_ALIGN_UP(expected_size));
 
 done:
     DkStreamDelete(file, 0);

+ 9 - 9
Pal/lib/memmgr.h

@@ -76,35 +76,35 @@ typedef struct mem_mgr {
 #define __MIN_MEM_SIZE()     (sizeof(MEM_MGR_TYPE) + sizeof(MEM_AREA_TYPE))
 #define __MAX_MEM_SIZE(size) (__MIN_MEM_SIZE() + __SUM_OBJ_SIZE(size))
 
-#ifdef PAGE_SIZE
+#ifdef ALLOC_ALIGNMENT
 static inline int size_align_down(int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __MAX_MEM_SIZE(size) - sizeof(MEM_MGR_TYPE);
-    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
+    int p = s - ALIGN_DOWN_POW2(s, ALLOC_ALIGNMENT);
     int o = __SUM_OBJ_SIZE(1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int size_align_up(int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __MAX_MEM_SIZE(size) - sizeof(MEM_MGR_TYPE);
-    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
+    int p = ALIGN_UP_POW2(s, ALLOC_ALIGNMENT) - s;
     int o = __SUM_OBJ_SIZE(1);
     return size + p / o;
 }
 
 static inline int init_align_down(int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __MAX_MEM_SIZE(size);
-    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
+    int p = s - ALIGN_DOWN_POW2(s, ALLOC_ALIGNMENT);
     int o = __SUM_OBJ_SIZE(1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int init_align_up(int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __MAX_MEM_SIZE(size);
-    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
+    int p = ALIGN_UP_POW2(s, ALLOC_ALIGNMENT) - s;
     int o = __SUM_OBJ_SIZE(1);
     return size + p / o;
 }

+ 11 - 11
Pal/lib/slabmgr.h

@@ -169,39 +169,39 @@ typedef struct __attribute__((packed)) large_mem_obj {
 #define __INIT_MIN_MEM_SIZE()     (sizeof(SLAB_MGR_TYPE) + sizeof(SLAB_AREA_TYPE) * SLAB_LEVEL)
 #define __INIT_MAX_MEM_SIZE(size) (__INIT_MIN_MEM_SIZE() + __INIT_SUM_OBJ_SIZE(size))
 
-#ifdef PAGE_SIZE
+#ifdef ALLOC_ALIGNMENT
 static inline int size_align_down(int slab_size, int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __MAX_MEM_SIZE(slab_size, size);
-    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
+    int p = s - ALIGN_DOWN_POW2(s, ALLOC_ALIGNMENT);
     int o = __SUM_OBJ_SIZE(slab_size, 1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int size_align_up(int slab_size, int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __MAX_MEM_SIZE(slab_size, size);
-    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
+    int p = ALIGN_UP_POW2(s, ALLOC_ALIGNMENT) - s;
     int o = __SUM_OBJ_SIZE(slab_size, 1);
     return size + p / o;
 }
 
 static inline int init_align_down(int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __INIT_MAX_MEM_SIZE(size);
-    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
+    int p = s - ALIGN_DOWN_POW2(s, ALLOC_ALIGNMENT);
     int o = __INIT_SUM_OBJ_SIZE(1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int init_size_align_up(int size) {
-    assert(IS_POWER_OF_2(PAGE_SIZE));
+    assert(IS_POWER_OF_2(ALLOC_ALIGNMENT));
     int s = __INIT_MAX_MEM_SIZE(size);
-    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
+    int p = ALIGN_UP_POW2(s, ALLOC_ALIGNMENT) - s;
     int o = __INIT_SUM_OBJ_SIZE(1);
     return size + p / o;
 }
-#endif /* PAGE_SIZE */
+#endif /* ALLOC_ALIGNMENT */
 
 #ifndef STARTUP_SIZE
 #define STARTUP_SIZE 16
@@ -216,7 +216,7 @@ static inline void __set_free_slab_area(SLAB_AREA area, SLAB_MGR mgr, int level)
 }
 
 static inline SLAB_MGR create_slab_mgr(void) {
-#ifdef PAGE_SIZE
+#ifdef ALLOC_ALIGNMENT
     size_t size = init_size_align_up(STARTUP_SIZE);
 #else
     size_t size = STARTUP_SIZE;

+ 0 - 2
Pal/regression/Bootstrap.c

@@ -40,8 +40,6 @@ int main(int argc, char** argv, char** envp) {
     char* msg = "Written to Debug Stream\n";
     DkStreamWrite(pal_control.debug_stream, 0, strlen(msg), msg, NULL);
 
-    /* page size */
-    pal_printf("Page Size: %ld\n", pal_control.pagesize);
     /* Allocation Alignment */
     pal_printf("Allocation Alignment: %ld\n", pal_control.alloc_align);
 

+ 0 - 3
Pal/regression/test_pal.py

@@ -55,9 +55,6 @@ class TC_01_Bootstrap(RegressionTestCase):
         # Control Block: Debug Stream (Inline)
         self.assertIn('Written to Debug Stream', stdout)
 
-        # Control Block: Page Size
-        self.assertIn('Page Size: {}'.format(mmap.PAGESIZE), stderr)
-
         # Control Block: Allocation Alignment
         self.assertIn('Allocation Alignment: {}'.format(mmap.ALLOCATIONGRANULARITY), stderr)
 

+ 0 - 2
Pal/src/db_main.c

@@ -233,7 +233,6 @@ noreturn void pal_main (
 #endif
 
     pal_state.instance_id = instance_id;
-    pal_state.pagesize    = _DkGetPagesize();
     pal_state.alloc_align = _DkGetAllocationAlignment();
     assert(IS_POWER_OF_2(pal_state.alloc_align));
 
@@ -443,7 +442,6 @@ noreturn void pal_main (
                                     &__pal_control.user_address_hole.start,
                                     &__pal_control.user_address_hole.end);
 
-    __pal_control.pagesize           = pal_state.pagesize;
     __pal_control.alloc_align        = pal_state.alloc_align;
     __pal_control.broadcast_stream   = _DkBroadcastStreamOpen();
 

+ 5 - 5
Pal/src/host/FreeBSD/db_main.c

@@ -55,7 +55,7 @@ asm (".global pal_start \n"
 struct pal_bsd_state bsd_state;
 struct pal_sec pal_sec;
 
-static int pagesz = PRESET_PAGESIZE;
+static size_t g_page_size = PRESET_PAGESIZE;
 static uid_t uid;
 static gid_t gid;
 
@@ -101,7 +101,7 @@ static void pal_init_bootstrap (void * args, const char ** pal_name,
     for (av = (ElfW(auxv_t) *)auxv ; av->a_type != AT_NULL ; av++)
         switch (av->a_type) {
             case AT_PAGESZ:
-                pagesz = av->a_un.a_val;
+                g_page_size = av->a_un.a_val;
                 break;
             case AT_UID:
             case AT_EUID:
@@ -127,12 +127,12 @@ static void pal_init_bootstrap (void * args, const char ** pal_name,
 
 unsigned long _DkGetPagesize (void)
 {
-    return pagesz;
+    return g_page_size;
 }
 
 unsigned long _DkGetAllocationAlignment (void)
 {
-    return pagesz;
+    return g_page_size;
 }
 
 void _DkGetAvailableUserAddressRange (PAL_PTR * start, PAL_PTR * end,
@@ -215,7 +215,7 @@ void pal_bsd_main (void * args)
                          pal_map.l_info, pal_map.l_addr);
     ELF_DYNAMIC_RELOCATE(&pal_map);
 
-    init_slab_mgr(pagesz);
+    init_slab_mgr(g_page_size);
     setup_pal_map(&pal_map);
 
     bsd_state.start_time = 1000000ULL * time.tv_sec + time.tv_usec;

+ 7 - 7
Pal/src/host/Linux-SGX/db_main.c

@@ -45,23 +45,23 @@
 struct pal_linux_state linux_state;
 struct pal_sec pal_sec;
 
-unsigned int pagesz = PRESET_PAGESIZE;
+size_t g_page_size = PRESET_PAGESIZE;
 
 unsigned long _DkGetPagesize (void)
 {
-    return pagesz;
+    return g_page_size;
 }
 
 unsigned long _DkGetAllocationAlignment (void)
 {
-    return pagesz;
+    return g_page_size;
 }
 
 void _DkGetAvailableUserAddressRange (PAL_PTR * start, PAL_PTR * end,
                                       PAL_PTR * hole_start, PAL_PTR * hole_end)
 {
     *start = (PAL_PTR) pal_sec.heap_min;
-    *end = (PAL_PTR) get_reserved_pages(NULL, pagesz);
+    *end = (PAL_PTR) get_reserved_pages(NULL, g_page_size);
     *hole_start = SATURATED_P_SUB(pal_sec.exec_addr, MEMORY_GAP, *start);
     *hole_end = SATURATED_P_ADD(pal_sec.exec_addr + pal_sec.exec_size, MEMORY_GAP, *end);
 }
@@ -305,7 +305,7 @@ void pal_linux_main(char * uptr_args, uint64_t args_size,
     }
 
     /* set up page allocator and slab manager */
-    init_slab_mgr(pagesz);
+    init_slab_mgr(g_page_size);
     init_untrusted_slab_mgr();
     init_pages();
     init_enclave_key();
@@ -314,7 +314,7 @@ void pal_linux_main(char * uptr_args, uint64_t args_size,
     setup_pal_map(&pal_map);
 
     /* Set the alignment early */
-    pal_state.alloc_align = pagesz;
+    pal_state.alloc_align = g_page_size;
 
     /* initialize enclave properties */
     rv = init_enclave();
@@ -371,7 +371,7 @@ void pal_linux_main(char * uptr_args, uint64_t args_size,
     }
 
     uint64_t manifest_size = GET_ENCLAVE_TLS(manifest_size);
-    void* manifest_addr = enclave_top - ALIGN_UP_PTR_POW2(manifest_size, pagesz);
+    void* manifest_addr = enclave_top - ALIGN_UP_PTR_POW2(manifest_size, g_page_size);
 
     /* parse manifest data into config storage */
     struct config_store * root_config =

+ 2 - 2
Pal/src/host/Linux-SGX/db_memory.c

@@ -131,10 +131,10 @@ unsigned long _DkMemoryQuota (void)
 }
 
 extern struct atomic_int alloced_pages;
-extern unsigned int pagesz;
+extern size_t g_page_size;
 
 unsigned long _DkMemoryAvailableQuota (void)
 {
     return (pal_sec.heap_max - pal_sec.heap_min) -
-        atomic_read(&alloced_pages) * pagesz;
+        atomic_read(&alloced_pages) * g_page_size;
 }

+ 8 - 8
Pal/src/host/Linux-SGX/enclave_pages.c

@@ -8,7 +8,7 @@
 
 #include <stdint.h>
 
-static unsigned long pgsz = PRESET_PAGESIZE;
+static size_t g_page_size = PRESET_PAGESIZE;
 void * heap_base;
 static uint64_t heap_size;
 
@@ -178,7 +178,7 @@ static void * reserve_area(void * addr, size_t size, struct heap_vma * prev)
     }
     assert_vma_list();
 
-    atomic_add(size / pgsz, &alloced_pages);
+    atomic_add(size / g_page_size, &alloced_pages);
     return addr;
 }
 
@@ -196,8 +196,8 @@ void * get_reserved_pages(void * addr, size_t size)
         return NULL;
     }
 
-    size = ALIGN_UP(size, pgsz);
-    addr = ALIGN_DOWN_PTR(addr, pgsz);
+    size = ALIGN_UP(size, g_page_size);
+    addr = ALIGN_DOWN_PTR(addr, g_page_size);
 
     SGX_DBG(DBG_M, "allocate %ld bytes at %p\n", size, addr);
 
@@ -265,8 +265,8 @@ void free_pages(void * addr, size_t size)
     if (!addr || !size)
         return;
 
-    addr = ALIGN_DOWN_PTR(addr, pgsz);
-    addr_top = ALIGN_UP_PTR(addr_top, pgsz);
+    addr = ALIGN_DOWN_PTR(addr, g_page_size);
+    addr_top = ALIGN_UP_PTR(addr_top, g_page_size);
 
     if (addr >= heap_base + heap_size)
         return;
@@ -307,7 +307,7 @@ void free_pages(void * addr, size_t size)
     _DkInternalUnlock(&heap_vma_lock);
 
     unsigned int val = atomic_read(&alloced_pages);
-    atomic_sub(size / pgsz, &alloced_pages);
+    atomic_sub(size / g_page_size, &alloced_pages);
     if (val > atomic_read(&max_alloced_pages))
         atomic_set(&max_alloced_pages, val);
 }
@@ -319,5 +319,5 @@ void print_alloced_pages (void)
 
     printf("                >>>>>>>> "
            "Enclave heap size =         %10d pages / %10ld pages\n",
-           val > max ? val : max, heap_size / pgsz);
+           val > max ? val : max, heap_size / g_page_size);
 }

+ 2 - 2
Pal/src/host/Linux-SGX/enclave_untrusted.c

@@ -22,12 +22,12 @@
 #include "enclave_ocalls.h"
 
 static PAL_LOCK malloc_lock = LOCK_INIT;
-static size_t pagesize      = PRESET_PAGESIZE;
+static size_t g_page_size   = PRESET_PAGESIZE;
 
 #define SYSTEM_LOCK()   _DkSpinLock(&malloc_lock)
 #define SYSTEM_UNLOCK() _DkSpinUnlock(&malloc_lock)
 
-#define PAGE_SIZE pagesize
+#define ALLOC_ALIGNMENT g_page_size
 
 static inline void* __malloc(int size) {
     void* addr = NULL;

+ 7 - 7
Pal/src/host/Linux-SGX/sgx_framework.c

@@ -145,14 +145,14 @@ int create_enclave(sgx_arch_secs_t * secs,
 
     if (!zero_page) {
         zero_page = (void *)
-            INLINE_SYSCALL(mmap, 6, NULL, pagesize,
+            INLINE_SYSCALL(mmap, 6, NULL, g_page_size,
                            PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS,
                            -1, 0);
         if (IS_ERR_P(zero_page))
             return -ENOMEM;
     }
 
-    secs->ssa_frame_size = get_ssaframesize(token->body.attributes.xfrm) / pagesize;
+    secs->ssa_frame_size = get_ssaframesize(token->body.attributes.xfrm) / g_page_size;
     secs->misc_select = token->masked_misc_select_le;
     memcpy(&secs->attributes, &token->body.attributes, sizeof(sgx_attributes_t));
 
@@ -259,7 +259,7 @@ int add_pages_to_enclave(sgx_arch_secs_t * secs,
             p[2] = 'X';
     }
 
-    if (size == pagesize)
+    if (size == g_page_size)
         SGX_DBG(DBG_I, "adding page  to enclave: %p [%s:%s] (%s)%s\n",
                 addr, t, p, comment, m);
     else
@@ -284,9 +284,9 @@ int add_pages_to_enclave(sgx_arch_secs_t * secs,
             return -ERRNO(ret);
         }
 
-        param.addr += pagesize;
-        if (param.src != (uint64_t) zero_page) param.src += pagesize;
-        added_size += pagesize;
+        param.addr += g_page_size;
+        if (param.src != (uint64_t) zero_page) param.src += g_page_size;
+        added_size += g_page_size;
     }
 #else
     struct gsgx_enclave_add_pages param = {
@@ -319,7 +319,7 @@ int init_enclave(sgx_arch_secs_t * secs,
                  sgx_arch_token_t * token)
 {
     unsigned long enclave_valid_addr =
-                secs->base + secs->size - pagesize;
+                secs->base + secs->size - g_page_size;
 
     SGX_DBG(DBG_I, "enclave initializing:\n");
     SGX_DBG(DBG_I, "    enclave id:   0x%016lx\n", enclave_valid_addr);

+ 7 - 7
Pal/src/host/Linux-SGX/sgx_internal.h

@@ -41,7 +41,7 @@ int snprintf(char * str, int size, const char * fmt, ...) __attribute__((format(
 
 /* constants and macros to help rounding addresses to page
    boundaries */
-extern size_t pagesize;
+extern size_t g_page_size;
 
 #undef IS_ALLOC_ALIGNED
 #undef IS_ALLOC_ALIGNED_PTR
@@ -50,12 +50,12 @@ extern size_t pagesize;
 #undef ALLOC_ALIGN_DOWN
 #undef ALLOC_ALIGN_DOWN_PTR
 
-#define IS_ALLOC_ALIGNED(addr)     IS_ALIGNED_POW2(addr, pagesize)
-#define IS_ALLOC_ALIGNED_PTR(addr) IS_ALIGNED_PTR_POW2(addr, pagesize)
-#define ALLOC_ALIGN_UP(addr)       ALIGN_UP_POW2(addr, pagesize)
-#define ALLOC_ALIGN_UP_PTR(addr)   ALIGN_UP_PTR_POW2(addr, pagesize)
-#define ALLOC_ALIGN_DOWN(addr)     ALIGN_DOWN_POW2(addr, pagesize)
-#define ALLOC_ALIGN_DOWN_PTR(addr) ALIGN_DOWN_PTR_POW2(addr, pagesize)
+#define IS_ALLOC_ALIGNED(addr)     IS_ALIGNED_POW2(addr, g_page_size)
+#define IS_ALLOC_ALIGNED_PTR(addr) IS_ALIGNED_PTR_POW2(addr, g_page_size)
+#define ALLOC_ALIGN_UP(addr)       ALIGN_UP_POW2(addr, g_page_size)
+#define ALLOC_ALIGN_UP_PTR(addr)   ALIGN_UP_PTR_POW2(addr, g_page_size)
+#define ALLOC_ALIGN_DOWN(addr)     ALIGN_DOWN_POW2(addr, g_page_size)
+#define ALLOC_ALIGN_DOWN_PTR(addr) ALIGN_DOWN_PTR_POW2(addr, g_page_size)
 
 uint32_t htonl (uint32_t longval);
 uint16_t htons (uint16_t shortval);

+ 13 - 13
Pal/src/host/Linux-SGX/sgx_main.c

@@ -19,7 +19,7 @@
 #include <sysdep.h>
 #include <sysdeps/generic/ldsodefs.h>
 
-size_t pagesize = PRESET_PAGESIZE;
+size_t g_page_size = PRESET_PAGESIZE;
 
 struct pal_enclave pal_enclave;
 
@@ -308,7 +308,7 @@ int initialize_enclave (struct pal_enclave * enclave)
         goto out;
     }
 
-    enclave->ssaframesize = enclave_secs.ssa_frame_size * pagesize;
+    enclave->ssaframesize = enclave_secs.ssa_frame_size * g_page_size;
 
     struct stat stat;
     ret = INLINE_SYSCALL(fstat, 2, enclave->manifest, &stat);
@@ -353,14 +353,14 @@ int initialize_enclave (struct pal_enclave * enclave)
 
     areas[area_num] = (struct mem_area) {
         .desc = "tcs", .skip_eextend = false, .fd = -1,
-        .is_binary = false, .addr = 0, .size = enclave->thread_num * pagesize,
+        .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
         .prot = 0, .type = SGX_PAGE_TCS
     };
     struct mem_area* tcs_area = &areas[area_num++];
 
     areas[area_num] = (struct mem_area) {
         .desc = "tls", .skip_eextend = false, .fd = -1,
-        .is_binary = false, .addr = 0, .size = enclave->thread_num * pagesize,
+        .is_binary = false, .addr = 0, .size = enclave->thread_num * g_page_size,
         .prot = PROT_READ | PROT_WRITE, .type = SGX_PAGE_REG
     };
     struct mem_area* tls_area = &areas[area_num++];
@@ -471,13 +471,13 @@ int initialize_enclave (struct pal_enclave * enclave)
             }
 
             for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
-                struct enclave_tls * gs = data + pagesize * t;
-                memset(gs, 0, pagesize);
-                assert(sizeof(*gs) <= pagesize);
+                struct enclave_tls * gs = data + g_page_size * t;
+                memset(gs, 0, g_page_size);
+                assert(sizeof(*gs) <= g_page_size);
                 gs->common.self = (PAL_TCB *)(
-                    tls_area->addr + pagesize * t + enclave_secs.base);
+                    tls_area->addr + g_page_size * t + enclave_secs.base);
                 gs->enclave_size = enclave->size;
-                gs->tcs_offset = tcs_area->addr + pagesize * t;
+                gs->tcs_offset = tcs_area->addr + g_page_size * t;
                 gs->initial_stack_offset =
                     stack_areas[t].addr + ENCLAVE_STACK_SIZE;
                 gs->ssa = (void *) ssa_area->addr +
@@ -505,17 +505,17 @@ int initialize_enclave (struct pal_enclave * enclave)
             }
 
             for (uint32_t t = 0 ; t < enclave->thread_num ; t++) {
-                sgx_arch_tcs_t * tcs = data + pagesize * t;
-                memset(tcs, 0, pagesize);
+                sgx_arch_tcs_t * tcs = data + g_page_size * t;
+                memset(tcs, 0, g_page_size);
                 tcs->ossa = ssa_area->addr +
                     enclave->ssaframesize * SSAFRAMENUM * t;
                 tcs->nssa = SSAFRAMENUM;
                 tcs->oentry = enclave_entry_addr;
                 tcs->ofs_base = 0;
-                tcs->ogs_base = tls_area->addr + t * pagesize;
+                tcs->ogs_base = tls_area->addr + t * g_page_size;
                 tcs->ofs_limit = 0xfff;
                 tcs->ogs_limit = 0xfff;
-                tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + pagesize * t;
+                tcs_addrs[t] = (void *) enclave_secs.base + tcs_area->addr + g_page_size * t;
             }
         } else if (areas[i].fd != -1) {
             data = (void *) INLINE_SYSCALL(mmap, 6, NULL, areas[i].size,

+ 5 - 5
Pal/src/host/Linux/db_main.c

@@ -62,7 +62,7 @@ __asm__ (".pushsection \".debug_gdb_scripts\", \"MS\",@progbits,1\r\n"
 struct pal_linux_state linux_state;
 struct pal_sec pal_sec;
 
-static int pagesz = PRESET_PAGESIZE;
+static size_t g_page_size = PRESET_PAGESIZE;
 static int uid, gid;
 #if USE_VDSO_GETTIME == 1
 static ElfW(Addr) sysinfo_ehdr;
@@ -112,7 +112,7 @@ static void pal_init_bootstrap (void * args, const char ** pal_name,
     for (av = (ElfW(auxv_t) *) (e + 1) ; av->a_type != AT_NULL ; av++)
         switch (av->a_type) {
             case AT_PAGESZ:
-                pagesz = av->a_un.a_val;
+                g_page_size = av->a_un.a_val;
                 break;
             case AT_UID:
             case AT_EUID:
@@ -139,12 +139,12 @@ static void pal_init_bootstrap (void * args, const char ** pal_name,
 
 unsigned long _DkGetPagesize (void)
 {
-    return pagesz;
+    return g_page_size;
 }
 
 unsigned long _DkGetAllocationAlignment (void)
 {
-    return pagesz;
+    return g_page_size;
 }
 
 void _DkGetAvailableUserAddressRange (PAL_PTR * start, PAL_PTR * end,
@@ -229,7 +229,7 @@ void pal_linux_main (void * args)
 
     linux_state.environ = envp;
 
-    init_slab_mgr(pagesz);
+    init_slab_mgr(g_page_size);
 
     first_thread = malloc(HANDLE_SIZE(thread));
     if (!first_thread)

+ 6 - 2
Pal/src/pal.h

@@ -187,8 +187,12 @@ typedef struct {
     PAL_PTR_RANGE manifest_preload;
 
     /***** Host information *****/
-    /* host page size / allocation alignment */
-    PAL_NUM pagesize, alloc_align;
+    /* Host allocation alignment.
+     * This currently is (and most likely will always be) indistinguishable from the page size,
+     * looking from the LibOS perspective. The two values can be different on the PAL level though,
+     * see e.g. SYSTEM_INFO::dwAllocationGranularity on Windows.
+     */
+    PAL_NUM alloc_align;
     /* CPU information (only required ones) */
     PAL_CPU_INFO cpu_info;
     /* Memory information (only required ones) */

+ 1 - 1
Pal/src/slab.c

@@ -41,7 +41,7 @@ static char mem_pool[POOL_SIZE];
 static void* bump         = mem_pool;
 static void* mem_pool_end = &mem_pool[POOL_SIZE];
 #else
-#define PAGE_SIZE slab_alignment
+#define ALLOC_ALIGNMENT slab_alignment
 #endif
 
 #define STARTUP_SIZE 2