Browse Source

Refactor alignment macros

Michał Kowalczyk 4 years ago
parent
commit
bc0beaa253

+ 1 - 2
LibOS/shim/include/shim_checkpoint.h

@@ -199,8 +199,7 @@ enum {
 
 #define ADD_CP_OFFSET(size)                                         \
     ({                                                              \
-        size_t _size = ((size) + sizeof(void *) - 1) &              \
-                    ~(sizeof(void *) - 1);                          \
+        size_t _size = ALIGN_UP(size, sizeof(void*));               \
         struct shim_cp_entry * oob =                                \
                 (void *) base +                                     \
                 __ADD_CP_OFFSET(sizeof(struct shim_cp_entry));      \

+ 1 - 3
LibOS/shim/include/shim_handle.h

@@ -215,9 +215,7 @@ struct shim_dirent {
 #define SHIM_DIRENT_ALIGNMENT alignof(struct shim_dirent)
 /* Size of struct shim_dirent instance together with alignment,
  * which might be different depending on the length of the name field */
-#define SHIM_DIRENT_ALIGNED_SIZE(len)                                                 \
-    ((SHIM_DIRENT_SIZE + (len) + SHIM_DIRENT_ALIGNMENT - 1) / SHIM_DIRENT_ALIGNMENT * \
-     SHIM_DIRENT_ALIGNMENT)
+#define SHIM_DIRENT_ALIGNED_SIZE(len) ALIGN_UP(SHIM_DIRENT_SIZE + (len), SHIM_DIRENT_ALIGNMENT)
 
 struct shim_dir_handle {
     int offset;

+ 8 - 9
LibOS/shim/include/shim_internal.h

@@ -742,9 +742,14 @@ static inline uint64_t hash64 (uint64_t key)
 # define __alloca __builtin_alloca
 #endif
 
-extern unsigned long allocsize;
-extern unsigned long allocshift;
-extern unsigned long allocmask;
+extern size_t g_pal_alloc_align;
+#define PAGE_SIZE g_pal_alloc_align
+#define IS_PAGE_ALIGNED(x) IS_ALIGNED_POW2(x, g_pal_alloc_align)
+#define IS_PAGE_ALIGNED_PTR(x) IS_ALIGNED_PTR_POW2(x, g_pal_alloc_align)
+#define PAGE_ALIGN_DOWN(x) ALIGN_DOWN_POW2(x, g_pal_alloc_align)
+#define PAGE_ALIGN_UP(x) ALIGN_UP_POW2(x, g_pal_alloc_align)
+#define PAGE_ALIGN_DOWN_PTR(x) ALIGN_DOWN_PTR_POW2(x, g_pal_alloc_align)
+#define PAGE_ALIGN_UP_PTR(x) ALIGN_UP_PTR_POW2(x, g_pal_alloc_align)
 
 void * __system_malloc (size_t size);
 void __system_free (void * addr, size_t size);
@@ -772,12 +777,6 @@ unsigned long parse_int (const char * str);
 extern void * initial_stack;
 extern const char ** initial_envp;
 
-#define ALIGNED(addr)   (!(((unsigned long)(addr)) & allocshift))
-#define ALIGN_UP(addr)      \
-    ((__typeof__(addr)) ((((unsigned long)(addr)) + allocshift) & allocmask))
-#define ALIGN_DOWN(addr)    \
-    ((__typeof__(addr)) (((unsigned long)(addr)) & allocmask))
-
 void get_brk_region (void ** start, void ** end, void ** current);
 
 int reset_brk (void);

+ 0 - 1
LibOS/shim/src/bookkeep/shim_handle.c

@@ -34,7 +34,6 @@ static struct shim_lock handle_mgr_lock;
 
 #define SYSTEM_LOCK()   lock(&handle_mgr_lock)
 #define SYSTEM_UNLOCK() unlock(&handle_mgr_lock)
-#define PAGE_SIZE       allocsize
 
 #define OBJ_TYPE struct shim_handle
 #include <memmgr.h>

+ 4 - 4
LibOS/shim/src/bookkeep/shim_signal.c

@@ -384,7 +384,7 @@ bool test_user_memory (void * addr, size_t size, bool write)
         } else {
             *(volatile char *) tmp;
         }
-        tmp = ALIGN_UP(tmp + 1);
+        tmp = PAGE_ALIGN_UP_PTR(tmp + 1);
     }
 
 ret_fault:
@@ -411,7 +411,7 @@ bool test_user_string (const char * addr)
         return true;
 
     size_t size, maxlen;
-    const char * next = ALIGN_UP(addr + 1);
+    const char* next = PAGE_ALIGN_UP_PTR(addr + 1);
 
     /* SGX path: check if [addr, addr+size) is addressable (in some VMA). */
     if (is_sgx_pal()) {
@@ -425,7 +425,7 @@ bool test_user_string (const char * addr)
 
             size = strnlen(addr, maxlen);
             addr = next;
-            next = ALIGN_UP(addr + 1);
+            next = PAGE_ALIGN_UP_PTR(addr + 1);
         } while (size == maxlen);
 
         return false;
@@ -457,7 +457,7 @@ bool test_user_string (const char * addr)
 
         size = strnlen(addr, maxlen);
         addr = next;
-        next = ALIGN_UP(addr + 1);
+        next = PAGE_ALIGN_UP_PTR(addr + 1);
     } while (size == maxlen);
 
 ret_fault:

+ 4 - 5
LibOS/shim/src/bookkeep/shim_vma.c

@@ -53,7 +53,6 @@ struct shim_vma {
 };
 
 #define VMA_MGR_ALLOC   DEFAULT_VMA_COUNT
-#define PAGE_SIZE       allocsize
 #define RESERVED_VMAS   6
 
 static struct shim_vma * reserved_vmas[RESERVED_VMAS];
@@ -73,7 +72,7 @@ static void * __bkeep_unmapped (void * top_addr, void * bottom_addr,
 static inline void * __malloc (size_t size)
 {
     void * addr;
-    size = ALIGN_UP(size);
+    size = PAGE_ALIGN_UP(size);
 
     /*
      * Chia-Che 3/3/18: We must enforce the policy that all VMAs have to
@@ -319,7 +318,7 @@ int init_vma (void)
 
     /* Keep track of LibOS code itself so nothing overwrites it */
     ret = __bkeep_preloaded(&__load_address,
-                            ALIGN_UP(&__load_address_end),
+                            PAGE_ALIGN_UP_PTR(&__load_address_end),
                             PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL,
                             "LibOS");
     if (ret < 0)
@@ -366,7 +365,7 @@ int init_vma (void)
     ret = DkRandomBitsRead(&rand, sizeof(rand));
     if (ret < 0)
         return -convert_pal_errno(-ret);
-    current_heap_top -= ALIGN_DOWN(rand % addr_rand_size);
+    current_heap_top -= PAGE_ALIGN_DOWN(rand % addr_rand_size);
 #endif
 
     debug("heap top adjusted to %p\n", current_heap_top);
@@ -1092,7 +1091,7 @@ BEGIN_CP_FUNC(vma)
                     (off_t)(vma->offset + vma->length) > file_len) {
                     send_size = file_len > vma->offset ?
                                 file_len - vma->offset : 0;
-                    send_size = ALIGN_UP(send_size);
+                    send_size = PAGE_ALIGN_UP(send_size);
                 }
             }
             if (send_size > 0) {

+ 28 - 24
LibOS/shim/src/elf/shim_rtld.c

@@ -197,8 +197,8 @@ static int protect_page(struct link_map* l, void* addr, size_t size) {
             return 0;
     }
 
-    void* start = ALIGN_DOWN(addr);
-    void* end   = ALIGN_UP(addr + size);
+    void* start = PAGE_ALIGN_DOWN_PTR(addr);
+    void* end   = PAGE_ALIGN_UP_PTR(addr + size);
 
     if (!DkVirtualMemoryProtect(start, end - start, PAL_PROT_READ | PAL_PROT_WRITE | prot))
         return -PAL_ERRNO;
@@ -430,12 +430,13 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
             case PT_LOAD:
                 /* A load command tells us to map in part of the file.
                    We record the load commands and process them all later.  */
-                if (__builtin_expect(!ALIGNED(ph->p_align), 0)) {
+                if (__builtin_expect(!IS_PAGE_ALIGNED(ph->p_align), 0)) {
                     errstring = "ELF load command alignment not page-aligned";
                     goto call_lose;
                 }
 
-                if (__builtin_expect(((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1)) != 0, 0)) {
+                if (__builtin_expect(!IS_ALIGNED_POW2(ph->p_vaddr - ph->p_offset, ph->p_align),
+                                     0)) {
                     errstring = "ELF load command address/offset not properly aligned";
                     goto call_lose;
                 }
@@ -446,11 +447,11 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
                 }
 
                 c           = &l->loadcmds[l->nloadcmds++];
-                c->mapstart = ALIGN_DOWN(ph->p_vaddr);
-                c->mapend   = ALIGN_UP(ph->p_vaddr + ph->p_filesz);
+                c->mapstart = PAGE_ALIGN_DOWN(ph->p_vaddr);
+                c->mapend   = PAGE_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
                 c->dataend  = ph->p_vaddr + ph->p_filesz;
                 c->allocend = ph->p_vaddr + ph->p_memsz;
-                c->mapoff   = ALIGN_DOWN(ph->p_offset);
+                c->mapoff   = PAGE_ALIGN_DOWN(ph->p_offset);
 
                 /* Determine whether there is a gap between the last segment
                    and this one.  */
@@ -510,12 +511,12 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
                 mappref = (ElfW(Addr))c->mapstart + (ElfW(Addr))addr;
             else
                 mappref = (ElfW(Addr))bkeep_unmapped_heap(
-                    ALIGN_UP(maplength), c->prot,
+                    PAGE_ALIGN_UP(maplength), c->prot,
                     c->flags | MAP_PRIVATE | (type == OBJECT_INTERNAL ? VMA_INTERNAL : 0), file,
                     c->mapoff, NULL);
 
             /* Remember which part of the address space this object uses.  */
-            ret = (*mmap)(file, (void**)&mappref, ALIGN_UP(maplength), c->prot,
+            ret = (*mmap)(file, (void**)&mappref, PAGE_ALIGN_UP(maplength), c->prot,
                           c->flags | MAP_PRIVATE, c->mapoff);
 
             if (__builtin_expect(ret < 0, 0)) {
@@ -600,8 +601,8 @@ do_remap:
             ElfW(Addr) zero, zeroend, zeropage;
 
             zero     = (ElfW(Addr))RELOCATE(l, c->dataend);
-            zeroend  = ALIGN_UP((ElfW(Addr))RELOCATE(l, c->allocend));
-            zeropage = ALIGN_UP(zero);
+            zeroend  = PAGE_ALIGN_UP((ElfW(Addr))RELOCATE(l, c->allocend));
+            zeropage = PAGE_ALIGN_UP(zero);
 
             if (zeroend < zeropage)
                 /* All the extra data is in the last page of the segment.
@@ -613,13 +614,14 @@ do_remap:
                 /* Zero the final part of the last page of the segment.  */
                 if (__builtin_expect((c->prot & PROT_WRITE) == 0, 0)) {
                     /* Dag nab it.  */
-                    if (!DkVirtualMemoryProtect((caddr_t)ALIGN_DOWN(zero), allocsize,
+                    if (!DkVirtualMemoryProtect((caddr_t)PAGE_ALIGN_DOWN(zero), g_pal_alloc_align,
                                                 c->prot | PAL_PROT_WRITE)) {
                         errstring = "cannot change memory protections";
                         goto call_lose;
                     }
                     memset((void*)zero, '\0', zeropage - zero);
-                    if (!DkVirtualMemoryProtect((caddr_t)ALIGN_DOWN(zero), allocsize, c->prot)) {
+                    if (!DkVirtualMemoryProtect((caddr_t)PAGE_ALIGN_DOWN(zero), g_pal_alloc_align,
+                                                c->prot)) {
                         errstring = "cannot change memory protections";
                         goto call_lose;
                     }
@@ -785,7 +787,7 @@ static int __free_elf_object(struct link_map* l) {
 
             zero     = l->l_addr + c->dataend;
             zeroend  = l->l_addr + c->allocend;
-            zeropage = ALIGN_UP(zero);
+            zeropage = PAGE_ALIGN_UP(zero);
 
             if (zeroend < zeropage)
                 /* All the extra data is in the last page of the segment.
@@ -1430,20 +1432,20 @@ static int vdso_map_init(void) {
      * When LibOS is loaded at different address, it may overlap with the old vDSO
      * area.
      */
-    void* addr = bkeep_unmapped_heap(ALIGN_UP(vdso_so_size), PROT_READ | PROT_EXEC, 0, NULL, 0,
+    void* addr = bkeep_unmapped_heap(PAGE_ALIGN_UP(vdso_so_size), PROT_READ | PROT_EXEC, 0, NULL, 0,
                                      "linux-vdso.so.1");
     if (addr == NULL)
         return -ENOMEM;
-    assert(addr == ALIGN_UP(addr));
+    assert(addr == PAGE_ALIGN_UP_PTR(addr));
 
-    void* ret_addr = (void*)DkVirtualMemoryAlloc(addr, ALIGN_UP(vdso_so_size), 0,
+    void* ret_addr = (void*)DkVirtualMemoryAlloc(addr, PAGE_ALIGN_UP(vdso_so_size), 0,
                                                  PAL_PROT_READ | PAL_PROT_WRITE);
     if (!ret_addr)
         return -PAL_ERRNO;
     assert(addr == ret_addr);
 
     memcpy(addr, &vdso_so, vdso_so_size);
-    memset(addr + vdso_so_size, 0, ALIGN_UP(vdso_so_size) - vdso_so_size);
+    memset(addr + vdso_so_size, 0, PAGE_ALIGN_UP(vdso_so_size) - vdso_so_size);
     __load_elf_object(NULL, addr, OBJECT_VDSO, NULL);
     vdso_map->l_name = "vDSO";
 
@@ -1457,7 +1459,7 @@ static int vdso_map_init(void) {
         **vsyms[i].func = vsyms[i].value;
     }
 
-    if (!DkVirtualMemoryProtect(addr, ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
+    if (!DkVirtualMemoryProtect(addr, PAGE_ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
         return -PAL_ERRNO;
 
     vdso_addr = addr;
@@ -1468,7 +1470,8 @@ int vdso_map_migrate(void) {
     if (!vdso_addr)
         return 0;
 
-    if (!DkVirtualMemoryProtect(vdso_addr, ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_WRITE))
+    if (!DkVirtualMemoryProtect(vdso_addr, PAGE_ALIGN_UP(vdso_so_size),
+                                PAL_PROT_READ | PAL_PROT_WRITE))
         return -PAL_ERRNO;
 
     /* adjust funcs to loaded address for newly loaded libsysdb */
@@ -1476,7 +1479,8 @@ int vdso_map_migrate(void) {
         **vsyms[i].func = vsyms[i].value;
     }
 
-    if (!DkVirtualMemoryProtect(vdso_addr, ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
+    if (!DkVirtualMemoryProtect(vdso_addr, PAGE_ALIGN_UP(vdso_so_size),
+                                PAL_PROT_READ | PAL_PROT_EXEC))
         return -PAL_ERRNO;
     return 0;
 }
@@ -1534,7 +1538,7 @@ int init_brk_from_executable(struct shim_handle* exec) {
             if (!(c->prot & PROT_EXEC))
                 data_segment_size += c->allocend - c->mapstart;
 
-        return init_brk_region((void*)ALIGN_UP(exec_map->l_map_end), data_segment_size);
+        return init_brk_region((void*)PAGE_ALIGN_UP(exec_map->l_map_end), data_segment_size);
     }
     return 0;
 }
@@ -1569,7 +1573,7 @@ noreturn void execute_elf_object(struct shim_handle* exec, int* argcp, const cha
 
     struct link_map* exec_map = __search_map_by_handle(exec);
     assert(exec_map);
-    assert((uintptr_t)argcp % 16 == 0); /* stack must be 16B-aligned */
+    assert(IS_ALIGNED_PTR(argcp, 16)); /* stack must be 16B-aligned */
     assert((void*)argcp + sizeof(long) == argp || argp == NULL);
 
     static_assert(REQUIRED_ELF_AUXV >= 8, "not enough space on stack for auxv");
@@ -1578,7 +1582,7 @@ noreturn void execute_elf_object(struct shim_handle* exec, int* argcp, const cha
     auxp[1].a_type     = AT_PHNUM;
     auxp[1].a_un.a_val = exec_map->l_phnum;
     auxp[2].a_type     = AT_PAGESZ;
-    auxp[2].a_un.a_val = allocsize;
+    auxp[2].a_un.a_val = g_pal_alloc_align;
     auxp[3].a_type     = AT_ENTRY;
     auxp[3].a_un.a_val = exec_map->l_entry;
     auxp[4].a_type     = AT_BASE;

+ 2 - 1
LibOS/shim/src/fs/chroot/fs.c

@@ -645,7 +645,8 @@ static inline int __map_buffer (struct shim_handle * hdl, size_t size)
 
     /* second, reallocate the buffer */
     size_t bufsize = file->mapsize ? : FILE_BUFMAP_SIZE;
-    off_t  mapoff = file->marker & ~(bufsize - 1);
+    assert(IS_POWER_OF_2(bufsize));
+    off_t  mapoff = ALIGN_DOWN_POW2(file->marker, bufsize);
     size_t maplen = bufsize;
     int flags = MAP_FILE | MAP_PRIVATE | VMA_INTERNAL;
     int prot = PROT_READ;

+ 1 - 2
LibOS/shim/src/fs/shim_dcache.c

@@ -31,8 +31,7 @@
 
 struct shim_lock dcache_lock;
 
-#define DCACHE_MGR_ALLOC    64
-#define PAGE_SIZE           allocsize
+#define DCACHE_MGR_ALLOC 64
 
 #define OBJ_TYPE struct shim_dentry
 #include <memmgr.h>

+ 1 - 2
LibOS/shim/src/fs/shim_fs.c

@@ -60,8 +60,7 @@ static struct shim_lock mount_mgr_lock;
 #define SYSTEM_LOCK()       lock(&mount_mgr_lock)
 #define SYSTEM_UNLOCK()     unlock(&mount_mgr_lock)
 
-#define MOUNT_MGR_ALLOC     64
-#define PAGE_SIZE           allocsize
+#define MOUNT_MGR_ALLOC 64
 
 #define OBJ_TYPE struct shim_mount
 #include <memmgr.h>

+ 0 - 1
LibOS/shim/src/ipc/shim_ipc.c

@@ -35,7 +35,6 @@
 #include <shim_utils.h>
 
 #define IPC_INFO_MGR_ALLOC 32
-#define PAGE_SIZE          allocsize
 #define OBJ_TYPE           struct shim_ipc_info
 #include "memmgr.h"
 static MEM_MGR ipc_info_mgr;

+ 2 - 3
LibOS/shim/src/ipc/shim_ipc_helper.c

@@ -33,10 +33,9 @@
 #include <pal_error.h>
 #include <list.h>
 
-#define IPC_HELPER_STACK_SIZE (allocsize * 4)
+#define IPC_HELPER_STACK_SIZE (g_pal_alloc_align * 4)
 
 #define PORT_MGR_ALLOC  32
-#define PAGE_SIZE       allocsize
 #define OBJ_TYPE struct shim_ipc_port
 #include "memmgr.h"
 static MEM_MGR port_mgr;
@@ -801,7 +800,7 @@ static void shim_ipc_helper_prepare(void* arg) {
     bool notme = (self != ipc_helper_thread);
     unlock(&ipc_helper_lock);
 
-    void* stack = allocate_stack(IPC_HELPER_STACK_SIZE, allocsize, false);
+    void* stack = allocate_stack(IPC_HELPER_STACK_SIZE, g_pal_alloc_align, false);
 
     if (notme || !stack) {
         free(stack);

+ 20 - 23
LibOS/shim/src/shim_checkpoint.c

@@ -330,7 +330,7 @@ BEGIN_RS_FUNC(qstr)
      * oflow string's base address and update qstr to point to it. */
     struct shim_qstr * qstr = (void *) (base + GET_CP_FUNC_ENTRY());
     size_t size = qstr->len + 1;
-    size = ((size) + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
+    size = ALIGN_UP(size, sizeof(void*));
     qstr->oflow = (void *)entry - size;
 }
 END_RS_FUNC(qstr)
@@ -339,8 +339,8 @@ BEGIN_CP_FUNC(gipc)
 {
     ptr_t off = ADD_CP_OFFSET(sizeof(struct shim_gipc_entry));
 
-    void * send_addr = (void *) ALIGN_DOWN(obj);
-    size_t send_size = (void *) ALIGN_UP(obj + size) - send_addr;
+    void* send_addr  = (void*)PAGE_ALIGN_DOWN_PTR(obj);
+    size_t send_size = (void*)PAGE_ALIGN_UP_PTR(obj + size) - send_addr;
 
     struct shim_gipc_entry * entry = (void *) (base + off);
 
@@ -354,7 +354,7 @@ BEGIN_CP_FUNC(gipc)
 #if HASH_GIPC == 1
     struct md5_ctx ctx;
     md5_init(&ctx);
-    md5_update(&ctx, send_addr, allocsize);
+    md5_update(&ctx, send_addr, g_pal_alloc_align);
     md5_final(&ctx);
     entry->first_hash = *(unsigned long *) ctx.digest;
 #endif /* HASH_GIPC == 1 */
@@ -378,17 +378,17 @@ BEGIN_RS_FUNC(gipc)
 
     PAL_FLG pal_prot = PAL_PROT(entry->prot, 0);
     if (!(pal_prot & PROT_READ))
-        DkVirtualMemoryProtect(entry->addr, entry->npages * allocsize,
+        DkVirtualMemoryProtect(entry->addr, entry->npages * g_pal_alloc_align,
                                pal_prot|PAL_PROT_READ);
 
     struct md5_ctx ctx;
     md5_init(&ctx);
-    md5_update(&ctx, entry->addr, allocsize);
+    md5_update(&ctx, entry->addr, g_pal_alloc_align);
     md5_final(&ctx);
     assert(*(unsigned long *) ctx.digest == entry->first_hash);
 
     if (!(pal_prot & PAL_PROT_READ))
-        DkVirtualMemoryProtect(entry->addr, entry->npages * allocsize,
+        DkVirtualMemoryProtect(entry->addr, entry->npages * g_pal_alloc_align,
                                pal_prot);
 #endif /* HASH_GIPC == 1 */
 }
@@ -399,7 +399,7 @@ static int send_checkpoint_by_gipc (PAL_HANDLE gipc_store,
 {
     PAL_PTR hdr_addr = (PAL_PTR) store->base;
     PAL_NUM hdr_size = (PAL_NUM) store->offset + store->mem_size;
-    assert(ALIGNED(hdr_addr));
+    assert(IS_PAGE_ALIGNED_PTR(hdr_addr));
 
     int mem_nentries = store->mem_nentries;
 
@@ -428,7 +428,7 @@ static int send_checkpoint_by_gipc (PAL_HANDLE gipc_store,
         }
     }
 
-    hdr_size = ALIGN_UP(hdr_size);
+    hdr_size = PAGE_ALIGN_UP(hdr_size);
     int npages = DkPhysicalMemoryCommit(gipc_store, 1, &hdr_addr, &hdr_size);
     if (!npages)
         return -EPERM;
@@ -446,7 +446,7 @@ static int send_checkpoint_by_gipc (PAL_HANDLE gipc_store,
         cnt--;
         gipc_addrs[cnt] = ent->mem.addr;
         gipc_sizes[cnt] = ent->mem.size;
-        total_pages += ent->mem.size / allocsize;
+        total_pages += ent->mem.size / g_pal_alloc_align;
     }
 
     gipc_addrs += cnt;
@@ -620,9 +620,8 @@ int restore_checkpoint (struct cp_header * cphdr, struct mem_header * memhdr,
                 debug("memory entry [%p]: %p-%p\n", entry, entry->addr,
                       entry->addr + entry->size);
 
-                PAL_PTR addr = ALIGN_DOWN(entry->addr);
-                PAL_NUM size = ALIGN_UP(entry->addr + entry->size) -
-                               (void *) addr;
+                PAL_PTR addr = PAGE_ALIGN_DOWN_PTR(entry->addr);
+                PAL_NUM size = PAGE_ALIGN_UP_PTR(entry->addr + entry->size) - (void*)addr;
                 PAL_FLG prot = entry->prot;
 
                 if (!DkVirtualMemoryAlloc(addr, size, 0, prot|PAL_PROT_WRITE)) {
@@ -753,8 +752,7 @@ int restore_from_file (const char * filename, struct newproc_cp_header * hdr,
         goto out;
 
     void * cpaddr = cphdr.addr;
-    ret = fs->fs_ops->mmap(file, &cpaddr, ALIGN_UP(cphdr.size),
-                           PROT_READ|PROT_WRITE,
+    ret = fs->fs_ops->mmap(file, &cpaddr, PAGE_ALIGN_UP(cphdr.size), PROT_READ|PROT_WRITE,
                            MAP_PRIVATE|MAP_FILE, 0);
     if (ret < 0)
         goto out;
@@ -868,7 +866,7 @@ static void * cp_alloc (struct shim_cp_store * store, void * addr, size_t size)
          * checkpoint space. The reserved space is half of the size of the
          * checkpoint space, but can be further fine-tuned.
          */
-        size_t reserve_size = ALIGN_UP(size >> 1);
+        size_t reserve_size = PAGE_ALIGN_UP(size >> 1);
 
         debug("try allocate checkpoint store (size = %ld, reserve = %ld)\n",
               size, reserve_size);
@@ -1005,7 +1003,7 @@ int do_migrate_process (int (*migrate) (struct shim_cp_store *,
             break;
 
         cpstore.bound >>= 1;
-        if (cpstore.bound < allocsize)
+        if (cpstore.bound < g_pal_alloc_align)
             break;
     }
 
@@ -1199,8 +1197,8 @@ int do_migration (struct newproc_cp_header * hdr, void ** cpptr)
 
         /* Try to load the checkpoint at the same address */
         base = hdr->hdr.addr;
-        mapaddr = (PAL_PTR) ALIGN_DOWN(base);
-        mapsize = (PAL_PTR) ALIGN_UP(base + size) - mapaddr;
+        mapaddr = (PAL_PTR)PAGE_ALIGN_DOWN_PTR(base);
+        mapsize = (PAL_PTR)PAGE_ALIGN_UP_PTR(base + size) - mapaddr;
 
         /* Need to create VMA before allocation */
         ret = bkeep_mmap((void *) mapaddr, mapsize,
@@ -1212,14 +1210,13 @@ int do_migration (struct newproc_cp_header * hdr, void ** cpptr)
 #endif
 
     if (!base) {
-        base = bkeep_unmapped_any(ALIGN_UP(size),
-                                  PROT_READ|PROT_WRITE, CP_VMA_FLAGS, 0,
+        base = bkeep_unmapped_any(PAGE_ALIGN_UP(size), PROT_READ|PROT_WRITE, CP_VMA_FLAGS, 0,
                                   "cpstore");
         if (!base)
             return -ENOMEM;
 
-        mapaddr = (PAL_PTR) base;
-        mapsize = (PAL_NUM) ALIGN_UP(size);
+        mapaddr = (PAL_PTR)base;
+        mapsize = (PAL_NUM)PAGE_ALIGN_UP(size);
     }
 
     debug("checkpoint mapped at %p-%p\n", base, base + size);

+ 14 - 16
LibOS/shim/src/shim_init.c

@@ -40,9 +40,7 @@
 #include <asm/unistd.h>
 #include <asm/fcntl.h>
 
-unsigned long allocsize;
-unsigned long allocshift;
-unsigned long allocmask;
+size_t g_pal_alloc_align;
 
 /* The following constants will help matching glibc version with compatible
    SHIM libraries */
@@ -257,8 +255,8 @@ DEFINE_PROFILE_OCCURENCE(alloc_stack_count, memory);
 
 void * allocate_stack (size_t size, size_t protect_size, bool user)
 {
-    size = ALIGN_UP(size);
-    protect_size = ALIGN_UP(protect_size);
+    size = PAGE_ALIGN_UP(size);
+    protect_size = PAGE_ALIGN_UP(protect_size);
 
     /* preserve a non-readable, non-writable page below the user
        stack to stop user program to clobber other vmas */
@@ -287,7 +285,7 @@ void * allocate_stack (size_t size, size_t protect_size, bool user)
 
     stack += protect_size;
     // Ensure proper alignment for process' initial stack pointer value.
-    stack += (16 - (uintptr_t)stack % 16) % 16;
+    stack = ALIGN_UP_PTR(stack, 16);
     DkVirtualMemoryProtect(stack, size, PAL_PROT_READ|PAL_PROT_WRITE);
 
     if (bkeep_mprotect(stack, size, PROT_READ|PROT_WRITE, flags) < 0)
@@ -378,7 +376,7 @@ int init_stack (const char ** argv, const char ** envp,
     if (root_config) {
         char stack_cfg[CONFIG_MAX];
         if (get_config(root_config, "sys.stack.size", stack_cfg, CONFIG_MAX) > 0) {
-            stack_size = ALIGN_UP(parse_int(stack_cfg));
+            stack_size = PAGE_ALIGN_UP(parse_int(stack_cfg));
             set_rlimit_cur(RLIMIT_STACK, stack_size);
         }
     }
@@ -388,7 +386,7 @@ int init_stack (const char ** argv, const char ** envp,
     if (!cur_thread || cur_thread->stack)
         return 0;
 
-    void * stack = allocate_stack(stack_size, allocsize, true);
+    void * stack = allocate_stack(stack_size, g_pal_alloc_align, true);
     if (!stack)
         return -ENOMEM;
 
@@ -405,7 +403,7 @@ int init_stack (const char ** argv, const char ** envp,
 
     cur_thread->stack_top = stack + stack_size;
     cur_thread->stack     = stack;
-    cur_thread->stack_red = stack - allocsize;
+    cur_thread->stack_red = stack - g_pal_alloc_align;
 
     return 0;
 }
@@ -484,15 +482,13 @@ int init_manifest (PAL_HANDLE manifest_handle)
             return -PAL_ERRNO;
 
         size = attr.pending_size;
-        map_size = ALIGN_UP(size);
+        map_size = PAGE_ALIGN_UP(size);
         addr = bkeep_unmapped_any(map_size, PROT_READ, MAP_FLAGS,
                                   0, "manifest");
         if (!addr)
             return -ENOMEM;
 
-        void * ret_addr = DkStreamMap(manifest_handle, addr,
-                                      PAL_PROT_READ, 0,
-                                      ALIGN_UP(size));
+        void* ret_addr = DkStreamMap(manifest_handle, addr, PAL_PROT_READ, 0, PAGE_ALIGN_UP(size));
 
         if (!ret_addr) {
             bkeep_munmap(addr, map_size, MAP_FLAGS);
@@ -694,9 +690,11 @@ noreturn void* shim_init (int argc, void * args)
 
     DkSetExceptionHandler(&handle_failure, PAL_EVENT_FAILURE);
 
-    allocsize = PAL_CB(alloc_align);
-    allocshift = allocsize - 1;
-    allocmask = ~allocshift;
+    g_pal_alloc_align = PAL_CB(alloc_align);
+    if (!IS_POWER_OF_2(g_pal_alloc_align)) {
+        SYS_PRINTF("shim_init(): error: PAL allocation alignment not a power of 2\n");
+        shim_terminate(-EINVAL);
+    }
 
     create_lock(&__master_lock);
 

+ 3 - 4
LibOS/shim/src/shim_malloc.c

@@ -37,7 +37,6 @@ static struct shim_lock slab_mgr_lock;
 
 #define SYSTEM_LOCK()   lock(&slab_mgr_lock)
 #define SYSTEM_UNLOCK() unlock(&slab_mgr_lock)
-#define PAGE_SIZE       allocsize
 
 #ifdef SLAB_DEBUG_TRACE
 #define SLAB_DEBUG
@@ -54,7 +53,7 @@ DEFINE_PROFILE_CATEGORY(memory, );
 
 /* Returns NULL on failure */
 void* __system_malloc(size_t size) {
-    size_t alloc_size = ALIGN_UP(size);
+    size_t alloc_size = PAGE_ALIGN_UP(size);
     void* addr;
     void* ret_addr;
     int flags = MAP_PRIVATE | MAP_ANONYMOUS | VMA_INTERNAL;
@@ -92,9 +91,9 @@ void* __system_malloc(size_t size) {
 }
 
 void __system_free(void* addr, size_t size) {
-    DkVirtualMemoryFree(addr, ALIGN_UP(size));
+    DkVirtualMemoryFree(addr, PAGE_ALIGN_UP(size));
 
-    if (bkeep_munmap(addr, ALIGN_UP(size), VMA_INTERNAL) < 0)
+    if (bkeep_munmap(addr, PAGE_ALIGN_UP(size), VMA_INTERNAL) < 0)
         BUG();
 }
 

+ 2 - 2
LibOS/shim/src/sys/shim_brk.c

@@ -59,7 +59,7 @@ int init_brk_region(void* brk_region, size_t data_segment_size) {
     if (region.brk_start)
         return 0;
 
-    data_segment_size = ALIGN_UP(data_segment_size);
+    data_segment_size = PAGE_ALIGN_UP(data_segment_size);
     uint64_t brk_max_size = DEFAULT_BRK_MAX_SIZE;
 
     if (root_config) {
@@ -103,7 +103,7 @@ int init_brk_region(void* brk_region, size_t data_segment_size) {
                     return -convert_pal_errno(-ret);
                 rand %= MIN((size_t)0x2000000,
                             (size_t)(PAL_CB(user_address.end) - brk_region - brk_max_size));
-                rand = ALIGN_DOWN(rand);
+                rand = PAGE_ALIGN_DOWN(rand);
 
                 if (brk_region + rand + brk_max_size >= PAL_CB(user_address.end))
                     continue;

+ 2 - 2
LibOS/shim/src/sys/shim_clone.c

@@ -149,7 +149,7 @@ int clone_implementation_wrapper(struct clone_args * arg)
     void * stack = arg->stack;
 
     struct shim_vma_val vma;
-    lookup_vma(ALIGN_DOWN(stack), &vma);
+    lookup_vma(PAGE_ALIGN_DOWN_PTR(stack), &vma);
     my_thread->stack_top = vma.addr + vma.length;
     my_thread->stack_red = my_thread->stack = vma.addr;
 
@@ -335,7 +335,7 @@ int shim_do_clone (int flags, void * user_stack_addr, int * parent_tidptr,
 
         if (user_stack_addr) {
             struct shim_vma_val vma;
-            lookup_vma(ALIGN_DOWN(user_stack_addr), &vma);
+            lookup_vma(PAGE_ALIGN_DOWN_PTR(user_stack_addr), &vma);
             thread->stack_top = vma.addr + vma.length;
             thread->stack_red = thread->stack = vma.addr;
             parent_stack = (void *)tcb->shim_tcb.context.regs->rsp;

+ 10 - 10
LibOS/shim/src/sys/shim_fs.c

@@ -342,8 +342,8 @@ int shim_do_fchown (int fd, uid_t uid, gid_t gid)
     return 0;
 }
 
-#define MAP_SIZE    (allocsize * 4)
-#define BUF_SIZE    (2048)
+#define MAP_SIZE (g_pal_alloc_align * 4)
+#define BUF_SIZE 2048
 
 static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
                             struct shim_handle * hdlo, off_t * offseto,
@@ -450,9 +450,9 @@ static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
             expectsize = bufsize = count - bytes;
 
         if (do_mapi && !bufi) {
-            boffi = offi - ALIGN_DOWN(offi);
+            boffi = offi - PAGE_ALIGN_DOWN(offi);
 
-            if (fsi->fs_ops->mmap(hdli, &bufi, ALIGN_UP(bufsize + boffi),
+            if (fsi->fs_ops->mmap(hdli, &bufi, PAGE_ALIGN_UP(bufsize + boffi),
                                   PROT_READ, MAP_FILE, offi - boffi) < 0) {
                 do_mapi = false;
                 boffi = 0;
@@ -470,9 +470,9 @@ static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
         }
 
         if (do_mapo && !bufo) {
-            boffo = offo - ALIGN_DOWN(offo);
+            boffo = offo - PAGE_ALIGN_DOWN(offo);
 
-            if (fso->fs_ops->mmap(hdlo, &bufo, ALIGN_UP(bufsize + boffo),
+            if (fso->fs_ops->mmap(hdlo, &bufo, PAGE_ALIGN_UP(bufsize + boffo),
                                   PROT_WRITE, MAP_FILE, offo - boffo) < 0) {
                 do_mapo = false;
                 boffo = 0;
@@ -493,19 +493,19 @@ static ssize_t handle_copy (struct shim_handle * hdli, off_t * offseti,
             copysize = count - bytes > bufsize ? bufsize :
                        count - bytes;
             memcpy(bufo + boffo, bufi + boffi, copysize);
-            DkVirtualMemoryFree(bufi, ALIGN_UP(bufsize + boffi));
+            DkVirtualMemoryFree(bufi, PAGE_ALIGN_UP(bufsize + boffi));
             bufi = NULL;
-            DkVirtualMemoryFree(bufo, ALIGN_UP(bufsize + boffo));
+            DkVirtualMemoryFree(bufo, PAGE_ALIGN_UP(bufsize + boffo));
             bufo = NULL;
         } else if (do_mapo) {
             copysize = fsi->fs_ops->read(hdli, bufo + boffo, bufsize);
-            DkVirtualMemoryFree(bufo, ALIGN_UP(bufsize + boffo));
+            DkVirtualMemoryFree(bufo, PAGE_ALIGN_UP(bufsize + boffo));
             bufo = NULL;
             if (copysize < 0)
                 break;
         } else if (do_mapi) {
             copysize = fso->fs_ops->write(hdlo, bufi + boffi, bufsize);
-            DkVirtualMemoryFree(bufi, ALIGN_UP(bufsize + boffi));
+            DkVirtualMemoryFree(bufi, PAGE_ALIGN_UP(bufsize + boffi));
             bufi = NULL;
             if (copysize < 0)
                 break;

+ 1 - 1
LibOS/shim/src/sys/shim_futex.c

@@ -62,7 +62,7 @@ int shim_do_futex(int* uaddr, int op, int val, void* utime, int* uaddr2, int val
     uint32_t val2 = 0;
     int ret       = 0;
 
-    if (!uaddr || ((uintptr_t)uaddr % sizeof(unsigned int)))
+    if (!uaddr || !IS_ALIGNED_PTR(uaddr, sizeof(unsigned int)))
         return -EINVAL;
 
     create_lock_runtime(&futex_list_lock);

+ 13 - 13
LibOS/shim/src/sys/shim_mmap.c

@@ -42,17 +42,17 @@ void* shim_do_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t
      * According to the manpage, both addr and offset have to be page-aligned,
      * but not the length. mmap() will automatically round up the length.
      */
-    if (addr && !ALIGNED(addr))
+    if (addr && !IS_PAGE_ALIGNED_PTR(addr))
         return (void*)-EINVAL;
 
-    if (fd >= 0 && !ALIGNED(offset))
+    if (fd >= 0 && !IS_PAGE_ALIGNED(offset))
         return (void*)-EINVAL;
 
     if (!length || !access_ok(addr, length))
         return (void*)-EINVAL;
 
-    if (!ALIGNED(length))
-        length = ALIGN_UP(length);
+    if (!IS_PAGE_ALIGNED(length))
+        length = PAGE_ALIGN_UP(length);
 
     /* ignore MAP_32BIT when MAP_FIXED is set */
     if ((flags & (MAP_32BIT | MAP_FIXED)) == (MAP_32BIT | MAP_FIXED))
@@ -148,11 +148,11 @@ int shim_do_mprotect(void* addr, size_t length, int prot) {
      * According to the manpage, addr has to be page-aligned, but not the
      * length. mprotect() will automatically round up the length.
      */
-    if (!addr || !ALIGNED(addr))
+    if (!addr || !IS_PAGE_ALIGNED_PTR(addr))
         return -EINVAL;
 
-    if (!ALIGNED(length))
-        length = ALIGN_UP(length);
+    if (!IS_PAGE_ALIGNED(length))
+        length = PAGE_ALIGN_UP(length);
 
     if (bkeep_mprotect(addr, length, prot, 0) < 0)
         return -EPERM;
@@ -168,14 +168,14 @@ int shim_do_munmap(void* addr, size_t length) {
      * According to the manpage, addr has to be page-aligned, but not the
      * length. munmap() will automatically round up the length.
      */
-    if (!addr || !ALIGNED(addr))
+    if (!addr || !IS_PAGE_ALIGNED_PTR(addr))
         return -EINVAL;
 
     if (!length || !access_ok(addr, length))
         return -EINVAL;
 
-    if (!ALIGNED(length))
-        length = ALIGN_UP(length);
+    if (!IS_PAGE_ALIGNED(length))
+        length = PAGE_ALIGN_UP(length);
 
     struct shim_vma_val vma;
 
@@ -208,19 +208,19 @@ int shim_do_munmap(void* addr, size_t length) {
 * Possibly this lie may cause performance (or other) issues.
  */
 int shim_do_mincore(void* addr, size_t len, unsigned char* vec) {
-    if (!ALIGNED(addr))
+    if (!IS_PAGE_ALIGNED_PTR(addr))
         return -EINVAL;
 
     if (test_user_memory(addr, len, false))
         return -ENOMEM;
 
-    unsigned long pages = ALIGN_UP(len) / allocsize;
+    unsigned long pages = PAGE_ALIGN_UP(len) / g_pal_alloc_align;
     if (test_user_memory(vec, pages, true))
         return -EFAULT;
 
     for (unsigned long i = 0; i < pages; i++) {
         struct shim_vma_val vma;
-        if (lookup_overlap_vma(addr + i * allocsize, 1, &vma) < 0)
+        if (lookup_overlap_vma(addr + i * g_pal_alloc_align, 1, &vma) < 0)
             return -ENOMEM;
         /*
          * lookup_overlap_vma() calls __dump_vma() which adds a reference to

+ 5 - 4
LibOS/shim/src/sys/shim_msgget.c

@@ -758,7 +758,8 @@ static int __store_msg_persist(struct shim_msg_handle* msgq) {
         goto err_file;
 
     void* mem =
-        (void*)DkStreamMap(file, NULL, PAL_PROT_READ | PAL_PROT_WRITE, 0, ALIGN_UP(expected_size));
+        (void*)DkStreamMap(file, NULL, PAL_PROT_READ | PAL_PROT_WRITE, 0,
+                           PAGE_ALIGN_UP(expected_size));
     if (!mem) {
         ret = -EFAULT;
         goto err_file;
@@ -785,7 +786,7 @@ static int __store_msg_persist(struct shim_msg_handle* msgq) {
         mtype->msgs = mtype->msg_tail = NULL;
     }
 
-    DkStreamUnmap(mem, ALIGN_UP(expected_size));
+    DkStreamUnmap(mem, PAGE_ALIGN_UP(expected_size));
 
     if (msgq->owned)
         for (mtype = msgq->types; mtype < &msgq->types[msgq->ntypes]; mtype++) {
@@ -846,7 +847,7 @@ static int __load_msg_persist(struct shim_msg_handle* msgq, bool readmsg) {
     int expected_size = sizeof(struct msg_handle_backup) + sizeof(struct msg_backup) * mback.nmsgs +
                         mback.currentsize;
 
-    void* mem = (void*)DkStreamMap(file, NULL, PAL_PROT_READ, 0, ALIGN_UP(expected_size));
+    void* mem = (void*)DkStreamMap(file, NULL, PAL_PROT_READ, 0, PAGE_ALIGN_UP(expected_size));
 
     if (!mem) {
         ret = -PAL_ERRNO;
@@ -869,7 +870,7 @@ static int __load_msg_persist(struct shim_msg_handle* msgq, bool readmsg) {
             goto out;
     };
 
-    DkStreamUnmap(mem, ALIGN_UP(expected_size));
+    DkStreamUnmap(mem, PAGE_ALIGN_UP(expected_size));
 
 done:
     DkStreamDelete(file, 0);

+ 0 - 1
LibOS/shim/src/utils/strobjs.c

@@ -29,7 +29,6 @@ static struct shim_lock str_mgr_lock;
 #define SYSTEM_UNLOCK() unlock(&str_mgr_lock)
 
 #define STR_MGR_ALLOC 32
-#define PAGE_SIZE     allocsize
 
 #define OBJ_TYPE struct shim_str
 #include "memmgr.h"

+ 18 - 4
Pal/lib/api.h

@@ -70,10 +70,24 @@ typedef ptrdiff_t ssize_t;
 
 #define IS_POWER_OF_2(x) (((x) & ((x) - 1)) == 0)
 
-#define ALIGN_DOWN_PTR(ptr, size) \
-    ((__typeof__(ptr)) (((uintptr_t)(ptr)) & -(size)))
-#define ALIGN_UP_PTR(ptr, size) \
-    ((__typeof__(ptr)) ALIGN_DOWN_PTR((uintptr_t)(ptr) + ((size) - 1), (size)))
+#define IS_ALIGNED(val, alignment) ((val) % (alignment) == 0)
+#define ALIGN_DOWN(val, alignment) ((val) - (val) % (alignment))
+#define ALIGN_UP(val, alignment)   ALIGN_DOWN((val) + (alignment) - 1, alignment)
+#define IS_ALIGNED_PTR(val, alignment) IS_ALIGNED((uintptr_t)(val), alignment)
+#define ALIGN_DOWN_PTR(ptr, alignment) ((__typeof__(ptr))(ALIGN_DOWN((uintptr_t)(ptr), alignment)))
+#define ALIGN_UP_PTR(ptr, alignment)   ((__typeof__(ptr))(ALIGN_UP((uintptr_t)(ptr), alignment)))
+
+/* Useful only when the alignment is a power of two, but that power is not known at compile time. */
+#define IS_ALIGNED_POW2(val, alignment) (((val) & ((alignment) - 1)) == 0)
+#define ALIGN_DOWN_POW2(val, alignment) \
+    ((val) - ((val) & ((alignment) - 1))) // `~` doesn't work if `alignment` is of a smaller type
+                                          // than `val` and unsigned.
+#define ALIGN_UP_POW2(val, alignment)       ALIGN_DOWN_POW2((val) + (alignment) - 1, alignment)
+#define IS_ALIGNED_PTR_POW2(val, alignment) IS_ALIGNED_POW2((uintptr_t)(val), alignment)
+#define ALIGN_DOWN_PTR_POW2(ptr, alignment) ((__typeof__(ptr))(ALIGN_DOWN_POW2((uintptr_t)(ptr), \
+                                                                               alignment)))
+#define ALIGN_UP_PTR_POW2(ptr, alignment)   ((__typeof__(ptr))(ALIGN_UP_POW2((uintptr_t)(ptr), \
+                                                                             alignment)))
 
 #define SAME_TYPE(a, b) __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
 #define IS_STATIC_ARRAY(a) (!SAME_TYPE(a, &*(a)))

+ 10 - 4
Pal/lib/memmgr.h

@@ -25,6 +25,8 @@
 
 #include <sys/mman.h>
 
+#include "api.h"
+#include "assert.h"
 #include "list.h"
 
 #ifndef OBJ_TYPE
@@ -76,29 +78,33 @@ typedef struct mem_mgr {
 
 #ifdef PAGE_SIZE
 static inline int size_align_down(int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __MAX_MEM_SIZE(size) - sizeof(MEM_MGR_TYPE);
-    int p = s - (s & ~(PAGE_SIZE - 1));
+    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
     int o = __SUM_OBJ_SIZE(1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int size_align_up(int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __MAX_MEM_SIZE(size) - sizeof(MEM_MGR_TYPE);
-    int p = ((s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - s;
+    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
     int o = __SUM_OBJ_SIZE(1);
     return size + p / o;
 }
 
 static inline int init_align_down(int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __MAX_MEM_SIZE(size);
-    int p = s - (s & ~(PAGE_SIZE - 1));
+    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
     int o = __SUM_OBJ_SIZE(1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int init_align_up(int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __MAX_MEM_SIZE(size);
-    int p = ((s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - s;
+    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
     int o = __SUM_OBJ_SIZE(1);
     return size + p / o;
 }

+ 8 - 4
Pal/lib/slabmgr.h

@@ -178,29 +178,33 @@ typedef struct __attribute__((packed)) large_mem_obj {
 
 #ifdef PAGE_SIZE
 static inline int size_align_down(int slab_size, int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __MAX_MEM_SIZE(slab_size, size);
-    int p = s - (s & ~(PAGE_SIZE - 1));
+    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
     int o = __SUM_OBJ_SIZE(slab_size, 1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int size_align_up(int slab_size, int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __MAX_MEM_SIZE(slab_size, size);
-    int p = ((s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - s;
+    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
     int o = __SUM_OBJ_SIZE(slab_size, 1);
     return size + p / o;
 }
 
 static inline int init_align_down(int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __INIT_MAX_MEM_SIZE(size);
-    int p = s - (s & ~(PAGE_SIZE - 1));
+    int p = s - ALIGN_DOWN_POW2(s, PAGE_SIZE);
     int o = __INIT_SUM_OBJ_SIZE(1);
     return size - p / o - (p % o ? 1 : 0);
 }
 
 static inline int init_size_align_up(int size) {
+    assert(IS_POWER_OF_2(PAGE_SIZE));
     int s = __INIT_MAX_MEM_SIZE(size);
-    int p = ((s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - s;
+    int p = ALIGN_UP_POW2(s, PAGE_SIZE) - s;
     int o = __INIT_SUM_OBJ_SIZE(1);
     return size + p / o;
 }

+ 1 - 1
Pal/regression/Memory.c

@@ -54,7 +54,7 @@ int main(int argc, char** argv, char** envp) {
     void* mem4 = (void*)pal_control.user_address.end - UNIT;
 
     if (mem3 >= pal_control.executable_range.start && mem3 < pal_control.executable_range.end)
-        mem3 = (void*)(((PAL_NUM)pal_control.executable_range.end + UNIT - 1) & ~(UNIT - 1));
+        mem3 = (void*)ALIGN_UP_PTR(pal_control.executable_range.end, UNIT);
 
     mem3 = (void*)DkVirtualMemoryAlloc(mem3, UNIT, 0, PAL_PROT_READ | PAL_PROT_WRITE);
     mem4 = (void*)DkVirtualMemoryAlloc(mem4, UNIT, 0, PAL_PROT_READ | PAL_PROT_WRITE);

+ 2 - 2
Pal/src/db_rtld.c

@@ -220,8 +220,8 @@ map_elf_object_by_handle (PAL_HANDLE handle, enum object_type type,
                     return NULL;
                 }
 
-                if (__builtin_expect (((ph->p_vaddr - ph->p_offset)
-                                       & (ph->p_align - 1)) != 0, 0)) {
+                if (__builtin_expect (!IS_ALIGNED_POW2(ph->p_vaddr - ph->p_offset, ph->p_align),
+                                      0)) {
                     print_error("ELF load command address/offset not properly aligned",
                                 -PAL_ERROR_NOMEM);
                     return NULL;

+ 4 - 7
Pal/src/host/Linux-SGX/db_files.c

@@ -125,11 +125,9 @@ static int64_t file_read(PAL_HANDLE handle, uint64_t offset, uint64_t count, voi
     if (offset >= total)
         return 0;
 
-    static_assert(IS_POWER_OF_2(TRUSTED_STUB_SIZE), "TRUSTED_STUB_SIZE must be a power of two");
-
     uint64_t end       = (offset + count > total) ? total : offset + count;
-    uint64_t map_start = offset & ~(TRUSTED_STUB_SIZE - 1);
-    uint64_t map_end   = (end + TRUSTED_STUB_SIZE - 1) & ~(TRUSTED_STUB_SIZE - 1);
+    uint64_t map_start = ALIGN_DOWN(offset, TRUSTED_STUB_SIZE);
+    uint64_t map_end   = ALIGN_UP(end, TRUSTED_STUB_SIZE);
 
     if (map_end > total)
         map_end = ALLOC_ALIGNUP(total);
@@ -235,9 +233,8 @@ static int file_map(PAL_HANDLE handle, void** addr, int prot, uint64_t offset, u
     uint64_t map_start, map_end;
 
     if (stubs) {
-        static_assert(IS_POWER_OF_2(TRUSTED_STUB_SIZE), "TRUSTED_STUB_SIZE must be a power of two");
-        map_start = offset & ~(TRUSTED_STUB_SIZE - 1);
-        map_end   = (end + TRUSTED_STUB_SIZE - 1) & ~(TRUSTED_STUB_SIZE - 1);
+        map_start = ALIGN_DOWN(offset, TRUSTED_STUB_SIZE);
+        map_end   = ALIGN_UP(end, TRUSTED_STUB_SIZE);
     } else {
         map_start = ALLOC_ALIGNDOWN(offset);
         map_end   = ALLOC_ALIGNUP(end);

+ 1 - 1
Pal/src/host/Linux-SGX/db_main.c

@@ -373,7 +373,7 @@ void pal_linux_main(char * uptr_args, uint64_t args_size,
     }
 
     uint64_t manifest_size = GET_ENCLAVE_TLS(manifest_size);
-    void* manifest_addr = enclave_top - ALIGN_UP_PTR(manifest_size, pagesz);
+    void* manifest_addr = enclave_top - ALIGN_UP_PTR_POW2(manifest_size, pagesz);
 
     /* parse manifest data into config storage */
     struct config_store * root_config =

+ 1 - 1
Pal/src/host/Linux-SGX/enclave_framework.c

@@ -496,7 +496,7 @@ int copy_and_verify_trusted_file (const char * path, const void * umem,
 {
     /* Check that the untrusted mapping is aligned to TRUSTED_STUB_SIZE
      * and includes the range for copying into the buffer */
-    assert(umem_start % TRUSTED_STUB_SIZE == 0);
+    assert(IS_ALIGNED(umem_start, TRUSTED_STUB_SIZE));
     assert(offset >= umem_start && offset + size <= umem_end);
 
     /* Start copying and checking at umem_start. The checked content may or

+ 4 - 7
Pal/src/host/Linux-SGX/enclave_pages.c

@@ -197,8 +197,8 @@ void * get_reserved_pages(void * addr, size_t size)
         return NULL;
     }
 
-    size = ((size + pgsz - 1) & ~(pgsz - 1));
-    addr = (void *)((uintptr_t)addr & ~(pgsz - 1));
+    size = ALIGN_UP(size, pgsz);
+    addr = ALIGN_DOWN_PTR(addr, pgsz);
 
     SGX_DBG(DBG_M, "allocate %ld bytes at %p\n", size, addr);
 
@@ -266,11 +266,8 @@ void free_pages(void * addr, size_t size)
     if (!addr || !size)
         return;
 
-    if ((uintptr_t) addr_top & (pgsz - 1))
-        addr_top = (void *) (((uintptr_t) addr_top + pgsz - 1) & ~(pgsz - 1));
-
-    if ((uintptr_t) addr & (pgsz - 1))
-        addr = (void *) ((uintptr_t) addr & ~(pgsz - 1));
+    addr = ALIGN_DOWN_PTR(addr, pgsz);
+    addr_top = ALIGN_UP_PTR(addr_top, pgsz);
 
     if (addr >= heap_base + heap_size)
         return;

+ 1 - 1
Pal/src/host/Linux-SGX/linux_types.h

@@ -114,7 +114,7 @@ struct cmsghdr {
     ((size_t)(mhdr)->msg_controllen >= sizeof(struct cmsghdr) \
          ? (struct cmsghdr*)(mhdr)->msg_control               \
          : (struct cmsghdr*)0)
-#define CMSG_ALIGN(len) (((len) + sizeof(size_t) - 1) & (size_t) ~(sizeof(size_t) - 1))
+#define CMSG_ALIGN(len) ALIGN_UP(len, sizeof(size_t))
 #define CMSG_SPACE(len) (CMSG_ALIGN(len) + CMSG_ALIGN(sizeof(struct cmsghdr)))
 #define CMSG_LEN(len)   (CMSG_ALIGN(sizeof(struct cmsghdr)) + (len))
 

+ 1 - 1
Pal/src/host/Linux-SGX/sgx_framework.c

@@ -171,7 +171,7 @@ int create_enclave(sgx_arch_secs_t * secs,
      * EINIT in https://software.intel.com/sites/default/files/managed/48/88/329298-002.pdf). */
 
     if (baseaddr) {
-        secs->base = (uint64_t) baseaddr & ~(secs->size - 1);
+        secs->base = ALIGN_DOWN_POW2(baseaddr, secs->size);
     } else {
         secs->base = ENCLAVE_HIGH_ADDRESS;
     }

+ 1 - 1
Pal/src/host/Linux/db_threading.c

@@ -59,7 +59,7 @@ int pal_thread_init (void * tcbptr)
 
     if (tcb->alt_stack) {
         // Align stack to 16 bytes
-        void * alt_stack_top = (void *) ((uint64_t) tcb & ~15);
+        void* alt_stack_top = ALIGN_DOWN_PTR(tcb, 16);
         assert(alt_stack_top > tcb->alt_stack);
         stack_t ss;
         ss.ss_sp    = alt_stack_top;