[PAL] Clean up alignment macros

Michał Kowalczyk, 4 years ago
commit ce7f03f635
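
This commit drops the ad-hoc ROUND_UP macro in slabmgr.h and the per-host ALLOC_ALIGNUP / ALLOC_ALIGNDOWN / ALLOC_ALIGNED macros that were built on precomputed shift/mask values (pal_state.alloc_shift / alloc_mask, and pageshift / pagemask in the SGX loader). All call sites switch to a single family of power-of-two alignment helpers (ALLOC_ALIGN_UP, ALLOC_ALIGN_DOWN, IS_ALLOC_ALIGNED, plus _PTR variants for pointer arguments), and pal_state.alloc_align is now simply asserted to be a power of two.

The generic POW2 helpers themselves are not part of this diff. As a point of reference only, such macros are conventionally written along these lines (the real definitions live in a shared header and may differ in detail):

    /* Illustrative sketch -- not the definitions used by this commit. */
    #define IS_POWER_OF_2(x)      ((x) != 0 && ((x) & ((x) - 1)) == 0)
    #define IS_ALIGNED_POW2(x, a) (((x) & ((a) - 1)) == 0)
    #define ALIGN_DOWN_POW2(x, a) ((x) & ~((a) - 1))
    #define ALIGN_UP_POW2(x, a)   ALIGN_DOWN_POW2((x) + (a) - 1, a)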

+ 3 - 10
Pal/lib/slabmgr.h

@@ -65,12 +65,6 @@
 
 #define LARGE_OBJ_PADDING 8
 
-/* Returns the smallest exact multiple of _y that is at least as large as _x.
- * In other words, returns _x if _x is a multiple of _y, otherwise rounds
- * _x up to be a multiple of _y.
- */
-#define ROUND_UP(_x, _y) ((((_x) + (_y) - 1) / (_y)) * (_y))
-
 DEFINE_LIST(slab_obj);
 
 typedef struct __attribute__((packed)) slab_obj {
@@ -116,10 +110,9 @@ struct slab_debug {
 #define SLAB_CANARY_SIZE 0
 #endif
 
-#define SLAB_HDR_SIZE                                                                 \
-    ROUND_UP((sizeof(SLAB_OBJ_TYPE) - sizeof(LIST_TYPE(slab_obj)) + SLAB_DEBUG_SIZE + \
-              SLAB_CANARY_SIZE),                                                      \
-             MIN_MALLOC_ALIGNMENT)
+#define SLAB_HDR_SIZE                                                                \
+    ALIGN_UP(sizeof(SLAB_OBJ_TYPE) - sizeof(LIST_TYPE(slab_obj)) + SLAB_DEBUG_SIZE + \
+             SLAB_CANARY_SIZE, MIN_MALLOC_ALIGNMENT)
 
 #ifndef SLAB_LEVEL
 #define SLAB_LEVEL 8
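
Note on this hunk: for a power-of-two alignment such as MIN_MALLOC_ALIGNMENT is expected to be, ALIGN_UP and the deleted ROUND_UP yield the same result; a mask-based ALIGN_UP merely avoids the division. A short worked example, assuming an alignment of 8:

    /* ROUND_UP(13, 8)            == ((13 + 8 - 1) / 8) * 8  == 16
     * mask-based ALIGN_UP(13, 8) == (13 + 8 - 1) & ~(8 - 1) == 16
     * Exact multiples are left untouched: both map 16 to 16. */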

+ 2 - 3
Pal/src/db_main.c

@@ -235,8 +235,7 @@ noreturn void pal_main (
     pal_state.instance_id = instance_id;
     pal_state.pagesize    = _DkGetPagesize();
     pal_state.alloc_align = _DkGetAllocationAlignment();
-    pal_state.alloc_shift = pal_state.alloc_align - 1;
-    pal_state.alloc_mask  = ~pal_state.alloc_shift;
+    assert(IS_POWER_OF_2(pal_state.alloc_align));
 
     init_slab_mgr(pal_state.alloc_align);
 
@@ -310,7 +309,7 @@ noreturn void pal_main (
 
         ret = _DkStreamMap(manifest_handle, &cfg_addr,
                            PAL_PROT_READ, 0,
-                           ALLOC_ALIGNUP(cfg_size));
+                           ALLOC_ALIGN_UP(cfg_size));
         if (ret < 0)
             INIT_FAIL(-ret, "cannot open manifest file");
 

+ 4 - 4
Pal/src/db_memory.c

@@ -32,12 +32,12 @@ DkVirtualMemoryAlloc(PAL_PTR addr, PAL_NUM size, PAL_FLG alloc_type, PAL_FLG pro
     ENTER_PAL_CALL(DkVirtualMemoryAlloc);
     void* map_addr = (void*)addr;
 
-    if ((addr && !ALLOC_ALIGNED(addr)) || !size || !ALLOC_ALIGNED(size)) {
+    if ((addr && !IS_ALLOC_ALIGNED_PTR(addr)) || !size || !IS_ALLOC_ALIGNED(size)) {
         _DkRaiseFailure(PAL_ERROR_INVAL);
         LEAVE_PAL_CALL_RETURN((PAL_PTR)NULL);
     }
 
-    if (map_addr && _DkCheckMemoryMappable((void*)map_addr, size)) {
+    if (map_addr && _DkCheckMemoryMappable(map_addr, size)) {
         _DkRaiseFailure(PAL_ERROR_DENIED);
         LEAVE_PAL_CALL_RETURN((PAL_PTR)NULL);
     }
@@ -60,7 +60,7 @@ void DkVirtualMemoryFree(PAL_PTR addr, PAL_NUM size) {
         LEAVE_PAL_CALL();
     }
 
-    if (!ALLOC_ALIGNED(addr) || !ALLOC_ALIGNED(size)) {
+    if (!IS_ALLOC_ALIGNED_PTR(addr) || !IS_ALLOC_ALIGNED(size)) {
         _DkRaiseFailure(PAL_ERROR_INVAL);
         LEAVE_PAL_CALL();
     }
@@ -88,7 +88,7 @@ DkVirtualMemoryProtect(PAL_PTR addr, PAL_NUM size, PAL_FLG prot) {
         LEAVE_PAL_CALL_RETURN(PAL_FALSE);
     }
 
-    if (!ALLOC_ALIGNED((void*)addr) || !ALLOC_ALIGNED(size)) {
+    if (!IS_ALLOC_ALIGNED_PTR(addr) || !IS_ALLOC_ALIGNED(size)) {
         _DkRaiseFailure(PAL_ERROR_INVAL);
         LEAVE_PAL_CALL_RETURN(PAL_FALSE);
     }

+ 19 - 22
Pal/src/db_rtld.c

@@ -213,7 +213,7 @@ map_elf_object_by_handle (PAL_HANDLE handle, enum object_type type,
             case PT_LOAD:
                 /* A load command tells us to map in part of the file.
                    We record the load commands and process them all later.  */
-                if (!ALLOC_ALIGNED(ph->p_align)) {
+                if (!IS_ALLOC_ALIGNED(ph->p_align)) {
                     print_error("ELF load command alignment not aligned",
                                 -PAL_ERROR_NOMEM);
                     return NULL;
@@ -226,11 +226,11 @@ map_elf_object_by_handle (PAL_HANDLE handle, enum object_type type,
                 }
 
                 c = &loadcmds[nloadcmds++];
-                c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
-                c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
+                c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
+                c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
                 c->dataend = ph->p_vaddr + ph->p_filesz;
                 c->allocend = ph->p_vaddr + ph->p_memsz;
-                c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
+                c->mapoff = ALLOC_ALIGN_DOWN(ph->p_offset);
 
                 /* Determine whether there is a gap between the last segment
                    and this one.  */
@@ -348,8 +348,8 @@ postmap:
             ElfW(Addr) zero, zeroend, zerosec;
 
             zero = l->l_addr + c->dataend;
-            zeroend = ALLOC_ALIGNUP(l->l_addr + c->allocend);
-            zerosec = ALLOC_ALIGNUP(zero);
+            zeroend = ALLOC_ALIGN_UP(l->l_addr + c->allocend);
+            zerosec = ALLOC_ALIGN_UP(zero);
 
             if (zeroend < zerosec)
                 /* All the extra data is in the last section of the segment.
@@ -362,7 +362,7 @@ postmap:
                 {
                     /* Dag nab it.  */
                     ret = _DkVirtualMemoryProtect(
-                        (void *) ALLOC_ALIGNDOWN(zero), pal_state.alloc_align,
+                        (void*)ALLOC_ALIGN_DOWN(zero), pal_state.alloc_align,
                         c->prot | PAL_PROT_WRITE);
                     if (ret < 0) {
                         print_error("cannot change memory protections", ret);
@@ -371,7 +371,7 @@ postmap:
                 }
                 memset ((void *) zero, '\0', zerosec - zero);
                 if ((c->prot & PAL_PROT_WRITE) == 0)
-                    _DkVirtualMemoryProtect((void *) ALLOC_ALIGNDOWN(zero),
+                    _DkVirtualMemoryProtect((void*)ALLOC_ALIGN_DOWN(zero),
                                             pal_state.alloc_align, c->prot);
             }
 
@@ -519,10 +519,9 @@ int add_elf_object(void * addr, PAL_HANDLE handle, int type)
                 map->l_ldnum = ph->p_memsz / sizeof (ElfW(Dyn));
                 break;
             case PT_LOAD: {
-                ElfW(Addr) start = (ElfW(Addr))
-                        ALLOC_ALIGNDOWN(map->l_addr + ph->p_vaddr);
+                ElfW(Addr) start = (ElfW(Addr))ALLOC_ALIGN_DOWN(map->l_addr + ph->p_vaddr);
                 ElfW(Addr) end = (ElfW(Addr))
-                        ALLOC_ALIGNUP(map->l_addr + ph->p_vaddr + ph->p_memsz);
+                        ALLOC_ALIGN_UP(map->l_addr + ph->p_vaddr + ph->p_memsz);
                 if (start < mapstart)
                     mapstart = start;
                 if (end > mapend)
@@ -627,7 +626,7 @@ void cache_elf_object (PAL_HANDLE handle, struct link_map * map)
     unsigned long obj_size = sizeof(struct cached_elf_object);
     if (map->l_ld != map->l_real_ld)
         obj_size += sizeof(ElfW(Dyn)) * map->l_ldnum;
-    obj_size = ALLOC_ALIGNUP(obj_size);
+    obj_size = ALLOC_ALIGN_UP(obj_size);
 
     cached_size = obj_size;
     ret = _DkStreamSetLength(cached_file, obj_size);
@@ -660,10 +659,8 @@ void cache_elf_object (PAL_HANDLE handle, struct link_map * map)
         if (ph->p_type == PT_LOAD) {
             assert(obj->nloadcmds < MAX_CACHED_LOADCMDS);
 
-            void * mapstart = (void *)
-                    ALLOC_ALIGNDOWN(map->l_addr + ph->p_vaddr);
-            void * mapend = (void *)
-                    ALLOC_ALIGNUP(map->l_addr + ph->p_vaddr + ph->p_memsz);
+            void* mapstart = (void*)ALLOC_ALIGN_DOWN(map->l_addr + ph->p_vaddr);
+            void* mapend   = (void*)ALLOC_ALIGN_UP(map->l_addr + ph->p_vaddr + ph->p_memsz);
             unsigned long mapsize = mapend - mapstart;
             int mapprot = 0;
             void * cache_addr = NULL;
@@ -727,7 +724,7 @@ struct link_map * check_cached_elf_object (PAL_HANDLE handle)
         return NULL;
 
     struct cached_elf_object * obj = NULL;
-    unsigned long obj_size = ALLOC_ALIGNUP(sizeof(struct cached_elf_object));
+    unsigned long obj_size = ALLOC_ALIGN_UP(sizeof(struct cached_elf_object));
 
     ret = _DkStreamMap(cached_file, (void **) &obj,
                        PAL_PROT_READ|PAL_PROT_WRITE|PAL_PROT_WRITECOPY,
@@ -1124,8 +1121,8 @@ struct link_map * lookup_symbol (const char * undef_name, ElfW(Sym) ** ref)
 
 static int protect_relro (struct link_map * l)
 {
-    ElfW(Addr) start = ALLOC_ALIGNDOWN(l->l_addr + l->l_relro_addr);
-    ElfW(Addr) end = ALLOC_ALIGNUP(l->l_addr + l->l_relro_addr +
+    ElfW(Addr) start = ALLOC_ALIGN_DOWN(l->l_addr + l->l_relro_addr);
+    ElfW(Addr) end = ALLOC_ALIGN_UP(l->l_addr + l->l_relro_addr +
                                    l->l_relro_size);
 
     if (start != end)
@@ -1147,9 +1144,9 @@ static int relocate_elf_object (struct link_map * l)
     for (ph = l->l_phdr ; ph < &l->l_phdr[l->l_phnum] ; ph++)
         if (ph->p_type == PT_LOAD && (ph->p_flags & PF_W) == 0) {
             struct textrels * r = malloc(sizeof(struct textrels));
-            r->start = ALLOC_ALIGNDOWN(ph->p_vaddr) + l->l_addr;
-            r->len = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz)
-                     - ALLOC_ALIGNDOWN(ph->p_vaddr);
+            r->start = ALLOC_ALIGN_DOWN(ph->p_vaddr) + l->l_addr;
+            r->len = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_memsz)
+                     - ALLOC_ALIGN_DOWN(ph->p_vaddr);
 
             ret = _DkVirtualMemoryProtect((void *) r->start, r->len,
                                           PAL_PROT_READ|PAL_PROT_WRITE);

+ 4 - 3
Pal/src/db_streams.c

@@ -532,12 +532,13 @@ DkStreamMap(PAL_HANDLE handle, PAL_PTR addr, PAL_FLG prot, PAL_NUM offset, PAL_N
     }
 
     /* Check that all addresses and sizes are aligned */
-    if ((addr && !ALLOC_ALIGNED(addr)) || !size || !ALLOC_ALIGNED(size) || !ALLOC_ALIGNED(offset)) {
+    if ((addr && !IS_ALLOC_ALIGNED_PTR(addr)) || !size || !IS_ALLOC_ALIGNED(size) ||
+            !IS_ALLOC_ALIGNED(offset)) {
         _DkRaiseFailure(PAL_ERROR_INVAL);
         LEAVE_PAL_CALL_RETURN((PAL_PTR)NULL);
     }
 
-    if (map_addr && _DkCheckMemoryMappable((void*)map_addr, size)) {
+    if (map_addr && _DkCheckMemoryMappable(map_addr, size)) {
         _DkRaiseFailure(PAL_ERROR_DENIED);
         LEAVE_PAL_CALL_RETURN((PAL_PTR)NULL);
     }
@@ -558,7 +559,7 @@ DkStreamMap(PAL_HANDLE handle, PAL_PTR addr, PAL_FLG prot, PAL_NUM offset, PAL_N
 void DkStreamUnmap(PAL_PTR addr, PAL_NUM size) {
     ENTER_PAL_CALL(DkStreamUnmap);
 
-    if (!addr || !ALLOC_ALIGNED((void*)addr) || !size || !ALLOC_ALIGNED(size)) {
+    if (!addr || !IS_ALLOC_ALIGNED_PTR(addr) || !size || !IS_ALLOC_ALIGNED(size)) {
         _DkRaiseFailure(PAL_ERROR_INVAL);
         LEAVE_PAL_CALL();
     }

+ 4 - 4
Pal/src/host/FreeBSD/db_main.c

@@ -142,15 +142,15 @@ void _DkGetAvailableUserAddressRange (PAL_PTR * start, PAL_PTR * end,
 
     if ((void *) TEXT_START - (void *) USER_ADDRESS_LOWEST >
         (void *) USER_ADDRESS_HIGHEST - (void *) DATA_END){
-        end_addr = (void *) ALLOC_ALIGNDOWN(TEXT_START);
+        end_addr = (void*)ALLOC_ALIGN_DOWN(TEXT_START);
         start_addr = pal_sec.user_addr_base ? :
             (void *) USER_ADDRESS_LOWEST;
     } else {
-        end_addr = (void *) USER_ADDRESS_HIGHEST;
-        start_addr = (void *) ALLOC_ALIGNUP(DATA_END);
+        end_addr = (void*)USER_ADDRESS_HIGHEST;
+        start_addr = (void*)ALLOC_ALIGN_UP(DATA_END);
     }
 
-    assert(ALLOC_ALIGNED(start_addr) && ALLOC_ALIGNED(end_addr));
+    assert(IS_ALLOC_ALIGNED(start_addr) && IS_ALLOC_ALIGNED(end_addr));
 
     while (1) {
         if (start_addr >= end_addr)

+ 3 - 3
Pal/src/host/Linux-SGX/db_files.c

@@ -130,7 +130,7 @@ static int64_t file_read(PAL_HANDLE handle, uint64_t offset, uint64_t count, voi
     uint64_t map_end   = ALIGN_UP(end, TRUSTED_STUB_SIZE);
 
     if (map_end > total)
-        map_end = ALLOC_ALIGNUP(total);
+        map_end = ALLOC_ALIGN_UP(total);
 
     ret = copy_and_verify_trusted_file(handle->file.realpath, handle->file.umem + map_start,
             map_start, map_end, buffer, offset, end - offset, stubs, total);
@@ -236,8 +236,8 @@ static int file_map(PAL_HANDLE handle, void** addr, int prot, uint64_t offset, u
         map_start = ALIGN_DOWN(offset, TRUSTED_STUB_SIZE);
         map_end   = ALIGN_UP(end, TRUSTED_STUB_SIZE);
     } else {
-        map_start = ALLOC_ALIGNDOWN(offset);
-        map_end   = ALLOC_ALIGNUP(end);
+        map_start = ALLOC_ALIGN_DOWN(offset);
+        map_end   = ALLOC_ALIGN_UP(end);
     }
 
     ret = ocall_mmap_untrusted(handle->file.fd, map_start, map_end - map_start, PROT_READ, &umem);

+ 0 - 2
Pal/src/host/Linux-SGX/db_main.c

@@ -315,8 +315,6 @@ void pal_linux_main(char * uptr_args, uint64_t args_size,
 
     /* Set the alignment early */
     pal_state.alloc_align = pagesz;
-    pal_state.alloc_shift = pagesz - 1;
-    pal_state.alloc_mask  = ~pagesz;
 
     /* initialize enclave properties */
     rv = init_enclave();

+ 4 - 4
Pal/src/host/Linux-SGX/db_rtld.c

@@ -62,8 +62,8 @@ void _DkDebugAddMap (struct link_map * map)
 
     if (!shdr) {
         shdr = __alloca(shdrsz);
-        unsigned long s = ALLOC_ALIGNDOWN(ehdr->e_shoff);
-        unsigned long e = ALLOC_ALIGNUP(ehdr->e_shoff + shdrsz);
+        unsigned long s = ALLOC_ALIGN_DOWN(ehdr->e_shoff);
+        unsigned long e = ALLOC_ALIGN_UP(ehdr->e_shoff + shdrsz);
         void * umem;
         ocall_mmap_untrusted(fd, s, e - s, PROT_READ, &umem);
         memcpy(shdr, umem + ehdr->e_shoff - s, shdrsz);
@@ -86,8 +86,8 @@ void _DkDebugAddMap (struct link_map * map)
 
     if (!shstrtab) {
         shstrtab = __alloca(shstrsz);
-        unsigned long s = ALLOC_ALIGNDOWN(shstroff);
-        unsigned long e = ALLOC_ALIGNUP(shstroff + shstrsz);
+        unsigned long s = ALLOC_ALIGN_DOWN(shstroff);
+        unsigned long e = ALLOC_ALIGN_UP(shstroff + shstrsz);
         void * umem;
         ocall_mmap_untrusted(fd, s, e - s, PROT_READ, &umem);
         memcpy((void *) shstrtab, umem + shstroff - s, shstrsz);

+ 12 - 12
Pal/src/host/Linux-SGX/enclave_ocalls.c

@@ -172,7 +172,7 @@ int ocall_read (int fd, void * buf, unsigned int count)
     ms_ocall_read_t * ms;
 
     if (count > MAX_UNTRUSTED_STACK_BUF) {
-        retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGNUP(count), PROT_READ | PROT_WRITE, &obuf);
+        retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGN_UP(count), PROT_READ | PROT_WRITE, &obuf);
         if (IS_ERR(retval))
             return retval;
     }
@@ -207,7 +207,7 @@ int ocall_read (int fd, void * buf, unsigned int count)
 out:
     sgx_reset_ustack();
     if (obuf)
-        ocall_munmap_untrusted(obuf, ALLOC_ALIGNUP(count));
+        ocall_munmap_untrusted(obuf, ALLOC_ALIGN_UP(count));
     return retval;
 }
 
@@ -224,7 +224,7 @@ int ocall_write (int fd, const void * buf, unsigned int count)
         /* typical case of buf inside of enclave memory */
         if (count > MAX_UNTRUSTED_STACK_BUF) {
             /* buf is too big and may overflow untrusted stack, so use untrusted heap */
-            retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGNUP(count), PROT_READ | PROT_WRITE, &obuf);
+            retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGN_UP(count), PROT_READ | PROT_WRITE, &obuf);
             if (IS_ERR(retval))
                 return retval;
             memcpy(obuf, buf, count);
@@ -257,7 +257,7 @@ int ocall_write (int fd, const void * buf, unsigned int count)
 out:
     sgx_reset_ustack();
     if (obuf && obuf != buf)
-        ocall_munmap_untrusted(obuf, ALLOC_ALIGNUP(count));
+        ocall_munmap_untrusted(obuf, ALLOC_ALIGN_UP(count));
     return retval;
 }
 
@@ -722,7 +722,7 @@ int ocall_recv (int sockfd, void * buf, unsigned int count,
     ms_ocall_recv_t * ms;
 
     if ((count + addrlen + controllen) > MAX_UNTRUSTED_STACK_BUF) {
-        retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGNUP(count), PROT_READ | PROT_WRITE, &obuf);
+        retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGN_UP(count), PROT_READ | PROT_WRITE, &obuf);
         if (IS_ERR(retval))
             return retval;
     }
@@ -779,7 +779,7 @@ int ocall_recv (int sockfd, void * buf, unsigned int count,
 out:
     sgx_reset_ustack();
     if (obuf)
-        ocall_munmap_untrusted(obuf, ALLOC_ALIGNUP(count));
+        ocall_munmap_untrusted(obuf, ALLOC_ALIGN_UP(count));
     return retval;
 }
 
@@ -798,7 +798,7 @@ int ocall_send (int sockfd, const void * buf, unsigned int count,
         /* typical case of buf inside of enclave memory */
         if ((count + addrlen + controllen) > MAX_UNTRUSTED_STACK_BUF) {
             /* buf is too big and may overflow untrusted stack, so use untrusted heap */
-            retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGNUP(count), PROT_READ | PROT_WRITE, &obuf);
+            retval = ocall_mmap_untrusted(-1, 0, ALLOC_ALIGN_UP(count), PROT_READ | PROT_WRITE, &obuf);
             if (IS_ERR(retval))
                 return retval;
             memcpy(obuf, buf, count);
@@ -835,7 +835,7 @@ int ocall_send (int sockfd, const void * buf, unsigned int count,
 out:
     sgx_reset_ustack();
     if (obuf && obuf != buf)
-        ocall_munmap_untrusted(obuf, ALLOC_ALIGNUP(count));
+        ocall_munmap_untrusted(obuf, ALLOC_ALIGN_UP(count));
     return retval;
 }
 
@@ -1090,7 +1090,7 @@ int ocall_get_attestation (const sgx_spid_t* spid, const char* subkey, bool link
             sgx_quote_t* quote = malloc(len);
             if (!sgx_copy_to_enclave(quote, len, attestation->quote, len))
                 retval = -EACCES;
-            ocall_munmap_untrusted(attestation->quote, ALLOC_ALIGNUP(len));
+            ocall_munmap_untrusted(attestation->quote, ALLOC_ALIGN_UP(len));
             attestation->quote = quote;
         }
 
@@ -1099,7 +1099,7 @@ int ocall_get_attestation (const sgx_spid_t* spid, const char* subkey, bool link
             char* ias_report = malloc(len + 1);
             if (!sgx_copy_to_enclave(ias_report, len, attestation->ias_report, len))
                 retval = -EACCES;
-            ocall_munmap_untrusted(attestation->ias_report, ALLOC_ALIGNUP(len));
+            ocall_munmap_untrusted(attestation->ias_report, ALLOC_ALIGN_UP(len));
             ias_report[len] = 0; // Ensure null-ending
             attestation->ias_report = ias_report;
         }
@@ -1109,7 +1109,7 @@ int ocall_get_attestation (const sgx_spid_t* spid, const char* subkey, bool link
             uint8_t* ias_sig = malloc(len);
             if (!sgx_copy_to_enclave(ias_sig, len, attestation->ias_sig, len))
                 retval = -EACCES;
-            ocall_munmap_untrusted(attestation->ias_sig, ALLOC_ALIGNUP(len));
+            ocall_munmap_untrusted(attestation->ias_sig, ALLOC_ALIGN_UP(len));
             attestation->ias_sig = ias_sig;
         }
 
@@ -1118,7 +1118,7 @@ int ocall_get_attestation (const sgx_spid_t* spid, const char* subkey, bool link
             char* ias_certs = malloc(len + 1);
             if (!sgx_copy_to_enclave(ias_certs, len, attestation->ias_certs, len))
                 retval = -EACCES;
-            ocall_munmap_untrusted(attestation->ias_certs, ALLOC_ALIGNUP(len));
+            ocall_munmap_untrusted(attestation->ias_certs, ALLOC_ALIGN_UP(len));
             ias_certs[len] = 0; // Ensure null-ending
             attestation->ias_certs = ias_certs;
         }

+ 1 - 1
Pal/src/host/Linux-SGX/enclave_untrusted.c

@@ -22,7 +22,7 @@
 #include "enclave_ocalls.h"
 
 static PAL_LOCK malloc_lock = LOCK_INIT;
-static int pagesize         = PRESET_PAGESIZE;
+static size_t pagesize      = PRESET_PAGESIZE;
 
 #define SYSTEM_LOCK()   _DkSpinLock(&malloc_lock)
 #define SYSTEM_UNLOCK() _DkSpinUnlock(&malloc_lock)

+ 3 - 3
Pal/src/host/Linux-SGX/sgx_enclave.c

@@ -66,9 +66,9 @@ static int sgx_ocall_munmap_untrusted(void * pms)
 {
     ms_ocall_munmap_untrusted_t * ms = (ms_ocall_munmap_untrusted_t *) pms;
     ODEBUG(OCALL_MUNMAP_UNTRUSTED, ms);
-    INLINE_SYSCALL(munmap, 2, ALLOC_ALIGNDOWN(ms->ms_mem),
-                   ALLOC_ALIGNUP(ms->ms_mem + ms->ms_size) -
-                   ALLOC_ALIGNDOWN(ms->ms_mem));
+    INLINE_SYSCALL(munmap, 2, ALLOC_ALIGN_DOWN_PTR(ms->ms_mem),
+                   ALLOC_ALIGN_UP_PTR(ms->ms_mem + ms->ms_size) -
+                   ALLOC_ALIGN_DOWN_PTR(ms->ms_mem));
     return 0;
 }
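
This is the first call site using the pointer-flavored ALLOC_ALIGN_UP_PTR / ALLOC_ALIGN_DOWN_PTR variants. Their definitions are not shown in this diff; presumably they route the argument through uintptr_t and cast the result back to the pointer type, so pointer expressions keep their type instead of being collapsed to unsigned long inside the macro as before (see the old sgx_internal.h definitions below). A hypothetical sketch:

    /* Hypothetical shape of the _PTR helpers -- not taken from this commit. */
    #define IS_ALIGNED_PTR_POW2(p, a) IS_ALIGNED_POW2((uintptr_t)(p), (a))
    #define ALIGN_DOWN_PTR_POW2(p, a) ((__typeof__(p))ALIGN_DOWN_POW2((uintptr_t)(p), (a)))
    #define ALIGN_UP_PTR_POW2(p, a)   ((__typeof__(p))ALIGN_UP_POW2((uintptr_t)(p), (a)))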
 

+ 1 - 1
Pal/src/host/Linux-SGX/sgx_framework.c

@@ -117,7 +117,7 @@ static size_t get_ssaframesize (uint64_t xfrm)
                 xsave_size = cpuinfo[0] + cpuinfo[1];
         }
 
-    return ALLOC_ALIGNUP(xsave_size + sizeof(sgx_pal_gpr_t) + 1);
+    return ALLOC_ALIGN_UP(xsave_size + sizeof(sgx_pal_gpr_t) + 1);
 }
 
 bool is_wrfsbase_supported (void)

+ 15 - 12
Pal/src/host/Linux-SGX/sgx_internal.h

@@ -41,18 +41,21 @@ int snprintf(char * str, int size, const char * fmt, ...) __attribute__((format(
 
 /* constants and macros to help rounding addresses to page
    boundaries */
-extern unsigned long pagesize, pageshift, pagemask;
-
-#undef ALLOC_ALIGNDOWN
-#undef ALLOC_ALIGNUP
-#undef ALLOC_ALIGNED
-
-#define ALLOC_ALIGNDOWN(addr) \
-    (pagesize ? ((unsigned long)(addr)) & pagemask : (unsigned long)(addr))
-#define ALLOC_ALIGNUP(addr) \
-    (pagesize ? (((unsigned long)(addr)) + pageshift) & pagemask : (unsigned long)(addr))
-#define ALLOC_ALIGNED(addr) \
-    (pagesize && ((unsigned long)(addr)) == (((unsigned long)(addr)) & pagemask))
+extern size_t pagesize;
+
+#undef IS_ALLOC_ALIGNED
+#undef IS_ALLOC_ALIGNED_PTR
+#undef ALLOC_ALIGN_UP
+#undef ALLOC_ALIGN_UP_PTR
+#undef ALLOC_ALIGN_DOWN
+#undef ALLOC_ALIGN_DOWN_PTR
+
+#define IS_ALLOC_ALIGNED(addr)     IS_ALIGNED_POW2(addr, pagesize)
+#define IS_ALLOC_ALIGNED_PTR(addr) IS_ALIGNED_PTR_POW2(addr, pagesize)
+#define ALLOC_ALIGN_UP(addr)       ALIGN_UP_POW2(addr, pagesize)
+#define ALLOC_ALIGN_UP_PTR(addr)   ALIGN_UP_PTR_POW2(addr, pagesize)
+#define ALLOC_ALIGN_DOWN(addr)     ALIGN_DOWN_POW2(addr, pagesize)
+#define ALLOC_ALIGN_DOWN_PTR(addr) ALIGN_DOWN_PTR_POW2(addr, pagesize)
 
 uint32_t htonl (uint32_t longval);
 uint16_t htons (uint16_t shortval);
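
With the replacement above, the untrusted runtime no longer needs the "pagesize ? ... : ..." guard or the separate pageshift / pagemask globals: pagesize is statically initialized to PRESET_PAGESIZE (see the sgx_main.c hunk below), so it is always nonzero and a preset power of two. Conceptually, for a 4 KiB page size the new macro expands roughly as follows (assuming the mask-based POW2 helpers sketched earlier):

    /* Illustrative expansion with pagesize == 0x1000:
     *   ALLOC_ALIGN_UP(addr) -> ALIGN_UP_POW2(addr, 0x1000)
     *                        -> ((addr) + 0xFFF) & ~(size_t)0xFFF */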

+ 9 - 11
Pal/src/host/Linux-SGX/sgx_main.c

@@ -19,9 +19,7 @@
 #include <sysdep.h>
 #include <sysdeps/generic/ldsodefs.h>
 
-unsigned long pagesize  = PRESET_PAGESIZE;
-unsigned long pagemask  = ~(PRESET_PAGESIZE - 1);
-unsigned long pageshift = PRESET_PAGESIZE - 1;
+size_t pagesize = PRESET_PAGESIZE;
 
 struct pal_enclave pal_enclave;
 
@@ -132,8 +130,8 @@ int scan_enclave_binary (int fd, unsigned long * base, unsigned long * size,
                 return -EINVAL;
 
             c = &loadcmds[nloadcmds++];
-            c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
-            c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_memsz);
+            c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
+            c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_memsz);
         }
 
     *base = loadcmds[0].mapstart;
@@ -174,12 +172,12 @@ int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
                 return -EINVAL;
 
             c = &loadcmds[nloadcmds++];
-            c->mapstart = ALLOC_ALIGNDOWN(ph->p_vaddr);
-            c->mapend = ALLOC_ALIGNUP(ph->p_vaddr + ph->p_filesz);
+            c->mapstart = ALLOC_ALIGN_DOWN(ph->p_vaddr);
+            c->mapend = ALLOC_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
             c->datastart = ph->p_vaddr;
             c->dataend = ph->p_vaddr + ph->p_filesz;
             c->allocend = ph->p_vaddr + ph->p_memsz;
-            c->mapoff = ALLOC_ALIGNDOWN(ph->p_offset);
+            c->mapoff = ALLOC_ALIGN_DOWN(ph->p_offset);
             c->prot = (ph->p_flags & PF_R ? PROT_READ  : 0)|
                       (ph->p_flags & PF_W ? PROT_WRITE : 0)|
                       (ph->p_flags & PF_X ? PROT_EXEC  : 0)|prot;
@@ -188,8 +186,8 @@ int load_enclave_binary (sgx_arch_secs_t * secs, int fd,
     base -= loadcmds[0].mapstart;
     for (c = loadcmds; c < &loadcmds[nloadcmds] ; c++) {
         ElfW(Addr) zero = c->dataend;
-        ElfW(Addr) zeroend = ALLOC_ALIGNUP(c->allocend);
-        ElfW(Addr) zeropage = ALLOC_ALIGNUP(zero);
+        ElfW(Addr) zeroend = ALLOC_ALIGN_UP(c->allocend);
+        ElfW(Addr) zeropage = ALLOC_ALIGN_UP(zero);
 
         if (zeroend < zeropage)
             zeropage = zeroend;
@@ -340,7 +338,7 @@ int initialize_enclave (struct pal_enclave * enclave)
      * it first to the list with memory areas. */
     areas[area_num] = (struct mem_area) {
         .desc = "manifest", .skip_eextend = false, .fd = enclave->manifest,
-        .is_binary = false, .addr = 0, .size = ALLOC_ALIGNUP(manifest_size),
+        .is_binary = false, .addr = 0, .size = ALLOC_ALIGN_UP(manifest_size),
         .prot = PROT_READ, .type = SGX_PAGE_REG
     };
     area_num++;

+ 11 - 11
Pal/src/host/Linux-SGX/sgx_platform.c

@@ -246,7 +246,7 @@ int contact_intel_attest_service(const char* subkey, const sgx_quote_nonce_t* no
     https_output_len = INLINE_SYSCALL(lseek, 3, output_fd, 0, SEEK_END);
     if (IS_ERR(https_output_len) || !https_output_len)
         goto failed;
-    https_output = (char*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGNUP(https_output_len),
+    https_output = (char*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGN_UP(https_output_len),
                                          PROT_READ, MAP_PRIVATE|MAP_FILE, output_fd, 0);
     if (IS_ERR_P(https_output))
         goto failed;
@@ -260,7 +260,7 @@ int contact_intel_attest_service(const char* subkey, const sgx_quote_nonce_t* no
     https_header_len = INLINE_SYSCALL(lseek, 3, header_fd, 0, SEEK_END);
     if (IS_ERR(https_header_len) || !https_header_len)
         goto failed;
-    https_header = (char*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGNUP(https_header_len),
+    https_header = (char*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGN_UP(https_header_len),
                                          PROT_READ, MAP_PRIVATE|MAP_FILE, header_fd, 0);
     if (IS_ERR_P(https_header))
         goto failed;
@@ -288,7 +288,7 @@ int contact_intel_attest_service(const char* subkey, const sgx_quote_nonce_t* no
                 goto failed;
             }
 
-            ias_sig = (uint8_t*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGNUP(ias_sig_len),
+            ias_sig = (uint8_t*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGN_UP(ias_sig_len),
                                                PROT_READ|PROT_WRITE,
                                                MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
             if (IS_ERR_P(ias_sig)) {
@@ -306,7 +306,7 @@ int contact_intel_attest_service(const char* subkey, const sgx_quote_nonce_t* no
 
             // Decode IAS signature chain
             ias_certs_len = end - start;
-            ias_certs = (char*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGNUP(ias_certs_len),
+            ias_certs = (char*)INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGN_UP(ias_certs_len),
                                               PROT_READ|PROT_WRITE,
                                               MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
             if (IS_ERR_P(ias_certs)) {
@@ -339,9 +339,9 @@ int contact_intel_attest_service(const char* subkey, const sgx_quote_nonce_t* no
 
             // Adjust certificate chain length
             ias_certs[total_bytes++] = '\0';
-            if (ALLOC_ALIGNUP(total_bytes) < ALLOC_ALIGNUP(ias_certs_len))
-                INLINE_SYSCALL(munmap, 2, ALLOC_ALIGNUP(total_bytes),
-                               ALLOC_ALIGNUP(ias_certs_len) - ALLOC_ALIGNUP(total_bytes));
+            if (ALLOC_ALIGN_UP(total_bytes) < ALLOC_ALIGN_UP(ias_certs_len))
+                INLINE_SYSCALL(munmap, 2, ALLOC_ALIGN_UP(total_bytes),
+                               ALLOC_ALIGN_UP(ias_certs_len) - ALLOC_ALIGN_UP(total_bytes));
             ias_certs_len = total_bytes;
         }
 
@@ -371,9 +371,9 @@ int contact_intel_attest_service(const char* subkey, const sgx_quote_nonce_t* no
     ret = 0;
 done:
     if (https_header)
-        INLINE_SYSCALL(munmap, 2, https_header, ALLOC_ALIGNUP(https_header_len));
+        INLINE_SYSCALL(munmap, 2, https_header, ALLOC_ALIGN_UP(https_header_len));
     if (https_output)
-        INLINE_SYSCALL(munmap, 2, https_output, ALLOC_ALIGNUP(https_output_len));
+        INLINE_SYSCALL(munmap, 2, https_output, ALLOC_ALIGN_UP(https_output_len));
     if (pipefds[0] != -1) INLINE_SYSCALL(close, 1, pipefds[0]);
     if (pipefds[1] != -1) INLINE_SYSCALL(close, 1, pipefds[1]);
     if (header_fd != -1) {
@@ -451,7 +451,7 @@ int retrieve_verified_quote(const sgx_spid_t* spid, const char* subkey, bool lin
         goto failed;
     }
 
-    sgx_quote_t* quote = (sgx_quote_t*) INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGNUP(r->quote.len),
+    sgx_quote_t* quote = (sgx_quote_t*) INLINE_SYSCALL(mmap, 6, NULL, ALLOC_ALIGN_UP(r->quote.len),
                                                        PROT_READ|PROT_WRITE,
                                                        MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
     if (IS_ERR_P(quote)) {
@@ -465,7 +465,7 @@ int retrieve_verified_quote(const sgx_spid_t* spid, const char* subkey, bool lin
 
     ret = contact_intel_attest_service(subkey, nonce, (sgx_quote_t *) quote, attestation);
     if (ret < 0) {
-        INLINE_SYSCALL(munmap, 2, quote, ALLOC_ALIGNUP(r->quote.len));
+        INLINE_SYSCALL(munmap, 2, quote, ALLOC_ALIGN_UP(r->quote.len));
         goto failed;
     }
 

+ 3 - 3
Pal/src/host/Linux/db_ipc.c

@@ -127,8 +127,8 @@ int _DkPhysicalMemoryCommit (PAL_HANDLE channel, int entries,
     gs.len  = __alloca(sizeof(unsigned long) * entries);
 
     for (int i = 0 ; i < entries ; i++) {
-        if (!addrs[i] || !sizes[i] || !ALLOC_ALIGNED(addrs[i]) ||
-            !ALLOC_ALIGNED(sizes[i]))
+        if (!addrs[i] || !sizes[i] || !IS_ALLOC_ALIGNED_PTR(addrs[i]) ||
+            !IS_ALLOC_ALIGNED(sizes[i]))
             return -PAL_ERROR_INVAL;
 
         gs.addr[i] = (unsigned long) addrs[i];
@@ -155,7 +155,7 @@ int _DkPhysicalMemoryMap (PAL_HANDLE channel, int entries,
     gr.prot = __alloca(sizeof(unsigned long) * entries);
 
     for (int i = 0 ; i < entries ; i++) {
-        if (!sizes[i] || !ALLOC_ALIGNED(addrs[i]) || !ALLOC_ALIGNED(sizes[i]))
+        if (!sizes[i] || !IS_ALLOC_ALIGNED_PTR(addrs[i]) || !IS_ALLOC_ALIGNED(sizes[i]))
             return -PAL_ERROR_INVAL;
 
         gr.addr[i] = (unsigned long) addrs[i];

+ 3 - 3
Pal/src/host/Linux/db_main.c

@@ -150,10 +150,10 @@ unsigned long _DkGetAllocationAlignment (void)
 void _DkGetAvailableUserAddressRange (PAL_PTR * start, PAL_PTR * end,
                                       PAL_PTR * hole_start, PAL_PTR * hole_end)
 {
-    void * end_addr = (void *) ALLOC_ALIGNDOWN(TEXT_START);
-    void * start_addr = (void *) USER_ADDRESS_LOWEST;
+    void* end_addr = (void*)ALLOC_ALIGN_DOWN_PTR(TEXT_START);
+    void* start_addr = (void*)USER_ADDRESS_LOWEST;
 
-    assert(ALLOC_ALIGNED(start_addr) && ALLOC_ALIGNED(end_addr));
+    assert(IS_ALLOC_ALIGNED_PTR(start_addr) && IS_ALLOC_ALIGNED_PTR(end_addr));
 
     while (1) {
         if (start_addr >= end_addr)

+ 9 - 9
Pal/src/pal_internal.h

@@ -210,8 +210,9 @@ extern struct pal_internal_state {
 
     struct config_store * root_config;
 
-    unsigned long   pagesize;
-    unsigned long   alloc_align, alloc_shift, alloc_mask;
+    /* May not be the same as page size, see e.g. SYSTEM_INFO::dwAllocationGranularity on Windows.
+     */
+    size_t          alloc_align;
 
     PAL_HANDLE      console;
 
@@ -237,13 +238,12 @@ extern struct pal_internal_state {
 
 extern PAL_CONTROL __pal_control;
 
-#define ALLOC_ALIGNDOWN(addr)                               \
-        (((unsigned long)(addr)) & pal_state.alloc_mask)
-#define ALLOC_ALIGNUP(addr)                                 \
-        ((((unsigned long)(addr)) + pal_state.alloc_shift) & pal_state.alloc_mask)
-#define ALLOC_ALIGNED(addr)                                 \
-        ((unsigned long)(addr) ==                           \
-            (((unsigned long)(addr)) & pal_state.alloc_mask))
+#define IS_ALLOC_ALIGNED(addr)     IS_ALIGNED_POW2(addr, pal_state.alloc_align)
+#define IS_ALLOC_ALIGNED_PTR(addr) IS_ALIGNED_PTR_POW2(addr, pal_state.alloc_align)
+#define ALLOC_ALIGN_UP(addr)       ALIGN_UP_POW2(addr, pal_state.alloc_align)
+#define ALLOC_ALIGN_UP_PTR(addr)   ALIGN_UP_PTR_POW2(addr, pal_state.alloc_align)
+#define ALLOC_ALIGN_DOWN(addr)     ALIGN_DOWN_POW2(addr, pal_state.alloc_align)
+#define ALLOC_ALIGN_DOWN_PTR(addr) ALIGN_DOWN_PTR_POW2(addr, pal_state.alloc_align)
 
 /* Main initialization function */
 noreturn void pal_main (
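
For reference, the intended semantics of the renamed family, with a worked example assuming pal_state.alloc_align == 0x1000 and the power-of-two helpers sketched at the top:

    /* Illustrative values only:
     *   ALLOC_ALIGN_DOWN(0x1234) == 0x1000
     *   ALLOC_ALIGN_UP(0x1234)   == 0x2000
     *   ALLOC_ALIGN_UP(0x2000)   == 0x2000   (exact multiples unchanged)
     *   IS_ALLOC_ALIGNED(0x1234) == false,  IS_ALLOC_ALIGNED(0x2000) == true
     * The _PTR variants behave identically but take and return pointer types. */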