@@ -197,8 +197,8 @@ static int protect_page(struct link_map* l, void* addr, size_t size) {
return 0;
}
- void* start = ALIGN_DOWN(addr);
- void* end = ALIGN_UP(addr + size);
+ void* start = PAGE_ALIGN_DOWN_PTR(addr);
+ void* end = PAGE_ALIGN_UP_PTR(addr + size);
if (!DkVirtualMemoryProtect(start, end - start, PAL_PROT_READ | PAL_PROT_WRITE | prot))
return -PAL_ERRNO;
@@ -430,12 +430,13 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
case PT_LOAD:
/* A load command tells us to map in part of the file.
We record the load commands and process them all later. */
- if (__builtin_expect(!ALIGNED(ph->p_align), 0)) {
+ if (__builtin_expect(!IS_PAGE_ALIGNED(ph->p_align), 0)) {
errstring = "ELF load command alignment not page-aligned";
goto call_lose;
}
- if (__builtin_expect(((ph->p_vaddr - ph->p_offset) & (ph->p_align - 1)) != 0, 0)) {
+ if (__builtin_expect(!IS_ALIGNED_POW2(ph->p_vaddr - ph->p_offset, ph->p_align),
+ 0)) {
errstring = "ELF load command address/offset not properly aligned";
goto call_lose;
}
@@ -446,11 +447,11 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
}
c = &l->loadcmds[l->nloadcmds++];
- c->mapstart = ALIGN_DOWN(ph->p_vaddr);
- c->mapend = ALIGN_UP(ph->p_vaddr + ph->p_filesz);
+ c->mapstart = PAGE_ALIGN_DOWN(ph->p_vaddr);
+ c->mapend = PAGE_ALIGN_UP(ph->p_vaddr + ph->p_filesz);
c->dataend = ph->p_vaddr + ph->p_filesz;
c->allocend = ph->p_vaddr + ph->p_memsz;
- c->mapoff = ALIGN_DOWN(ph->p_offset);
+ c->mapoff = PAGE_ALIGN_DOWN(ph->p_offset);
/* Determine whether there is a gap between the last segment
and this one. */
@@ -510,12 +511,12 @@ static struct link_map* __map_elf_object(struct shim_handle* file, const void* f
mappref = (ElfW(Addr))c->mapstart + (ElfW(Addr))addr;
else
mappref = (ElfW(Addr))bkeep_unmapped_heap(
- ALIGN_UP(maplength), c->prot,
+ PAGE_ALIGN_UP(maplength), c->prot,
c->flags | MAP_PRIVATE | (type == OBJECT_INTERNAL ? VMA_INTERNAL : 0), file,
c->mapoff, NULL);
/* Remember which part of the address space this object uses. */
- ret = (*mmap)(file, (void**)&mappref, ALIGN_UP(maplength), c->prot,
+ ret = (*mmap)(file, (void**)&mappref, PAGE_ALIGN_UP(maplength), c->prot,
c->flags | MAP_PRIVATE, c->mapoff);
if (__builtin_expect(ret < 0, 0)) {
@@ -600,8 +601,8 @@ do_remap:
ElfW(Addr) zero, zeroend, zeropage;
zero = (ElfW(Addr))RELOCATE(l, c->dataend);
- zeroend = ALIGN_UP((ElfW(Addr))RELOCATE(l, c->allocend));
- zeropage = ALIGN_UP(zero);
+ zeroend = PAGE_ALIGN_UP((ElfW(Addr))RELOCATE(l, c->allocend));
+ zeropage = PAGE_ALIGN_UP(zero);
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
@@ -613,13 +614,14 @@ do_remap:
/* Zero the final part of the last page of the segment. */
if (__builtin_expect((c->prot & PROT_WRITE) == 0, 0)) {
/* Dag nab it. */
- if (!DkVirtualMemoryProtect((caddr_t)ALIGN_DOWN(zero), allocsize,
+ if (!DkVirtualMemoryProtect((caddr_t)PAGE_ALIGN_DOWN(zero), g_pal_alloc_align,
c->prot | PAL_PROT_WRITE)) {
errstring = "cannot change memory protections";
goto call_lose;
}
memset((void*)zero, '\0', zeropage - zero);
- if (!DkVirtualMemoryProtect((caddr_t)ALIGN_DOWN(zero), allocsize, c->prot)) {
+ if (!DkVirtualMemoryProtect((caddr_t)PAGE_ALIGN_DOWN(zero), g_pal_alloc_align,
+ c->prot)) {
errstring = "cannot change memory protections";
goto call_lose;
}
@@ -785,7 +787,7 @@ static int __free_elf_object(struct link_map* l) {
zero = l->l_addr + c->dataend;
zeroend = l->l_addr + c->allocend;
- zeropage = ALIGN_UP(zero);
+ zeropage = PAGE_ALIGN_UP(zero);
if (zeroend < zeropage)
/* All the extra data is in the last page of the segment.
@@ -1430,20 +1432,20 @@ static int vdso_map_init(void) {
* When LibOS is loaded at different address, it may overlap with the old vDSO
* area.
*/
- void* addr = bkeep_unmapped_heap(ALIGN_UP(vdso_so_size), PROT_READ | PROT_EXEC, 0, NULL, 0,
+ void* addr = bkeep_unmapped_heap(PAGE_ALIGN_UP(vdso_so_size), PROT_READ | PROT_EXEC, 0, NULL, 0,
"linux-vdso.so.1");
if (addr == NULL)
return -ENOMEM;
- assert(addr == ALIGN_UP(addr));
+ assert(addr == PAGE_ALIGN_UP_PTR(addr));
- void* ret_addr = (void*)DkVirtualMemoryAlloc(addr, ALIGN_UP(vdso_so_size), 0,
+ void* ret_addr = (void*)DkVirtualMemoryAlloc(addr, PAGE_ALIGN_UP(vdso_so_size), 0,
PAL_PROT_READ | PAL_PROT_WRITE);
if (!ret_addr)
return -PAL_ERRNO;
assert(addr == ret_addr);
memcpy(addr, &vdso_so, vdso_so_size);
- memset(addr + vdso_so_size, 0, ALIGN_UP(vdso_so_size) - vdso_so_size);
+ memset(addr + vdso_so_size, 0, PAGE_ALIGN_UP(vdso_so_size) - vdso_so_size);
__load_elf_object(NULL, addr, OBJECT_VDSO, NULL);
vdso_map->l_name = "vDSO";
@@ -1457,7 +1459,7 @@ static int vdso_map_init(void) {
**vsyms[i].func = vsyms[i].value;
}
- if (!DkVirtualMemoryProtect(addr, ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
+ if (!DkVirtualMemoryProtect(addr, PAGE_ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
return -PAL_ERRNO;
vdso_addr = addr;
@@ -1468,7 +1470,8 @@ int vdso_map_migrate(void) {
if (!vdso_addr)
return 0;
- if (!DkVirtualMemoryProtect(vdso_addr, ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_WRITE))
+ if (!DkVirtualMemoryProtect(vdso_addr, PAGE_ALIGN_UP(vdso_so_size),
+ PAL_PROT_READ | PAL_PROT_WRITE))
return -PAL_ERRNO;
/* adjust funcs to loaded address for newly loaded libsysdb */
@@ -1476,7 +1479,8 @@ int vdso_map_migrate(void) {
**vsyms[i].func = vsyms[i].value;
}
- if (!DkVirtualMemoryProtect(vdso_addr, ALIGN_UP(vdso_so_size), PAL_PROT_READ | PAL_PROT_EXEC))
+ if (!DkVirtualMemoryProtect(vdso_addr, PAGE_ALIGN_UP(vdso_so_size),
+ PAL_PROT_READ | PAL_PROT_EXEC))
return -PAL_ERRNO;
return 0;
}
@@ -1534,7 +1538,7 @@ int init_brk_from_executable(struct shim_handle* exec) {
if (!(c->prot & PROT_EXEC))
data_segment_size += c->allocend - c->mapstart;
- return init_brk_region((void*)ALIGN_UP(exec_map->l_map_end), data_segment_size);
+ return init_brk_region((void*)PAGE_ALIGN_UP(exec_map->l_map_end), data_segment_size);
}
return 0;
}
@@ -1569,7 +1573,7 @@ noreturn void execute_elf_object(struct shim_handle* exec, int* argcp, const cha
struct link_map* exec_map = __search_map_by_handle(exec);
assert(exec_map);
- assert((uintptr_t)argcp % 16 == 0); /* stack must be 16B-aligned */
+ assert(IS_ALIGNED_PTR(argcp, 16)); /* stack must be 16B-aligned */
assert((void*)argcp + sizeof(long) == argp || argp == NULL);
static_assert(REQUIRED_ELF_AUXV >= 8, "not enough space on stack for auxv");
@@ -1578,7 +1582,7 @@ noreturn void execute_elf_object(struct shim_handle* exec, int* argcp, const cha
auxp[1].a_type = AT_PHNUM;
auxp[1].a_un.a_val = exec_map->l_phnum;
auxp[2].a_type = AT_PAGESZ;
- auxp[2].a_un.a_val = allocsize;
+ auxp[2].a_un.a_val = g_pal_alloc_align;
auxp[3].a_type = AT_ENTRY;
auxp[3].a_un.a_val = exec_map->l_entry;
auxp[4].a_type = AT_BASE;
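For reference, the PAGE_ALIGN_*/IS_PAGE_ALIGNED helpers this patch switches to can be read as thin power-of-two alignment wrappers around the PAL allocation granularity (the g_pal_alloc_align global that replaces the old allocsize). The following is only a minimal sketch under that assumption, not the exact in-tree definitions:

#include <stddef.h>
#include <stdint.h>

/* Assumption: g_pal_alloc_align holds the PAL allocation granularity (the page
 * size) and is a power of two; it replaces the old `allocsize` global. */
extern size_t g_pal_alloc_align;

/* Generic power-of-two alignment helpers (illustrative). */
#define IS_ALIGNED_POW2(val, align) (((val) & ((align) - 1)) == 0)
#define ALIGN_DOWN_POW2(val, align) ((val) & ~((align) - 1))
#define ALIGN_UP_POW2(val, align)   ALIGN_DOWN_POW2((val) + (align) - 1, align)

/* Page-granularity wrappers used throughout the patch. */
#define IS_PAGE_ALIGNED(val)  IS_ALIGNED_POW2(val, g_pal_alloc_align)
#define PAGE_ALIGN_DOWN(val)  ALIGN_DOWN_POW2(val, g_pal_alloc_align)
#define PAGE_ALIGN_UP(val)    ALIGN_UP_POW2(val, g_pal_alloc_align)

/* Pointer variants round-trip through uintptr_t so the bit arithmetic is
 * well-defined, then cast back to the pointer's own type. */
#define IS_ALIGNED_PTR(ptr, align) IS_ALIGNED_POW2((uintptr_t)(ptr), align)
#define PAGE_ALIGN_DOWN_PTR(ptr)   ((__typeof__(ptr))PAGE_ALIGN_DOWN((uintptr_t)(ptr)))
#define PAGE_ALIGN_UP_PTR(ptr)     ((__typeof__(ptr))PAGE_ALIGN_UP((uintptr_t)(ptr)))

With helpers of this shape, e.g. PAGE_ALIGN_UP(vdso_so_size) rounds the vDSO image size up to a whole number of pages before the region is allocated, zero-padded, and re-protected, and PAGE_ALIGN_DOWN(zero) picks the start of the page whose protections are temporarily relaxed while the BSS tail is zeroed.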