@@ -35,8 +35,8 @@
 
 #include "enclave_pages.h"
 
-#define PAL_VMA_MAX 64
-
+/* TODO: Having VMAs in an array is extremely inefficient */
+#define PAL_VMA_MAX 64
 static struct pal_vma {
     void * top, * bottom;
 } pal_vmas[PAL_VMA_MAX];
@@ -55,8 +55,8 @@ bool _DkCheckMemoryMappable (const void * addr, size_t size)
 
     for (uint32_t i = 0 ; i < pal_nvmas ; i++)
         if (addr < pal_vmas[i].top && addr + size > pal_vmas[i].bottom) {
-            printf("address %p-%p is not mappable\n", addr, addr + size);
             spinlock_unlock(&pal_vma_lock);
+            printf("address %p-%p is not mappable\n", addr, addr + size);
             return true;
         }
 
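Note: the check above treats each tracked VMA as a half-open range [bottom, top) and reports a conflict whenever the queried range intersects it. Below is a minimal standalone sketch of that overlap test, not part of the patch; the helper name ranges_overlap() and the main() driver are illustrative only.

/* Standalone sketch (not part of the patch): the half-open interval overlap
 * test mirroring `addr < pal_vmas[i].top && addr + size > pal_vmas[i].bottom`. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool ranges_overlap(uintptr_t a_start, size_t a_size,
                           uintptr_t b_start, size_t b_size) {
    /* [a_start, a_start + a_size) intersects [b_start, b_start + b_size) */
    return a_start < b_start + b_size && a_start + a_size > b_start;
}

int main(void) {
    /* a VMA spanning [0x1000, 0x3000) */
    assert(ranges_overlap(0x2000, 0x100, 0x1000, 0x2000));  /* inside -> overlap */
    assert(!ranges_overlap(0x3000, 0x100, 0x1000, 0x2000)); /* starts at top -> no overlap */
    assert(!ranges_overlap(0x0800, 0x800, 0x1000, 0x2000)); /* ends at bottom -> no overlap */
    return 0;
}

Compiled on its own, all three assertions hold, matching the strict-inequality form used in _DkCheckMemoryMappable().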
@@ -87,27 +87,53 @@ int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int prot)
         return -PAL_ERROR_INVAL; // `addr` was unaligned.
     }
 
-    memset(mem, 0, size);
-
     if (alloc_type & PAL_ALLOC_INTERNAL) {
-        SGX_DBG(DBG_M, "pal allocates %p-%p for internal use\n", mem, mem + size);
         spinlock_lock(&pal_vma_lock);
-        assert(pal_nvmas < PAL_VMA_MAX);
+        if (pal_nvmas >= PAL_VMA_MAX) {
+            spinlock_unlock(&pal_vma_lock);
+            SGX_DBG(DBG_E, "Pal is out of VMAs (current limit on VMAs PAL_VMA_MAX = %d)!\n",
+                    PAL_VMA_MAX);
+            free_pages(mem, size);
+            return -PAL_ERROR_NOMEM;
+        }
+
         pal_vmas[pal_nvmas].bottom = mem;
         pal_vmas[pal_nvmas].top = mem + size;
         pal_nvmas++;
         spinlock_unlock(&pal_vma_lock);
+
+        SGX_DBG(DBG_M, "pal allocated %p-%p for internal use\n", mem, mem + size);
     }
 
+    memset(mem, 0, size);
+
     *paddr = mem;
     return 0;
 }
 
 int _DkVirtualMemoryFree (void * addr, uint64_t size)
 {
-
     if (sgx_is_completely_within_enclave(addr, size)) {
         free_pages(addr, size);
+
+        /* check if it is internal PAL memory and remove this VMA from pal_vmas if yes */
+        spinlock_lock(&pal_vma_lock);
+        for (uint32_t i = 0; i < pal_nvmas; i++) {
+            if (addr == pal_vmas[i].bottom) {
+                /* TODO: currently assume that internal PAL memory is freed at same granularity as
+                 *       was allocated in _DkVirtualMemoryAlloc(); may be false in general case */
+                assert(addr + size == pal_vmas[i].top);
+
+                for (uint32_t j = i; j < pal_nvmas - 1; j++) {
+                    pal_vmas[j].bottom = pal_vmas[j + 1].bottom;
+                    pal_vmas[j].top = pal_vmas[j + 1].top;
+                }
+
+                pal_nvmas--;
+                break;
+            }
+        }
+        spinlock_unlock(&pal_vma_lock);
     } else {
         /* Possible to have untrusted mapping. Simply unmap
            the memory outside the enclave */
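Taken together, the alloc and free paths above keep internal PAL mappings in a fixed-size array: allocation appends an entry (now failing with -PAL_ERROR_NOMEM instead of asserting when the array is full), and freeing looks the entry up by its bottom address and shifts the tail left. Below is a self-contained sketch of that bookkeeping, reduced to a single-threaded test without the spinlock; vma_register(), vma_unregister() and the main() driver are hypothetical names, not part of the patch.

/* Standalone sketch (not part of the patch): fixed-size VMA array bookkeeping
 * as implemented by _DkVirtualMemoryAlloc()/_DkVirtualMemoryFree() above. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAL_VMA_MAX 64

static struct { char* bottom; char* top; } vmas[PAL_VMA_MAX];
static uint32_t nvmas = 0;

/* mirror of the alloc path: fail instead of asserting when the array is full */
static int vma_register(char* mem, size_t size) {
    if (nvmas >= PAL_VMA_MAX)
        return -1; /* corresponds to -PAL_ERROR_NOMEM */
    vmas[nvmas].bottom = mem;
    vmas[nvmas].top    = mem + size;
    nvmas++;
    return 0;
}

/* mirror of the free path: linear search by bottom, then shift the tail left */
static void vma_unregister(char* addr, size_t size) {
    for (uint32_t i = 0; i < nvmas; i++) {
        if (addr == vmas[i].bottom) {
            assert(addr + size == vmas[i].top); /* same whole-VMA granularity assumption */
            for (uint32_t j = i; j < nvmas - 1; j++)
                vmas[j] = vmas[j + 1];
            nvmas--;
            break;
        }
    }
}

int main(void) {
    static char a[0x1000], b[0x2000];
    assert(vma_register(a, sizeof(a)) == 0);
    assert(vma_register(b, sizeof(b)) == 0);
    vma_unregister(a, sizeof(a));
    assert(nvmas == 1 && vmas[0].bottom == b); /* b shifted into slot 0 */
    puts("ok");
    return 0;
}

The sketch copies whole struct entries when shifting, where the patch copies bottom and top individually; the effect is the same.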