  1. /* Copyright (C) 2014 Stony Brook University
  2. This file is part of Graphene Library OS.
  3. Graphene Library OS is free software: you can redistribute it and/or
  4. modify it under the terms of the GNU Lesser General Public License
  5. as published by the Free Software Foundation, either version 3 of the
  6. License, or (at your option) any later version.
  7. Graphene Library OS is distributed in the hope that it will be useful,
  8. but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. GNU Lesser General Public License for more details.
  11. You should have received a copy of the GNU Lesser General Public License
  12. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  13. /*
  14. * db_memory.c
  15. *
  16. * This files contains APIs that allocate, free or protect virtual memory.
  17. */
  18. #include "pal_defs.h"
  19. #include "pal_linux_defs.h"
  20. #include "pal.h"
  21. #include "pal_internal.h"
  22. #include "pal_linux.h"
  23. #include "pal_security.h"
  24. #include "pal_error.h"
  25. #include "pal_debug.h"
  26. #include "spinlock.h"
  27. #include "api.h"
  28. #include <asm/mman.h>
  29. #include "enclave_pages.h"
/* TODO: Having VMAs in an array is extremely inefficient */
#define PAL_VMA_MAX 64

/* Bookkeeping for memory ranges allocated for PAL-internal use
 * (PAL_ALLOC_INTERNAL). _DkCheckMemoryMappable() consults this array to
 * refuse application mappings that would overlap PAL's own memory.
 * Each entry covers [bottom, top). Entries [0, pal_nvmas) are valid;
 * all accesses are serialized by pal_vma_lock. */
static struct pal_vma {
void * top, * bottom;
} pal_vmas[PAL_VMA_MAX];

static uint32_t pal_nvmas = 0;
static spinlock_t pal_vma_lock = INIT_SPINLOCK_UNLOCKED;
  37. bool _DkCheckMemoryMappable (const void * addr, size_t size)
  38. {
  39. if (addr < DATA_END && addr + size > TEXT_START) {
  40. printf("address %p-%p is not mappable\n", addr, addr + size);
  41. return true;
  42. }
  43. spinlock_lock(&pal_vma_lock);
  44. for (uint32_t i = 0 ; i < pal_nvmas ; i++)
  45. if (addr < pal_vmas[i].top && addr + size > pal_vmas[i].bottom) {
  46. spinlock_unlock(&pal_vma_lock);
  47. printf("address %p-%p is not mappable\n", addr, addr + size);
  48. return true;
  49. }
  50. spinlock_unlock(&pal_vma_lock);
  51. return false;
  52. }
  53. int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int prot)
  54. {
  55. if (!WITHIN_MASK(prot, PAL_PROT_MASK))
  56. return -PAL_ERROR_INVAL;
  57. void * addr = *paddr, * mem;
  58. if ((alloc_type & PAL_ALLOC_INTERNAL) && addr)
  59. return -PAL_ERROR_INVAL;
  60. if (size == 0)
  61. __asm__ volatile ("int $3");
  62. mem = get_enclave_pages(addr, size);
  63. if (!mem)
  64. return addr ? -PAL_ERROR_DENIED : -PAL_ERROR_NOMEM;
  65. if (alloc_type & PAL_ALLOC_INTERNAL) {
  66. spinlock_lock(&pal_vma_lock);
  67. if (pal_nvmas >= PAL_VMA_MAX) {
  68. spinlock_unlock(&pal_vma_lock);
  69. SGX_DBG(DBG_E, "Pal is out of VMAs (current limit on VMAs PAL_VMA_MAX = %d)!\n",
  70. PAL_VMA_MAX);
  71. free_enclave_pages(mem, size);
  72. return -PAL_ERROR_NOMEM;
  73. }
  74. pal_vmas[pal_nvmas].bottom = mem;
  75. pal_vmas[pal_nvmas].top = mem + size;
  76. pal_nvmas++;
  77. spinlock_unlock(&pal_vma_lock);
  78. SGX_DBG(DBG_M, "pal allocated %p-%p for internal use\n", mem, mem + size);
  79. }
  80. memset(mem, 0, size);
  81. *paddr = mem;
  82. return 0;
  83. }
  84. int _DkVirtualMemoryFree (void * addr, uint64_t size)
  85. {
  86. if (sgx_is_completely_within_enclave(addr, size)) {
  87. int ret = free_enclave_pages(addr, size);
  88. if (ret < 0) {
  89. return ret;
  90. }
  91. /* check if it is internal PAL memory and remove this VMA from pal_vmas if yes */
  92. spinlock_lock(&pal_vma_lock);
  93. for (uint32_t i = 0; i < pal_nvmas; i++) {
  94. if (addr == pal_vmas[i].bottom) {
  95. /* TODO: currently assume that internal PAL memory is freed at same granularity as
  96. * was allocated in _DkVirtualMemoryAlloc(); may be false in general case */
  97. assert(addr + size == pal_vmas[i].top);
  98. for (uint32_t j = i; j < pal_nvmas - 1; j++) {
  99. pal_vmas[j].bottom = pal_vmas[j + 1].bottom;
  100. pal_vmas[j].top = pal_vmas[j + 1].top;
  101. }
  102. pal_nvmas--;
  103. break;
  104. }
  105. }
  106. spinlock_unlock(&pal_vma_lock);
  107. } else {
  108. /* Possible to have untrusted mapping. Simply unmap
  109. the memory outside the enclave */
  110. ocall_munmap_untrusted(addr, size);
  111. }
  112. return 0;
  113. }
  114. int _DkVirtualMemoryProtect (void * addr, uint64_t size, int prot)
  115. {
  116. static struct atomic_int at_cnt = {.counter = 0};
  117. if (atomic_cmpxchg(&at_cnt, 0, 1) == 0)
  118. SGX_DBG(DBG_M, "[Warning] DkVirtualMemoryProtect (0x%p, %lu, %d) is unimplemented",
  119. addr, size, prot);
  120. return 0;
  121. }
  122. unsigned long _DkMemoryQuota (void)
  123. {
  124. return pal_sec.heap_max - pal_sec.heap_min;
  125. }
  126. extern struct atomic_int g_alloced_pages;
  127. extern unsigned int g_page_size;
  128. unsigned long _DkMemoryAvailableQuota (void)
  129. {
  130. return (pal_sec.heap_max - pal_sec.heap_min) -
  131. atomic_read(&g_alloced_pages) * g_page_size;
  132. }