/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * db_memory.c
 *
 * This file contains APIs that allocate, free or protect virtual memory.
 */
#include "pal_defs.h"
#include "pal_linux_defs.h"
#include "pal.h"
#include "pal_internal.h"
#include "pal_linux.h"
#include "pal_security.h"
#include "pal_error.h"
#include "pal_debug.h"
#include "api.h"

#include <asm/mman.h>

#include "enclave_pages.h"
  29. #define PAL_VMA_MAX 64
  30. static struct pal_vma {
  31. void * top, * bottom;
  32. } pal_vmas[PAL_VMA_MAX];
  33. static uint32_t pal_nvmas = 0;
  34. static struct spinlock pal_vma_lock;
  35. bool _DkCheckMemoryMappable (const void * addr, size_t size)
  36. {
  37. if (addr < DATA_END && addr + size > TEXT_START) {
  38. printf("address %p-%p is not mappable\n", addr, addr + size);
  39. return true;
  40. }
  41. _DkSpinLock(&pal_vma_lock);
  42. for (uint32_t i = 0 ; i < pal_nvmas ; i++)
  43. if (addr < pal_vmas[i].top && addr + size > pal_vmas[i].bottom) {
  44. printf("address %p-%p is not mappable\n", addr, addr + size);
  45. _DkSpinUnlock(&pal_vma_lock);
  46. return true;
  47. }
  48. _DkSpinUnlock(&pal_vma_lock);
  49. return false;
  50. }
  51. int _DkVirtualMemoryAlloc (void ** paddr, uint64_t size, int alloc_type, int prot)
  52. {
  53. if (!WITHIN_MASK(prot, PAL_PROT_MASK))
  54. return -PAL_ERROR_INVAL;
  55. void * addr = *paddr, * mem;
  56. if ((alloc_type & PAL_ALLOC_INTERNAL) && addr)
  57. return -PAL_ERROR_INVAL;
  58. if (size == 0)
  59. __asm__ volatile ("int $3");
  60. mem = get_reserved_pages(addr, size);
  61. if (!mem)
  62. return addr ? -PAL_ERROR_DENIED : -PAL_ERROR_NOMEM;
  63. if (addr && mem != addr) {
  64. // TODO: This case should be made impossible by fixing
  65. // `get_reserved_pages` semantics.
  66. free_pages(mem, size);
  67. return -PAL_ERROR_INVAL; // `addr` was unaligned.
  68. }
  69. memset(mem, 0, size);
  70. if (alloc_type & PAL_ALLOC_INTERNAL) {
  71. SGX_DBG(DBG_M, "pal allocates %p-%p for internal use\n", mem, mem + size);
  72. _DkSpinLock(&pal_vma_lock);
  73. assert(pal_nvmas < PAL_VMA_MAX);
  74. pal_vmas[pal_nvmas].bottom = mem;
  75. pal_vmas[pal_nvmas].top = mem + size;
  76. pal_nvmas++;
  77. _DkSpinUnlock(&pal_vma_lock);
  78. }
  79. *paddr = mem;
  80. return 0;
  81. }
  82. int _DkVirtualMemoryFree (void * addr, uint64_t size)
  83. {
  84. if (sgx_is_completely_within_enclave(addr, size)) {
  85. free_pages(addr, size);
  86. } else {
  87. /* Possible to have untrusted mapping. Simply unmap
  88. the memory outside the enclave */
  89. ocall_unmap_untrusted(addr, size);
  90. }
  91. return 0;
  92. }
  93. int _DkVirtualMemoryProtect (void * addr, uint64_t size, int prot)
  94. {
  95. static struct atomic_int at_cnt = {.counter = 0};
  96. if (atomic_cmpxchg(&at_cnt, 0, 1) == 0)
  97. SGX_DBG(DBG_M, "[Warning] DkVirtualMemoryProtect (0x%p, %lu, %d) is unimplemented",
  98. addr, size, prot);
  99. return 0;
  100. }
  101. unsigned long _DkMemoryQuota (void)
  102. {
  103. return pal_sec.heap_max - pal_sec.heap_min;
  104. }
  105. extern struct atomic_int alloced_pages;
  106. extern unsigned int pagesz;
  107. unsigned long _DkMemoryAvailableQuota (void)
  108. {
  109. return (pal_sec.heap_max - pal_sec.heap_min) -
  110. atomic_read(&alloced_pages) * pagesz;
  111. }