/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_malloc.c
 *
 * This file implements page allocation for the library OS-internal SLAB
 * memory allocator. The slab allocator is in Pal/lib/slabmgr.h.
 *
 * When existing slabs are not sufficient, or a large (4k or greater)
 * allocation is requested, it ends up here (__system_malloc and __system_free).
 */
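
/*
 * Rough sketch of the call flow, inferred from the code below: malloc()/free()
 * hand small objects to slab_alloc()/slab_free() from Pal/lib/slabmgr.h, while
 * large objects and new slab areas fall through to __system_malloc() and
 * __system_free(), which reserve an address range via bkeep_unmapped_any()
 * and then back it with DkVirtualMemoryAlloc().
 */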

#include <asm/mman.h>
#include <pal.h>
#include <pal_debug.h>
#include <shim_checkpoint.h>
#include <shim_internal.h>
#include <shim_profile.h>
#include <shim_utils.h>
#include <shim_vma.h>

static struct shim_lock slab_mgr_lock;

#define SYSTEM_LOCK()   lock(&slab_mgr_lock)
#define SYSTEM_UNLOCK() unlock(&slab_mgr_lock)
#define SYSTEM_LOCKED() locked(&slab_mgr_lock)

#ifdef SLAB_DEBUG_TRACE
#define SLAB_DEBUG
#endif

#define SLAB_CANARY
#define STARTUP_SIZE 16

#include <slabmgr.h>

static SLAB_MGR slab_mgr = NULL;

DEFINE_PROFILE_CATEGORY(memory, );

/* Returns NULL on failure */
void* __system_malloc(size_t size) {
    size_t alloc_size = ALLOC_ALIGN_UP(size);
    void* addr;
    void* ret_addr;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS | VMA_INTERNAL;

    /*
     * If vmas are initialized, we need to request a free address range
     * using bkeep_unmapped_any(). The current mmap code uses this function
     * to synchronize all address allocation, via a "publication"
     * pattern. It is not safe to just call DkVirtualMemoryAlloc directly
     * without reserving the vma region first.
     */
    addr = bkeep_unmapped_any(alloc_size, PROT_READ | PROT_WRITE, flags, 0, "slab");
    if (!addr)
        return NULL;

    do {
        ret_addr = DkVirtualMemoryAlloc(addr, alloc_size, 0, PAL_PROT_WRITE | PAL_PROT_READ);

        if (!ret_addr) {
            /* If the allocation is interrupted by a signal, try to handle the
             * signal and then retry the allocation. */
            if (PAL_NATIVE_ERRNO == PAL_ERROR_INTERRUPTED) {
                handle_signal();
                continue;
            }

            debug("failed to allocate memory (%ld)\n", -PAL_ERRNO);
            bkeep_munmap(addr, alloc_size, flags);
            return NULL;
        }
    } while (!ret_addr);

    assert(addr == ret_addr);
    return addr;
}
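
/* Counterpart of __system_malloc(): return the (allocation-aligned) region to
 * the host via DkVirtualMemoryFree() and drop its bookkeeping entry. */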
void __system_free(void* addr, size_t size) {
    DkVirtualMemoryFree(addr, ALLOC_ALIGN_UP(size));

    if (bkeep_munmap(addr, ALLOC_ALIGN_UP(size), VMA_INTERNAL) < 0)
        BUG();
}
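
/* Initialize the slab allocator: create the lock protecting the slab manager
 * and the slab manager itself. Returns 0 on success, -ENOMEM on failure. */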
int init_slab(void) {
    if (!create_lock(&slab_mgr_lock)) {
        return -ENOMEM;
    }
    slab_mgr = create_slab_mgr();
    if (!slab_mgr) {
        return -ENOMEM;
    }
    return 0;
}
EXTERN_ALIAS(init_slab);
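
/* Discard the current slab manager, if any, so that it can be re-created from
 * scratch. The intended caller is not documented in this file; presumably this
 * is used when allocator state inherited from a parent process must be
 * rebuilt. */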
int reinit_slab(void) {
    if (slab_mgr) {
        destroy_slab_mgr(slab_mgr);
        slab_mgr = NULL;
    }
    return 0;
}
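
/* Profiling counters: malloc_<N> counts allocations that fall into slab level
 * N (as determined by slab_levels[] in the switch below); malloc_big counts
 * allocations too large for any slab level. */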
DEFINE_PROFILE_OCCURENCE(malloc_0, memory);
DEFINE_PROFILE_OCCURENCE(malloc_1, memory);
DEFINE_PROFILE_OCCURENCE(malloc_2, memory);
DEFINE_PROFILE_OCCURENCE(malloc_3, memory);
DEFINE_PROFILE_OCCURENCE(malloc_4, memory);
DEFINE_PROFILE_OCCURENCE(malloc_5, memory);
DEFINE_PROFILE_OCCURENCE(malloc_6, memory);
DEFINE_PROFILE_OCCURENCE(malloc_7, memory);
DEFINE_PROFILE_OCCURENCE(malloc_big, memory);

#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
void* __malloc_debug(size_t size, const char* file, int line)
#else
void* malloc(size_t size)
#endif
{
#ifdef PROFILE
    int level = -1;

    for (size_t i = 0; i < SLAB_LEVEL; i++)
        if (size < slab_levels[i]) {
            level = i;
            break;
        }
    switch (level) {
        case 0:
            INC_PROFILE_OCCURENCE(malloc_0);
            break;
        case 1:
            INC_PROFILE_OCCURENCE(malloc_1);
            break;
        case 2:
            INC_PROFILE_OCCURENCE(malloc_2);
            break;
        case 3:
            INC_PROFILE_OCCURENCE(malloc_3);
            break;
        case 4:
            INC_PROFILE_OCCURENCE(malloc_4);
            break;
        case 5:
            INC_PROFILE_OCCURENCE(malloc_5);
            break;
        case 6:
            INC_PROFILE_OCCURENCE(malloc_6);
            break;
        case 7:
            INC_PROFILE_OCCURENCE(malloc_7);
            break;
        case -1:
            INC_PROFILE_OCCURENCE(malloc_big);
            break;
    }
#endif

#ifdef SLAB_DEBUG_TRACE
    void* mem = slab_alloc_debug(slab_mgr, size, file, line);
#else
    void* mem = slab_alloc(slab_mgr, size);
#endif

    if (!mem) {
        /*
         * Normally, the library OS should not run out of memory.
         * If malloc() failed internally, we cannot handle the
         * condition and must terminate the current process.
         */
        SYS_PRINTF("******** Out-of-memory in library OS ********\n");
        __abort();
    }

#ifdef SLAB_DEBUG_PRINT
    debug("malloc(%lu) = %p (%s:%d)\n", (unsigned long)size, mem, file, line);
#endif
    return mem;
}
#if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
EXTERN_ALIAS(malloc);
#endif
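
/* Allocate a zero-initialized array of nmemb elements of `size` bytes each.
 * Returns NULL if nmemb * size overflows. */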
void* calloc(size_t nmemb, size_t size) {
    /* This overflow check is not undefined behavior, because the operands are
     * unsigned: the multiplication wraps, and the wrap is detected by dividing
     * back. The size != 0 check avoids a division by zero. */
    size_t total = nmemb * size;
    if (size && total / size != nmemb)
        return NULL;

    void* ptr = malloc(total);
    if (ptr)
        memset(ptr, 0, total);
    return ptr;
}
EXTERN_ALIAS(calloc);

#if 0 /* Temporarily disabling this code */
void* realloc(void* ptr, size_t new_size) {
    /* TODO: We can't deal with this case right now */
    assert(!memory_migrated(ptr));

    size_t old_size = slab_get_buf_size(slab_mgr, ptr);

    /*
     * TODO: this realloc() implementation follows the GLIBC design, which
     * will avoid reallocation when the buffer is large enough. Potentially
     * this design can cause memory draining if the user resizes an extremely
     * large object to a much smaller one.
     */
    if (old_size >= new_size)
        return ptr;

    void* new_buf = malloc(new_size);
    if (!new_buf)
        return NULL;
    memcpy(new_buf, ptr, old_size);
    /* realloc() does not zero the rest of the object */
    free(ptr);
    return new_buf;
}
EXTERN_ALIAS(realloc);
#endif

/* Copies data from `mem` to a newly allocated buffer of a specified size. */
#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
void* __malloc_copy_debug(const void* mem, size_t size, const char* file, int line)
#else
void* malloc_copy(const void* mem, size_t size)
#endif
{
#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
    void* buff = __malloc_debug(size, file, line);
#else
    void* buff = malloc(size);
#endif

    if (buff)
        memcpy(buff, mem, size);
    return buff;
}
#if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
EXTERN_ALIAS(malloc_copy);
#endif
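
/* Profiling counters for free(): free_<N> counts objects returned to slab
 * level N, free_big counts large objects, and free_migrated counts pointers
 * into migrated (checkpointed) memory, which free() deliberately ignores. */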
DEFINE_PROFILE_OCCURENCE(free_0, memory);
DEFINE_PROFILE_OCCURENCE(free_1, memory);
DEFINE_PROFILE_OCCURENCE(free_2, memory);
DEFINE_PROFILE_OCCURENCE(free_3, memory);
DEFINE_PROFILE_OCCURENCE(free_4, memory);
DEFINE_PROFILE_OCCURENCE(free_5, memory);
DEFINE_PROFILE_OCCURENCE(free_6, memory);
DEFINE_PROFILE_OCCURENCE(free_7, memory);
DEFINE_PROFILE_OCCURENCE(free_big, memory);
DEFINE_PROFILE_OCCURENCE(free_migrated, memory);

#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
void __free_debug(void* mem, const char* file, int line)
#else
void free(void* mem)
#endif
{
    if (!mem)
        return;

    if (memory_migrated(mem)) {
        INC_PROFILE_OCCURENCE(free_migrated);
        return;
    }

#ifdef PROFILE
    int level = RAW_TO_LEVEL(mem);
    switch (level) {
        case 0:
            INC_PROFILE_OCCURENCE(free_0);
            break;
        case 1:
            INC_PROFILE_OCCURENCE(free_1);
            break;
        case 2:
            INC_PROFILE_OCCURENCE(free_2);
            break;
        case 3:
            INC_PROFILE_OCCURENCE(free_3);
            break;
        case 4:
            INC_PROFILE_OCCURENCE(free_4);
            break;
        case 5:
            INC_PROFILE_OCCURENCE(free_5);
            break;
        case 6:
            INC_PROFILE_OCCURENCE(free_6);
            break;
        case 7:
            INC_PROFILE_OCCURENCE(free_7);
            break;
        case -1:
        case 255:
            INC_PROFILE_OCCURENCE(free_big);
            break;
    }
#endif

#ifdef SLAB_DEBUG_PRINT
    debug("free(%p) (%s:%d)\n", mem, file, line);
#endif

#ifdef SLAB_DEBUG_TRACE
    slab_free_debug(slab_mgr, mem, file, line);
#else
    slab_free(slab_mgr, mem);
#endif
}
#if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
EXTERN_ALIAS(free);
#endif