shim_malloc.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449
  1. /* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
  2. /* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */
  3. /* Copyright (C) 2014 Stony Brook University
  4. This file is part of Graphene Library OS.
  5. Graphene Library OS is free software: you can redistribute it and/or
  6. modify it under the terms of the GNU Lesser General Public License
  7. as published by the Free Software Foundation, either version 3 of the
  8. License, or (at your option) any later version.
  9. Graphene Library OS is distributed in the hope that it will be useful,
  10. but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. GNU Lesser General Public License for more details.
  13. You should have received a copy of the GNU Lesser General Public License
  14. along with this program. If not, see <http://www.gnu.org/licenses/>. */
  15. /*
  16. * shim_malloc.c
  17. *
  18. * This file implements page allocation for the library OS-internal SLAB
  19. * memory allocator. The slab allocator is in Pal/lib/slabmgr.h.
  20. *
  21. * When existing slabs are not sufficient, or a large (4k or greater)
  22. * allocation is requested, it ends up here (__system_alloc and __system_free).
  23. *
  24. * There are two modes this file executes in: early initialization (before
  25. * VMAs are available), and post-initialization.
  26. *
  27. * Before VMAs are available, allocations are tracked in the shim_heap_areas
  28. * array.
  29. *
 * Once VMAs are initialized, the contents of shim_heap_areas are added to the VMA
  31. * list. In order to reduce the risk of virtual address collisions, the VMA
  32. * for the shim_heap_area is never removed, but the pages themselves are
  33. * freed. This approach effectively reserves part of the address space for
  34. * initialization-time bookkeeping.
  35. *
  36. * After initialization, all allocations and frees just call
 * DkVirtualMemoryAlloc and DkVirtualMemoryFree, and add/remove VMAs for the
  38. * results.
  39. */
  40. #include <shim_internal.h>
  41. #include <shim_utils.h>
  42. #include <shim_profile.h>
  43. #include <shim_checkpoint.h>
  44. #include <shim_vma.h>
  45. #include <pal.h>
  46. #include <pal_debug.h>
  47. #include <asm/mman.h>
  48. static LOCKTYPE slab_mgr_lock;
  49. #define system_lock() lock(slab_mgr_lock)
  50. #define system_unlock() unlock(slab_mgr_lock)
  51. #define PAGE_SIZE allocsize
  52. #ifdef SLAB_DEBUG_TRACE
  53. # define SLAB_DEBUG
  54. #endif
  55. #define SLAB_CANARY
  56. #define STARTUP_SIZE 4
  57. #include <slabmgr.h>
  58. static SLAB_MGR slab_mgr = NULL;
  59. #define MIN_SHIM_HEAP_PAGES 64
  60. #define MAX_SHIM_HEAP_AREAS 32
  61. #define INIT_SHIM_HEAP 256 * allocsize
  62. static int vmas_initialized = 0;
  63. static struct shim_heap {
  64. void * start;
  65. void * current;
  66. void * end;
  67. } shim_heap_areas[MAX_SHIM_HEAP_AREAS];
  68. static LOCKTYPE shim_heap_lock;
  69. DEFINE_PROFILE_CATAGORY(memory, );
/* Find (or create) a shim_heap area with at least SIZE bytes of headroom.
 * Called from __system_malloc with shim_heap_lock held; the lock is
 * temporarily dropped around bkeep_* calls.  Returns NULL only if the PAL
 * cannot allocate a new region. */
static struct shim_heap * __alloc_enough_heap (size_t size)
{
    struct shim_heap * heap = NULL, * first_empty = NULL, * smallest = NULL;
    size_t smallest_size = 0;

    /* Pass over the table: return the first existing area that still has
     * SIZE bytes available.  Along the way remember the first unused slot
     * and the area with the least remaining space (the best candidate to
     * recycle if every slot is taken). */
    for (int i = 0 ; i < MAX_SHIM_HEAP_AREAS ; i++)
        if (shim_heap_areas[i].start) {
            if (shim_heap_areas[i].end >= shim_heap_areas[i].current + size)
                return &shim_heap_areas[i];

            if (!smallest ||
                shim_heap_areas[i].end <=
                shim_heap_areas[i].current + smallest_size) {
                smallest = &shim_heap_areas[i];
                smallest_size = shim_heap_areas[i].end -
                                shim_heap_areas[i].current;
            }
        } else {
            if (!first_empty)
                first_empty = &shim_heap_areas[i];
        }

    /* No existing area fits; heap is necessarily still NULL here (a fitting
     * area would have returned above), so allocate a fresh region. */
    if (!heap) {
        size_t heap_size = MIN_SHIM_HEAP_PAGES * allocsize;
        void * start = NULL;
        heap = first_empty ? : smallest;   /* prefer an empty slot */
        assert(heap);

        /* Double the region size until the request fits. */
        while (size > heap_size)
            heap_size *= 2;

        if (!(start = (void *) DkVirtualMemoryAlloc(NULL, heap_size, 0,
                                                    PAL_PROT_WRITE|PAL_PROT_READ)))
            return NULL;

        debug("allocate internal heap at %p - %p\n", start, start + heap_size);

        /* Recycling the smallest area: release its unused tail.  Drop
         * shim_heap_lock around the bookkeeping call (bkeep_* takes its
         * own locks). */
        if (heap == smallest && heap->current != heap->end) {
            DkVirtualMemoryFree(heap->current, heap->end - heap->current);
            int flags = VMA_INTERNAL;
            unlock(shim_heap_lock);
            bkeep_munmap(heap->current, heap->end - heap->current, flags);
            lock(shim_heap_lock);
        }

        heap->start = heap->current = start;
        heap->end = start + heap_size;

        /* Publish the new region as an internal VMA, again dropping the
         * heap lock around the bookkeeping call. */
        unlock(shim_heap_lock);
        bkeep_mmap(start, heap_size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL, NULL, 0, NULL);
        lock(shim_heap_lock);
    }

    return heap;
}
/* Page-granular allocation backend for the slab manager.
 * SIZE is rounded up to the allocation granularity.
 * Returns NULL on failure. */
void * __system_malloc (size_t size)
{
    size_t alloc_size = ALIGN_UP(size);
    void *addr, *addr_new;

    lock(shim_heap_lock);

    if (vmas_initialized) {
        /* If vmas are initialized, we need to request a free address range
         * using get_unmapped_vma().  The current mmap code uses this function
         * to synchronize all address allocation, via a "publication"
         * pattern.  It is not safe to just call DkVirtualMemoryAlloc directly
         * without reserving the vma region first.
         */
        int flags = MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL;
        addr = get_unmapped_vma(alloc_size, flags);
        if (!addr) {
            unlock(shim_heap_lock);
            return NULL;
        }

        addr_new = (void *) DkVirtualMemoryAlloc(addr, alloc_size, 0,
                                                 PAL_PROT_WRITE|PAL_PROT_READ);
        if (!addr_new) {
            /* PAL could not commit the memory: back out the reservation. */
            bkeep_munmap(addr, alloc_size, flags);
            unlock(shim_heap_lock);
            return NULL;
        }

        /* The PAL must honor the address we reserved. */
        assert (addr == addr_new);
        bkeep_mmap(addr, alloc_size, PROT_READ|PROT_WRITE,
                   flags, NULL, 0, NULL);
    } else {
        /* Early boot: bump-allocate out of a reserved heap area. */
        struct shim_heap * heap = __alloc_enough_heap(alloc_size);
        if (!heap) {
            unlock(shim_heap_lock);
            return NULL;
        }
        addr = heap->current;
        heap->current += alloc_size;
    }

    unlock(shim_heap_lock);
    return addr;
}
  157. void __system_free (void * addr, size_t size)
  158. {
  159. int in_reserved_area = 0;
  160. DkVirtualMemoryFree(addr, ALIGN_UP(size));
  161. int flags = VMA_INTERNAL;
  162. for (int i = 0 ; i < MAX_SHIM_HEAP_AREAS ; i++)
  163. if (shim_heap_areas[i].start) {
  164. /* Here we assume that any allocation from the
  165. * shim_heap_area is a strict inclusion. Allocations
  166. * cannot partially overlap.
  167. */
  168. if (addr >= shim_heap_areas[i].start
  169. && addr <= shim_heap_areas[i].end)
  170. in_reserved_area = 1;
  171. }
  172. if (! in_reserved_area)
  173. bkeep_munmap(addr, ALIGN_UP(size), flags);
  174. }
  175. int init_heap (void)
  176. {
  177. create_lock(shim_heap_lock);
  178. void * start = (void *) DkVirtualMemoryAlloc(NULL, INIT_SHIM_HEAP, 0,
  179. PAL_PROT_WRITE|PAL_PROT_READ);
  180. if (!start)
  181. return -ENOMEM;
  182. debug("allocate internal heap at %p - %p\n", start,
  183. start + INIT_SHIM_HEAP);
  184. shim_heap_areas[0].start = shim_heap_areas[0].current = start;
  185. shim_heap_areas[0].end = start + INIT_SHIM_HEAP;
  186. return 0;
  187. }
/* Register the early-boot heap areas with the (now ready) VMA subsystem and
 * release their unused tails back to the PAL.  Sets vmas_initialized, after
 * which __system_malloc switches to the VMA-based allocation path. */
int bkeep_shim_heap (void)
{
    lock(shim_heap_lock);

    for (int i = 0 ; i < MAX_SHIM_HEAP_AREAS ; i++)
        if (shim_heap_areas[i].start) {
            /* Add a VMA for the active region */
            bkeep_mmap(shim_heap_areas[i].start,
                       shim_heap_areas[i].current - shim_heap_areas[i].start,
                       PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL, NULL, 0, NULL);

            /* Go ahead and free the reserved region */
            if (shim_heap_areas[i].current < shim_heap_areas[i].end) {
                DkVirtualMemoryFree(shim_heap_areas[i].current,
                                    ALIGN_UP(((long unsigned int) shim_heap_areas[i].end) - ((long unsigned int) shim_heap_areas[i].current)));
                /* Truncate the recorded area so later frees inside the
                 * released tail are not mistaken for reserved-area frees. */
                shim_heap_areas[i].end = shim_heap_areas[i].current;
            }
        }

    vmas_initialized = 1;

    unlock(shim_heap_lock);
    return 0;
}
  209. int init_slab (void)
  210. {
  211. create_lock(slab_mgr_lock);
  212. slab_mgr = create_slab_mgr();
  213. return 0;
  214. }
  215. extern_alias(init_slab);
  216. int reinit_slab (void)
  217. {
  218. if (slab_mgr) {
  219. destroy_slab_mgr(slab_mgr);
  220. slab_mgr = NULL;
  221. }
  222. return 0;
  223. }
  224. DEFINE_PROFILE_OCCURENCE(malloc_0, memory);
  225. DEFINE_PROFILE_OCCURENCE(malloc_1, memory);
  226. DEFINE_PROFILE_OCCURENCE(malloc_2, memory);
  227. DEFINE_PROFILE_OCCURENCE(malloc_3, memory);
  228. DEFINE_PROFILE_OCCURENCE(malloc_4, memory);
  229. DEFINE_PROFILE_OCCURENCE(malloc_5, memory);
  230. DEFINE_PROFILE_OCCURENCE(malloc_6, memory);
  231. DEFINE_PROFILE_OCCURENCE(malloc_7, memory);
  232. DEFINE_PROFILE_OCCURENCE(malloc_big, memory);
  233. #if defined(SLAB_DEBUG_PRINT) || defined(SLABD_DEBUG_TRACE)
  234. void * __malloc_debug (size_t size, const char * file, int line)
  235. #else
  236. void * malloc (size_t size)
  237. #endif
  238. {
  239. #ifdef PROFILE
  240. int i;
  241. int level = -1;
  242. for (i = 0 ; i < SLAB_LEVEL ; i++)
  243. if (size < slab_levels[i]) {
  244. level = i;
  245. break;
  246. }
  247. switch(level) {
  248. case 0:
  249. INC_PROFILE_OCCURENCE(malloc_0);
  250. break;
  251. case 1:
  252. INC_PROFILE_OCCURENCE(malloc_1);
  253. break;
  254. case 2:
  255. INC_PROFILE_OCCURENCE(malloc_2);
  256. break;
  257. case 3:
  258. INC_PROFILE_OCCURENCE(malloc_3);
  259. break;
  260. case 4:
  261. INC_PROFILE_OCCURENCE(malloc_4);
  262. break;
  263. case 5:
  264. INC_PROFILE_OCCURENCE(malloc_5);
  265. break;
  266. case 6:
  267. INC_PROFILE_OCCURENCE(malloc_6);
  268. break;
  269. case 7:
  270. INC_PROFILE_OCCURENCE(malloc_7);
  271. break;
  272. case -1:
  273. INC_PROFILE_OCCURENCE(malloc_big);
  274. break;
  275. }
  276. #endif
  277. #ifdef SLAB_DEBUG_TRACE
  278. void * mem = slab_alloc_debug(slab_mgr, size, file, line);
  279. #else
  280. void * mem = slab_alloc(slab_mgr, size);
  281. #endif
  282. #ifdef SLAB_DEBUG_PRINT
  283. debug("malloc(%d) = %p (%s:%d)\n", size, mem, file, line);
  284. #endif
  285. return mem;
  286. }
  287. #if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
  288. extern_alias(malloc);
  289. #endif
  290. void * calloc (size_t nmemb, size_t size)
  291. {
  292. // This overflow checking is not a UB, because the operands are unsigned.
  293. size_t total = nmemb * size;
  294. if (total / size != nmemb)
  295. return NULL;
  296. void *ptr = malloc(total);
  297. if (ptr)
  298. memset(ptr, 0, total);
  299. return ptr;
  300. }
  301. extern_alias(calloc);
  302. #if defined(SLAB_DEBUG_PRINT) || defined(SLABD_DEBUG_TRACE)
  303. void * __remalloc_debug (const void * mem, size_t size,
  304. const char * file, int line)
  305. #else
  306. void * remalloc (const void * mem, size_t size)
  307. #endif
  308. {
  309. #if defined(SLAB_DEBUG_PRINT) || defined(SLABD_DEBUG_TRACE)
  310. void * buff = __malloc_debug(size, file, line);
  311. #else
  312. void * buff = malloc(size);
  313. #endif
  314. if (buff)
  315. memcpy(buff, mem, size);
  316. return buff;
  317. }
  318. #if !defined(SLAB_DEBUG_PRINT) && !defined(SLABD_DEBUG_TRACE)
  319. extern_alias(remalloc);
  320. #endif
  321. DEFINE_PROFILE_OCCURENCE(free_0, memory);
  322. DEFINE_PROFILE_OCCURENCE(free_1, memory);
  323. DEFINE_PROFILE_OCCURENCE(free_2, memory);
  324. DEFINE_PROFILE_OCCURENCE(free_3, memory);
  325. DEFINE_PROFILE_OCCURENCE(free_4, memory);
  326. DEFINE_PROFILE_OCCURENCE(free_5, memory);
  327. DEFINE_PROFILE_OCCURENCE(free_6, memory);
  328. DEFINE_PROFILE_OCCURENCE(free_7, memory);
  329. DEFINE_PROFILE_OCCURENCE(free_big, memory);
  330. DEFINE_PROFILE_OCCURENCE(free_migrated, memory);
  331. #if defined(SLAB_DEBUG_PRINT) || defined(SLABD_DEBUG_TRACE)
  332. void __free_debug (void * mem, const char * file, int line)
  333. #else
  334. void free (void * mem)
  335. #endif
  336. {
  337. if (MEMORY_MIGRATED(mem)) {
  338. INC_PROFILE_OCCURENCE(free_migrated);
  339. return;
  340. }
  341. #ifdef PROFILE
  342. int level = RAW_TO_LEVEL(mem);
  343. switch(level) {
  344. case 0:
  345. INC_PROFILE_OCCURENCE(free_0);
  346. break;
  347. case 1:
  348. INC_PROFILE_OCCURENCE(free_1);
  349. break;
  350. case 2:
  351. INC_PROFILE_OCCURENCE(free_2);
  352. break;
  353. case 3:
  354. INC_PROFILE_OCCURENCE(free_3);
  355. break;
  356. case 4:
  357. INC_PROFILE_OCCURENCE(free_4);
  358. break;
  359. case 5:
  360. INC_PROFILE_OCCURENCE(free_5);
  361. break;
  362. case 6:
  363. INC_PROFILE_OCCURENCE(free_6);
  364. break;
  365. case 7:
  366. INC_PROFILE_OCCURENCE(free_7);
  367. break;
  368. case -1:
  369. case 255:
  370. INC_PROFILE_OCCURENCE(free_big);
  371. break;
  372. }
  373. #endif
  374. #ifdef SLAB_DEBUG_PRINT
  375. debug("free(%p) (%s:%d)\n", mem, file, line);
  376. #endif
  377. #ifdef SLAB_DEBUG_TRACE
  378. slab_free_debug(slab_mgr, mem, file, line);
  379. #else
  380. slab_free(slab_mgr, mem);
  381. #endif
  382. }
  383. #if !defined(SLAB_DEBUG_PRINT) && !defined(SLABD_DEBUG_TRACE)
  384. extern_alias(free);
  385. #endif