/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */

/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */

/*
 * shim_malloc.c
 *
 * This file implements page allocation for the library OS-internal SLAB
 * memory allocator.  The slab allocator itself lives in Pal/lib/slabmgr.h.
 *
 * When existing slabs are not sufficient, or a large (4k or greater)
 * allocation is requested, the request ends up here, in __system_malloc and
 * __system_free.
 *
 * This file executes in two modes: early initialization (before VMAs are
 * available), and post-initialization.
 *
 * Before VMAs are available, allocations are tracked in the shim_heap_areas
 * array.
 *
 * Once VMAs are initialized, the contents of shim_heap_areas are added to
 * the VMA list.  In order to reduce the risk of virtual address collisions,
 * the VMA for each shim_heap_area is never removed, but the pages themselves
 * are freed.  This approach effectively reserves part of the address space
 * for initialization-time bookkeeping.
 *
 * After initialization, all allocations and frees simply call
 * DkVirtualMemoryAlloc and DkVirtualMemoryFree, and add or remove VMAs for
 * the results.
 */

#include <shim_internal.h>
#include <shim_utils.h>
#include <shim_profile.h>
#include <shim_checkpoint.h>
#include <shim_vma.h>

#include <pal.h>
#include <pal_debug.h>

#include <asm/mman.h>

static LOCKTYPE slab_mgr_lock;

#define system_lock()   lock(slab_mgr_lock)
#define system_unlock() unlock(slab_mgr_lock)
#define PAGE_SIZE       allocsize

#ifdef SLAB_DEBUG_TRACE
# define SLAB_DEBUG
#endif

#define SLAB_CANARY
#define STARTUP_SIZE    4

#include <slabmgr.h>

static SLAB_MGR slab_mgr = NULL;

#define MIN_SHIM_HEAP_PAGES  64
#define MAX_SHIM_HEAP_AREAS  32
#define INIT_SHIM_HEAP       (256 * allocsize)

static int vmas_initialized = 0;

static struct shim_heap {
    void * start;
    void * current;
    void * end;
} shim_heap_areas[MAX_SHIM_HEAP_AREAS];

static LOCKTYPE shim_heap_lock;

DEFINE_PROFILE_CATAGORY(memory, );

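/* Find a shim_heap_area with at least `size` bytes of room left.  If none
 * exists, grab a fresh region from the PAL, reusing the first empty slot if
 * there is one and otherwise evicting the area with the least remaining
 * space.  Called with shim_heap_lock held; the lock is dropped around the
 * bkeep_* calls so it is not held across the VMA bookkeeping. */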
static struct shim_heap * __alloc_enough_heap (size_t size)
{
    struct shim_heap * heap = NULL, * first_empty = NULL, * smallest = NULL;
    size_t smallest_size = 0;

    /* First pass: reuse an existing area that still has enough room. */
    for (int i = 0 ; i < MAX_SHIM_HEAP_AREAS ; i++)
        if (shim_heap_areas[i].start) {
            if (shim_heap_areas[i].end >= shim_heap_areas[i].current + size)
                return &shim_heap_areas[i];

            if (!smallest ||
                shim_heap_areas[i].end <=
                shim_heap_areas[i].current + smallest_size) {
                smallest = &shim_heap_areas[i];
                smallest_size = shim_heap_areas[i].end -
                                shim_heap_areas[i].current;
            }
        } else {
            if (!first_empty)
                first_empty = &shim_heap_areas[i];
        }

    /* No existing area had room (heap is still NULL here): set up a new one. */
    if (!heap) {
        size_t heap_size = MIN_SHIM_HEAP_PAGES * allocsize;
        void * start = NULL;
        heap = first_empty ? : smallest;
        assert(heap);

        while (size > heap_size)
            heap_size *= 2;

        if (!(start = (void *) DkVirtualMemoryAlloc(NULL, heap_size, 0,
                                            PAL_PROT_WRITE|PAL_PROT_READ)))
            return NULL;

        debug("allocate internal heap at %p - %p\n", start, start + heap_size);

        /* If we are evicting the smallest area, free its unused tail and
         * drop the corresponding bookkeeping. */
        if (heap == smallest && heap->current != heap->end) {
            DkVirtualMemoryFree(heap->current, heap->end - heap->current);

            int flags = VMA_INTERNAL;
            unlock(shim_heap_lock);
            bkeep_munmap(heap->current, heap->end - heap->current, &flags);
            lock(shim_heap_lock);
        }

        heap->start = heap->current = start;
        heap->end = start + heap_size;

        unlock(shim_heap_lock);
        bkeep_mmap(start, heap_size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL, NULL, 0, NULL);
        lock(shim_heap_lock);
    }

    return heap;
}

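/* Allocate `size` bytes (rounded up to a page multiple) for the slab
 * manager.  Before VMAs are initialized this carves space out of a
 * shim_heap_area; afterwards it goes straight to the PAL and records a
 * VMA_INTERNAL mapping for the result. */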
void * __system_malloc (size_t size)
{
    size_t alloc_size = ALIGN_UP(size);
    void * addr;

    lock(shim_heap_lock);

    if (vmas_initialized) {
        addr = (void *) DkVirtualMemoryAlloc(NULL, alloc_size, 0,
                                             PAL_PROT_WRITE|PAL_PROT_READ);
        bkeep_mmap(addr, alloc_size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL, NULL, 0, NULL);
    } else {
        struct shim_heap * heap = __alloc_enough_heap(alloc_size);
        if (!heap) {
            unlock(shim_heap_lock);
            return NULL;
        }
        addr = heap->current;
        heap->current += alloc_size;
    }

    unlock(shim_heap_lock);
    return addr;
}

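/* Release pages back to the PAL.  If the range lies inside one of the
 * reserved shim_heap_areas, its VMA is deliberately kept (see the comment at
 * the top of this file); otherwise the bookkeeping is removed as well. */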
void __system_free (void * addr, size_t size)
{
    int in_reserved_area = 0;
    DkVirtualMemoryFree(addr, ALIGN_UP(size));
    int flags = VMA_INTERNAL;

    for (int i = 0 ; i < MAX_SHIM_HEAP_AREAS ; i++)
        if (shim_heap_areas[i].start) {
            /* Here we assume that any allocation from a shim_heap_area is a
             * strict inclusion.  Allocations cannot partially overlap an
             * area, so checking the start address is sufficient. */
            if (addr >= shim_heap_areas[i].start
                && addr < shim_heap_areas[i].end)
                in_reserved_area = 1;
        }

    if (!in_reserved_area)
        bkeep_munmap(addr, ALIGN_UP(size), &flags);
}

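/* Set up the first shim_heap_area.  Called once during early boot, before
 * the VMA bookkeeping exists. */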
int init_heap (void)
{
    create_lock(shim_heap_lock);

    void * start = (void *) DkVirtualMemoryAlloc(NULL, INIT_SHIM_HEAP, 0,
                                                 PAL_PROT_WRITE|PAL_PROT_READ);
    if (!start)
        return -ENOMEM;

    debug("allocate internal heap at %p - %p\n", start,
          start + INIT_SHIM_HEAP);

    shim_heap_areas[0].start = shim_heap_areas[0].current = start;
    shim_heap_areas[0].end = start + INIT_SHIM_HEAP;

    return 0;
}

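/* Transition from early-boot bookkeeping to VMAs: record a VMA for the used
 * prefix of every shim_heap_area, return the unused tail pages to the PAL,
 * and flip vmas_initialized.  The VMAs added here are never removed, which
 * keeps these ranges reserved in the address space. */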
int bkeep_shim_heap (void)
{
    lock(shim_heap_lock);

    for (int i = 0 ; i < MAX_SHIM_HEAP_AREAS ; i++)
        if (shim_heap_areas[i].start) {
            /* Add a VMA for the active region */
            bkeep_mmap(shim_heap_areas[i].start,
                       shim_heap_areas[i].current - shim_heap_areas[i].start,
                       PROT_READ|PROT_WRITE,
                       MAP_PRIVATE|MAP_ANONYMOUS|VMA_INTERNAL, NULL, 0, NULL);

            /* Go ahead and free the reserved region */
            if (shim_heap_areas[i].current < shim_heap_areas[i].end) {
                DkVirtualMemoryFree(shim_heap_areas[i].current,
                                    ALIGN_UP(shim_heap_areas[i].end -
                                             shim_heap_areas[i].current));
                shim_heap_areas[i].end = shim_heap_areas[i].current;
            }
        }

    vmas_initialized = 1;
    unlock(shim_heap_lock);
    return 0;
}

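/* init_slab() creates the slab manager; reinit_slab() discards an existing
 * manager (e.g., one inherited across a checkpoint/migration) so that a
 * fresh one can be created afterwards. */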
int init_slab (void)
{
    create_lock(slab_mgr_lock);
    slab_mgr = create_slab_mgr();
    return 0;
}
extern_alias(init_slab);

int reinit_slab (void)
{
    if (slab_mgr) {
        destroy_slab_mgr(slab_mgr);
        slab_mgr = NULL;
    }
    return 0;
}

DEFINE_PROFILE_OCCURENCE(malloc_0, memory);
DEFINE_PROFILE_OCCURENCE(malloc_1, memory);
DEFINE_PROFILE_OCCURENCE(malloc_2, memory);
DEFINE_PROFILE_OCCURENCE(malloc_3, memory);
DEFINE_PROFILE_OCCURENCE(malloc_4, memory);
DEFINE_PROFILE_OCCURENCE(malloc_5, memory);
DEFINE_PROFILE_OCCURENCE(malloc_6, memory);
DEFINE_PROFILE_OCCURENCE(malloc_7, memory);
DEFINE_PROFILE_OCCURENCE(malloc_big, memory);

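/* malloc() hands the request to the slab manager; requests too large for the
 * biggest slab level fall back on __system_malloc (via slabmgr.h).  Under
 * PROFILE, per-level counters record the size-class distribution. */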
#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
void * __malloc_debug (size_t size, const char * file, int line)
#else
void * malloc (size_t size)
#endif
{
#ifdef PROFILE
    int i;
    int level = -1;

    for (i = 0 ; i < SLAB_LEVEL ; i++)
        if (size < slab_levels[i]) {
            level = i;
            break;
        }

    switch (level) {
        case 0:
            INC_PROFILE_OCCURENCE(malloc_0);
            break;
        case 1:
            INC_PROFILE_OCCURENCE(malloc_1);
            break;
        case 2:
            INC_PROFILE_OCCURENCE(malloc_2);
            break;
        case 3:
            INC_PROFILE_OCCURENCE(malloc_3);
            break;
        case 4:
            INC_PROFILE_OCCURENCE(malloc_4);
            break;
        case 5:
            INC_PROFILE_OCCURENCE(malloc_5);
            break;
        case 6:
            INC_PROFILE_OCCURENCE(malloc_6);
            break;
        case 7:
            INC_PROFILE_OCCURENCE(malloc_7);
            break;
        case -1:
            INC_PROFILE_OCCURENCE(malloc_big);
            break;
    }
#endif

#ifdef SLAB_DEBUG_TRACE
    void * mem = slab_alloc_debug(slab_mgr, size, file, line);
#else
    void * mem = slab_alloc(slab_mgr, size);
#endif

#ifdef SLAB_DEBUG_PRINT
    debug("malloc(%lu) = %p (%s:%d)\n", size, mem, file, line);
#endif
    return mem;
}

#if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
extern_alias(malloc);
#endif

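/* calloc() is malloc() plus memset(0).  Note that nmemb * size is not
 * checked for overflow; this allocator only serves library-OS-internal
 * callers. */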
void * calloc (size_t nmemb, size_t size)
{
    size_t total = nmemb * size;
    void * ptr = malloc(total);
    if (ptr)
        memset(ptr, 0, total);
    return ptr;
}
extern_alias(calloc);

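/* remalloc() copies `size` bytes of `mem` into a fresh allocation.  Unlike
 * realloc(), it never frees the original buffer. */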
#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
void * __remalloc_debug (const void * mem, size_t size,
                         const char * file, int line)
#else
void * remalloc (const void * mem, size_t size)
#endif
{
#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
    void * buff = __malloc_debug(size, file, line);
#else
    void * buff = malloc(size);
#endif

    if (buff)
        memcpy(buff, mem, size);
    return buff;
}

#if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
extern_alias(remalloc);
#endif

DEFINE_PROFILE_OCCURENCE(free_0, memory);
DEFINE_PROFILE_OCCURENCE(free_1, memory);
DEFINE_PROFILE_OCCURENCE(free_2, memory);
DEFINE_PROFILE_OCCURENCE(free_3, memory);
DEFINE_PROFILE_OCCURENCE(free_4, memory);
DEFINE_PROFILE_OCCURENCE(free_5, memory);
DEFINE_PROFILE_OCCURENCE(free_6, memory);
DEFINE_PROFILE_OCCURENCE(free_7, memory);
DEFINE_PROFILE_OCCURENCE(free_big, memory);
DEFINE_PROFILE_OCCURENCE(free_migrated, memory);

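/* free() returns memory to the slab manager.  Memory that arrived via
 * checkpoint migration (MEMORY_MIGRATED) was not allocated from this
 * manager's slabs, so it is counted and left alone rather than passed to
 * slab_free(). */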
#if defined(SLAB_DEBUG_PRINT) || defined(SLAB_DEBUG_TRACE)
void __free_debug (void * mem, const char * file, int line)
#else
void free (void * mem)
#endif
{
    if (MEMORY_MIGRATED(mem)) {
        INC_PROFILE_OCCURENCE(free_migrated);
        return;
    }

#ifdef PROFILE
    int level = RAW_TO_LEVEL(mem);
    switch (level) {
        case 0:
            INC_PROFILE_OCCURENCE(free_0);
            break;
        case 1:
            INC_PROFILE_OCCURENCE(free_1);
            break;
        case 2:
            INC_PROFILE_OCCURENCE(free_2);
            break;
        case 3:
            INC_PROFILE_OCCURENCE(free_3);
            break;
        case 4:
            INC_PROFILE_OCCURENCE(free_4);
            break;
        case 5:
            INC_PROFILE_OCCURENCE(free_5);
            break;
        case 6:
            INC_PROFILE_OCCURENCE(free_6);
            break;
        case 7:
            INC_PROFILE_OCCURENCE(free_7);
            break;
        case -1:
        case 255:
            INC_PROFILE_OCCURENCE(free_big);
            break;
    }
#endif

#ifdef SLAB_DEBUG_PRINT
    debug("free(%p) (%s:%d)\n", mem, file, line);
#endif

#ifdef SLAB_DEBUG_TRACE
    slab_free_debug(slab_mgr, mem, file, line);
#else
    slab_free(slab_mgr, mem);
#endif
}

#if !defined(SLAB_DEBUG_PRINT) && !defined(SLAB_DEBUG_TRACE)
extern_alias(free);
#endif