/* -*- mode:c; c-file-style:"k&r"; c-basic-offset: 4; tab-width:4; indent-tabs-mode:nil; mode:auto-fill; fill-column:78; -*- */
/* vim: set ts=4 sw=4 et tw=78 fo=cqt wm=0: */

/* Copyright (C) 2014 Stony Brook University
   This file is part of Graphene Library OS.

   Graphene Library OS is free software: you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License
   as published by the Free Software Foundation, either version 3 of the
   License, or (at your option) any later version.

   Graphene Library OS is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>. */
/*
 * slabmgr.h
 *
 * This file contains the implementation of a SLAB (variable-size) memory
 * allocator.
 */
#ifndef SLABMGR_H
#define SLABMGR_H

#include "list.h"

#include <pal_debug.h>
#include <assert.h>
#include <sys/mman.h>

#ifndef system_malloc
#error "macro \"void * system_malloc(int size)\" not declared"
#endif
#ifndef system_free
#error "macro \"void * system_free(void * ptr, int size)\" not declared"
#endif
#ifndef system_lock
#define system_lock() ({})
#endif
#ifndef system_unlock
#define system_unlock() ({})
#endif
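/* Illustrative only: a client is expected to define the system_* hooks
 * before including this header, along these lines (the host_* names here
 * are assumptions, not part of this file):
 *
 *   #define system_malloc(size)    host_alloc_pages(size)
 *   #define system_free(ptr, size) host_free_pages(ptr, size)
 *   #define system_lock()          host_heap_lock()
 *   #define system_unlock()        host_heap_unlock()
 *   #include "slabmgr.h"
 */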
/* malloc is supposed to provide some kind of alignment guarantee, but
 * I can't find a specific reference to what that should be for x86_64.
 * The first link below is a technical report from Mozilla, which seems
 * to indicate that 64-bit platforms align return values to 16 bytes.
 * calloc provides the same alignment guarantees as malloc; it
 * additionally zeroes the memory, which malloc is not required to do.
 *
 * http://www.erahm.org/2016/03/24/minimum-alignment-of-allocation-across-platforms/
 * http://pubs.opengroup.org/onlinepubs/9699919799/functions/malloc.html
 */
#define MIN_MALLOC_ALIGNMENT 16
/* Slab objects need to be a multiple of 16 bytes to ensure proper address
 * alignment for malloc and calloc. */
#define OBJ_PADDING       15
#define LARGE_OBJ_PADDING 8

/* Returns the smallest exact multiple of _y that is at least as large as _x.
 * In other words, returns _x if _x is a multiple of _y, otherwise rounds
 * _x up to be a multiple of _y. */
#define ROUND_UP(_x, _y) ((((_x) + (_y) - 1) / (_y)) * (_y))
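/* For instance, ROUND_UP(19, 16) evaluates to 32, and ROUND_UP(32, 16)
 * stays 32. Note that _y is evaluated more than once, so it should be free
 * of side effects. */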
DEFINE_LIST(slab_obj);

typedef struct __attribute__((packed)) slab_obj {
    unsigned char level;
    unsigned char padding[OBJ_PADDING];
    union {
        LIST_TYPE(slab_obj) __list;
        unsigned char * raw;
    };
} SLAB_OBJ_TYPE, * SLAB_OBJ;
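/* While an object sits on a free list, the union above holds its list
 * link; once the object is handed out, those same bytes become the start
 * of the caller's payload (SLAB_HDR_SIZE below deliberately excludes the
 * union from the header). */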
/* In order for slab elements to be 16-byte aligned, struct slab_area must
 * be a multiple of 16 bytes. A compile-time assertion checking this
 * invariant is sketched after the struct definition below. */
#define AREA_PADDING 12

DEFINE_LIST(slab_area);

typedef struct __attribute__((packed)) slab_area {
    LIST_TYPE(slab_area) __list;
    unsigned int size;          /* number of objects the area can hold */
    unsigned char pad[AREA_PADDING];
    unsigned char raw[];
} SLAB_AREA_TYPE, * SLAB_AREA;
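/* A sketch of the assertion mentioned above (assumes a C11 toolchain; with
 * the usual two-pointer LIST_TYPE this works out to 16 + 4 + 12 = 32 bytes,
 * a multiple of MIN_MALLOC_ALIGNMENT). */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
_Static_assert(sizeof(SLAB_AREA_TYPE) % MIN_MALLOC_ALIGNMENT == 0,
               "struct slab_area must be a multiple of MIN_MALLOC_ALIGNMENT");
#endif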
#ifdef SLAB_DEBUG
struct slab_debug {
    struct {
        const char * file;
        int line;
    } alloc, free;
};

# define SLAB_DEBUG_SIZE sizeof(struct slab_debug)
#else
# define SLAB_DEBUG_SIZE 0
#endif

#ifdef SLAB_CANARY
# define SLAB_CANARY_STRING 0xDEADBEEF
# define SLAB_CANARY_SIZE   sizeof(unsigned long)
#else
# define SLAB_CANARY_SIZE   0
#endif
#define SLAB_HDR_SIZE \
    ROUND_UP((sizeof(SLAB_OBJ_TYPE) - sizeof(LIST_TYPE(slab_obj)) + \
              SLAB_DEBUG_SIZE + SLAB_CANARY_SIZE), \
             MIN_MALLOC_ALIGNMENT)
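/* Resulting in-memory layout of a small object, as implied by the macros
 * here and by slab_alloc/slab_free below:
 *
 *   [ level | padding ] [ payload: slab_levels[level] bytes, overlaying
 *     the free-list link ] [ canary, if enabled ] [ debug record, if
 *     enabled ]
 *
 * SLAB_HDR_SIZE accounts for everything except the payload. */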
#ifndef SLAB_LEVEL
#define SLAB_LEVEL 8
#endif
#ifndef SLAB_LEVEL_SIZES
# define SLAB_LEVEL_SIZES  16, 32, 64,           \
                           128  - SLAB_HDR_SIZE, \
                           256  - SLAB_HDR_SIZE, \
                           512  - SLAB_HDR_SIZE, \
                           1024 - SLAB_HDR_SIZE, \
                           2048 - SLAB_HDR_SIZE
# define SLAB_LEVELS_SUM (4080 - SLAB_HDR_SIZE * 5)
#else
# ifndef SLAB_LEVELS_SUM
# error "SLAB_LEVELS_SUM not defined"
# endif
#endif
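/* With the defaults above, SLAB_LEVELS_SUM is simply the sum of the eight
 * level sizes: 16 + 32 + 64 + 128 + 256 + 512 + 1024 + 2048 = 4080, with
 * SLAB_HDR_SIZE subtracted once for each of the five levels that deduct
 * it. */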
static int slab_levels[SLAB_LEVEL] = { SLAB_LEVEL_SIZES };

DEFINE_LISTP(slab_obj);
DEFINE_LISTP(slab_area);

typedef struct slab_mgr {
    LISTP_TYPE(slab_area) area_list[SLAB_LEVEL]; /* areas backing each level */
    LISTP_TYPE(slab_obj) free_list[SLAB_LEVEL];  /* objects freed for reuse */
    unsigned int size[SLAB_LEVEL];               /* object capacity per level */
    void * addr[SLAB_LEVEL], * addr_top[SLAB_LEVEL]; /* bump range in the
                                                        newest area */
} SLAB_MGR_TYPE, * SLAB_MGR;
typedef struct __attribute__((packed)) large_mem_obj {
    // offset 0
    unsigned long size;
    unsigned char large_padding[LARGE_OBJ_PADDING];
    // offset 16
    unsigned char level;
    unsigned char padding[OBJ_PADDING];
    // offset 32
    unsigned char raw[];
} LARGE_MEM_OBJ_TYPE, * LARGE_MEM_OBJ;

#define OBJ_LEVEL(obj) ((obj)->level)
#define OBJ_RAW(obj)   (&(obj)->raw)

#ifndef container_of
#define container_of(ptr, type, field) \
    ((type *)((char *)(ptr) - offsetof(type, field)))
#endif

#define RAW_TO_LEVEL(raw_ptr) \
    (*((unsigned char *) (raw_ptr) - OBJ_PADDING - 1))
#define RAW_TO_OBJ(raw_ptr, type) container_of((raw_ptr), type, raw)
#define __SUM_OBJ_SIZE(slab_size, size) \
    (((slab_size) + SLAB_HDR_SIZE) * (size))
#define __MIN_MEM_SIZE() (sizeof(SLAB_AREA_TYPE))
#define __MAX_MEM_SIZE(slab_size, size) \
    (__MIN_MEM_SIZE() + __SUM_OBJ_SIZE((slab_size), (size)))

#define __INIT_SUM_OBJ_SIZE(size) \
    ((SLAB_LEVELS_SUM + SLAB_HDR_SIZE * SLAB_LEVEL) * (size))
#define __INIT_MIN_MEM_SIZE() \
    (sizeof(SLAB_MGR_TYPE) + sizeof(SLAB_AREA_TYPE) * SLAB_LEVEL)
#define __INIT_MAX_MEM_SIZE(size) \
    (__INIT_MIN_MEM_SIZE() + __INIT_SUM_OBJ_SIZE((size)))
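/* A worked example of the accounting above (the numbers are illustrative:
 * they assume the no-debug, no-canary configuration in which SLAB_HDR_SIZE
 * comes to 16 and sizeof(SLAB_AREA_TYPE) to 32): an area holding 64
 * objects of level size 16 needs
 * __MAX_MEM_SIZE(16, 64) = 32 + (16 + 16) * 64 = 2080 bytes. */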
#ifdef PAGE_SIZE
/* Adjust an object count so that the resulting area size lands near a page
 * boundary: the *_align_down variants shrink the count to fit within the
 * rounded-down size, the *_align_up variants grow it to consume the
 * rounded-up size. */
static inline int size_align_down(int slab_size, int size)
{
    int s = __MAX_MEM_SIZE(slab_size, size);
    int p = s - (s & ~(PAGE_SIZE - 1));   /* bytes past the page boundary */
    int o = __SUM_OBJ_SIZE(slab_size, 1); /* bytes per object */
    return size - p / o - (p % o ? 1 : 0);
}

static inline int size_align_up(int slab_size, int size)
{
    int s = __MAX_MEM_SIZE(slab_size, size);
    int p = ((s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - s;
    int o = __SUM_OBJ_SIZE(slab_size, 1);
    return size + p / o;
}

static inline int init_align_down(int size)
{
    int s = __INIT_MAX_MEM_SIZE(size);
    int p = s - (s & ~(PAGE_SIZE - 1));
    int o = __INIT_SUM_OBJ_SIZE(1);
    return size - p / o - (p % o ? 1 : 0);
}

static inline int init_size_align_up(int size)
{
    int s = __INIT_MAX_MEM_SIZE(size);
    int p = ((s + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1)) - s;
    int o = __INIT_SUM_OBJ_SIZE(1);
    return size + p / o;
}
#endif /* PAGE_SIZE */
#ifndef STARTUP_SIZE
# define STARTUP_SIZE 16
#endif

/* Point the level's bump range at a fresh area's raw storage. */
static inline void __set_free_slab_area (SLAB_AREA area, SLAB_MGR mgr,
                                         int level)
{
    int slab_size = slab_levels[level] + SLAB_HDR_SIZE;
    mgr->addr[level] = (void *) area->raw;
    mgr->addr_top[level] = (void *) area->raw + (area->size * slab_size);
    mgr->size[level] += area->size;
}
static inline SLAB_MGR create_slab_mgr (void)
{
#ifdef PAGE_SIZE
    int size = init_size_align_up(STARTUP_SIZE);
#else
    int size = STARTUP_SIZE;
#endif
    unsigned long mem;
    SLAB_AREA area;
    SLAB_MGR mgr;

    mem = (unsigned long) system_malloc(__INIT_MAX_MEM_SIZE(size));

    if (!mem)
        return NULL;

    mgr = (SLAB_MGR) mem;

    /* The manager struct is followed by one embedded startup area per
     * level, all carved out of the same allocation. */
    void * addr = (void *) mgr + sizeof(SLAB_MGR_TYPE);
    int i;
    for (i = 0 ; i < SLAB_LEVEL ; i++) {
        area = (SLAB_AREA) addr;
        area->size = size;

        INIT_LIST_HEAD(area, __list);
        INIT_LISTP(&mgr->area_list[i]);
        listp_add_tail(area, &mgr->area_list[i], __list);

        INIT_LISTP(&mgr->free_list[i]);
        mgr->size[i] = 0;
        __set_free_slab_area(area, mgr, i);

        addr += __MAX_MEM_SIZE(slab_levels[i], size);
    }

    return mgr;
}
static inline void destroy_slab_mgr (SLAB_MGR mgr)
{
    void * addr = (void *) mgr + sizeof(SLAB_MGR_TYPE);
    SLAB_AREA area, tmp, n;
    int i;

    /* Free every area allocated by enlarge_slab_mgr; the areas embedded
     * in the initial allocation are released together with the manager
     * itself at the end. */
    for (i = 0 ; i < SLAB_LEVEL; i++) {
        area = (SLAB_AREA) addr;

        listp_for_each_entry_safe(tmp, n, &mgr->area_list[i], __list) {
            if (tmp != area)
                system_free(tmp,
                            __MAX_MEM_SIZE(slab_levels[i], tmp->size));
        }

        addr += __MAX_MEM_SIZE(slab_levels[i], area->size);
    }

    system_free(mgr, addr - (void *) mgr);
}
static inline SLAB_MGR enlarge_slab_mgr (SLAB_MGR mgr, int level)
{
    SLAB_AREA area;
    int size;

    /* DEP 11/24/17: I don't see how this case is possible.
     * Either way, we should be consistent with whether to
     * return with system_lock held or not.
     * Commenting for now and replacing with an assert */
    /*if (level >= SLAB_LEVEL) {
        system_lock();
        goto out;
    }*/
    assert(level < SLAB_LEVEL);

    /* DEP 11/24/17: This strategy basically doubles a level's size
     * every time it grows. The assumption if we get this far is that
     * mgr->addr == mgr->top_addr */
    assert(mgr->addr[level] == mgr->addr_top[level]);

    size = mgr->size[level];
    area = (SLAB_AREA) system_malloc(__MAX_MEM_SIZE(slab_levels[level], size));
    if (!area)
        return NULL;

    system_lock();
    area->size = size;
    INIT_LIST_HEAD(area, __list);
    listp_add(area, &mgr->area_list[level], __list);
    __set_free_slab_area(area, mgr, level);
    system_unlock();

    //out:
    return mgr;
}
static inline void * slab_alloc (SLAB_MGR mgr, int size)
{
    SLAB_OBJ mobj;
    int i;
    int level = -1;

    /* Find the smallest level that fits the request. */
    for (i = 0 ; i < SLAB_LEVEL ; i++)
        if (size <= slab_levels[i]) {
            level = i;
            break;
        }

    /* Requests larger than the largest level fall through to a direct
     * system allocation, tagged with level (unsigned char) -1. */
    if (level == -1) {
        LARGE_MEM_OBJ mem = (LARGE_MEM_OBJ)
                            system_malloc(sizeof(LARGE_MEM_OBJ_TYPE) + size);
        if (!mem)
            return NULL;

        mem->size = size;
        OBJ_LEVEL(mem) = (unsigned char) -1;

        return OBJ_RAW(mem);
    }

    system_lock();
    assert(mgr->addr[level] <= mgr->addr_top[level]);
    if (mgr->addr[level] == mgr->addr_top[level] &&
        listp_empty(&mgr->free_list[level])) {
        system_unlock();
        if (!enlarge_slab_mgr(mgr, level))
            return NULL;
        system_lock();
    }

    /* Prefer recycling a freed object; otherwise bump-allocate from the
     * current area. */
    if (!listp_empty(&mgr->free_list[level])) {
        mobj = listp_first_entry(&mgr->free_list[level], SLAB_OBJ_TYPE, __list);
        listp_del(mobj, &mgr->free_list[level], __list);
    } else {
        mobj = (void *) mgr->addr[level];
        mgr->addr[level] += slab_levels[level] + SLAB_HDR_SIZE;
    }
    assert(mgr->addr[level] <= mgr->addr_top[level]);
    OBJ_LEVEL(mobj) = level;
    system_unlock();

#ifdef SLAB_CANARY
    unsigned long * m =
            (unsigned long *) ((void *) OBJ_RAW(mobj) + slab_levels[level]);
    *m = SLAB_CANARY_STRING;
#endif

    return OBJ_RAW(mobj);
}
#ifdef SLAB_DEBUG
static inline void * slab_alloc_debug (SLAB_MGR mgr, int size,
                                       const char * file, int line)
{
    void * mem = slab_alloc(mgr, size);
    int i;
    int level = -1;

    for (i = 0 ; i < SLAB_LEVEL ; i++)
        if (size <= slab_levels[i]) {
            level = i;
            break;
        }

    /* Record the allocation site in the debug record behind the canary
     * (only for small objects, and only if the allocation succeeded). */
    if (mem && level != -1) {
        struct slab_debug * debug =
                (struct slab_debug *) (mem + slab_levels[level] +
                                       SLAB_CANARY_SIZE);
        debug->alloc.file = file;
        debug->alloc.line = line;
    }

    return mem;
}
#endif
static inline void slab_free (SLAB_MGR mgr, void * obj)
{
    /* In a general-purpose allocator, freeing NULL is allowed (and is a
     * no-op). We might want to enforce stricter rules for our allocator if
     * we're sure that no clients rely on being able to free NULL. */
    if (obj == NULL)
        return;

    unsigned char level = RAW_TO_LEVEL(obj);

    if (level == (unsigned char) -1) {
        LARGE_MEM_OBJ mem = RAW_TO_OBJ(obj, LARGE_MEM_OBJ_TYPE);
        system_free(mem, mem->size + sizeof(LARGE_MEM_OBJ_TYPE));
        return;
    }

    /* If this happens, either the heap is already corrupted, or someone is
     * freeing something wrong, which will most likely lead to heap
     * corruption. Either way, panic if this happens. TODO: this doesn't
     * allow us to detect cases where the heap headers have been zeroed,
     * which is a common type of heap corruption. We could make this case
     * slightly more likely to be detected by adding a non-zero offset to
     * the level, so a level of 0 in the header would no longer be a valid
     * level. */
    if (level >= SLAB_LEVEL) {
        pal_printf("Heap corruption detected: invalid heap level %u\n",
                   level);
        assert(0); // panic
    }

#ifdef SLAB_CANARY
    unsigned long * m = (unsigned long *) (obj + slab_levels[level]);
    assert((*m) == SLAB_CANARY_STRING);
#endif

    SLAB_OBJ mobj = RAW_TO_OBJ(obj, SLAB_OBJ_TYPE);

    system_lock();
    INIT_LIST_HEAD(mobj, __list);
    listp_add_tail(mobj, &mgr->free_list[level], __list);
    system_unlock();
}
#ifdef SLAB_DEBUG
static inline void slab_free_debug (SLAB_MGR mgr, void * obj,
                                    const char * file, int line)
{
    if (obj == NULL)
        return;

    unsigned char level = RAW_TO_LEVEL(obj);

    if (level < SLAB_LEVEL) {
        struct slab_debug * debug =
                (struct slab_debug *) (obj + slab_levels[level] +
                                       SLAB_CANARY_SIZE);
        debug->free.file = file;
        debug->free.line = line;
    }

    slab_free(mgr, obj);
}
#endif
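/* Illustrative usage (not part of this header): a client creates one
 * manager and pairs slab_alloc with slab_free. The wrapper names below are
 * assumptions; only the slabmgr API itself comes from this file.
 *
 *   static SLAB_MGR mgr;
 *
 *   void heap_init (void)
 *   {
 *       mgr = create_slab_mgr();
 *       assert(mgr);
 *   }
 *
 *   void * my_malloc (int size)
 *   {
 *       return slab_alloc(mgr, size);
 *   }
 *
 *   void my_free (void * ptr)
 *   {
 *       slab_free(mgr, ptr);   // freeing NULL is tolerated, see above
 *   }
 */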
#endif /* SLABMGR_H */