/* Copyright (c) 2008-2015, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/** \file memarea.c
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */

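/* A minimal usage sketch (illustrative only; see memarea.h for the full
 * API):
 *
 *     memarea_t *area = memarea_new();
 *     void *obj = memarea_alloc(area, 64);
 *     char *copy = memarea_strdup(area, "example");
 *     (use obj and copy...)
 *     memarea_drop_all(area);   (frees obj and copy in one call)
 */
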
#include "orconfig.h"
#include <stdlib.h>
#include "memarea.h"
#include "util.h"
#include "compat.h"
#include "torlog.h"

/** If true, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK 3lu
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK 7lu
#else
#error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
#endif

#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
#define USE_ALIGNED_ATTRIBUTE
#define U_MEM mem
#else
#define U_MEM u.mem
#endif

#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                     \
  STMT_BEGIN                                                    \
  set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]);     \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif

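/* With sentinels enabled, any write past mem[mem_size] clobbers the
 * sentinel word, and the next CHECK_SENTINEL on that chunk fails its
 * tor_assert, which is how run-off-the-end bugs get caught. */
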
/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
static INLINE void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  /* Reinstate this if bug 930 ever reappears
  tor_assert(((void*)x) >= ptr);
  */
  return (void*)x;
}

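/* Worked example: with MEMAREA_ALIGN == 8 (so MEMAREA_ALIGN_MASK == 7),
 * realign_pointer() maps 0x1003 to (0x1003 + 7) & ~7 == 0x1008, and leaves
 * the already-aligned 0x1008 unchanged. */
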
/** Implements part of a memarea. New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at. If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is
                                * aligned. */
  } u;
#endif
} memarea_chunk_t;

/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, U_MEM)

/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096

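/* Rough sketch of a chunk's layout as built by alloc_chunk() below:
 *
 *   [ next_chunk | mem_size | next_mem ][ mem ... ][ sentinel ]
 *   |<------ CHUNK_HEADER_SIZE ------->|<mem_size>|<SENTINEL_LEN>|
 *
 * next_mem always points into the mem region, and the sentinel word (when
 * USE_SENTINELS is defined) sits at mem[mem_size], just past the usable
 * space. */
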
/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};

/** How many chunks will we put into the freelist before freeing them? */
#define MAX_FREELIST_LEN 4
/** The number of memarea chunks currently in our freelist. */
static int freelist_len=0;
/** A linked list of unused memory area chunks. Used to prevent us from
 * spinning in malloc/free loops. */
static memarea_chunk_t *freelist = NULL;

/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz, int freelist_ok)
{
  tor_assert(sz < SIZE_T_CEILING);
  if (freelist && freelist_ok) {
    memarea_chunk_t *res = freelist;
    freelist = res->next_chunk;
    res->next_chunk = NULL;
    --freelist_len;
    CHECK_SENTINEL(res);
    return res;
  } else {
    size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
    memarea_chunk_t *res;
    chunk_size += SENTINEL_LEN;
    res = tor_malloc(chunk_size);
    res->next_chunk = NULL;
    res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
    res->next_mem = res->U_MEM;
    tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
               ((char*)res)+chunk_size);
    tor_assert(realign_pointer(res->next_mem) == res->next_mem);
    SET_SENTINEL(res);
    return res;
  }
}

/** Release <b>chunk</b> from a memarea, either by adding it to the freelist
 * or by freeing it if the freelist is already too big. */
static void
chunk_free_unchecked(memarea_chunk_t *chunk)
{
  CHECK_SENTINEL(chunk);
  if (freelist_len < MAX_FREELIST_LEN) {
    ++freelist_len;
    chunk->next_chunk = freelist;
    freelist = chunk;
    chunk->next_mem = chunk->U_MEM;
  } else {
    tor_free(chunk);
  }
}

/** Allocate and return a new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE, 1);
  return head;
}

/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area. */
void
memarea_drop_all(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    chunk_free_unchecked(chunk);
  }
  area->first = NULL; /* fail fast on use-after-free */
  tor_free(area);
}

/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      chunk_free_unchecked(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->U_MEM;
}

/** Remove all unused memarea chunks from the internal freelist. */
void
memarea_clear_freelist(void)
{
  memarea_chunk_t *chunk, *next;
  freelist_len = 0;
  for (chunk = freelist; chunk; chunk = next) {
    next = chunk->next_chunk;
    tor_free(chunk);
  }
  freelist = NULL;
}

/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}

/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
  const size_t space_remaining =
    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
  if (sz > space_remaining) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big. Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}

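/* Example: memarea_alloc(area, 3) returns the current next_mem, advances
 * next_mem by 3, and then rounds it up to the next MEMAREA_ALIGN boundary,
 * so whatever the following allocation returns is aligned as well. */
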
/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}

/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}

/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}

/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = 0;
  char *result;
  tor_assert(n < SIZE_T_CEILING);
  for (ln = 0; ln < n && s[ln]; ++ln)
    ;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln]='\0';
  return result;
}

/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
  }
  *allocated_out = a;
  *used_out = u;
}

/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    tor_assert(chunk->next_mem <=
               (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
  }
}