memarea.c

/* Copyright (c) 2008-2010, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/** \file memarea.c
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */

#include "orconfig.h"
#include <stdlib.h>
#include "memarea.h"
#include "util.h"
#include "compat.h"
#include "log.h"

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK 3lu
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK 7lu
#else
#error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
#endif

/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
static INLINE void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  tor_assert(((void*)x) >= ptr); // XXXX021 remove this once bug 930 is solved
  return (void*)x;
}
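
/* Illustrative note (not part of the original file): with MEMAREA_ALIGN == 8,
 * realign_pointer() rounds an address up to the next multiple of 8, e.g.
 *
 *   realign_pointer((void*)0x1001)  ->  (void*)0x1008
 *   realign_pointer((void*)0x1008)  ->  (void*)0x1008   // already aligned
 *
 * since (x + 7) & ~7 rounds up and then clears the low three bits. */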

/** Implements part of a memarea.  New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in u.mem, total? */
  char *next_mem; /**< Next position in u.mem to allocate data at.  If it's
                   * greater than or equal to mem+mem_size, this chunk is
                   * full. */
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *_void_for_alignment; /**< Dummy; used to make sure mem is aligned. */
  } u;
} memarea_chunk_t;

#define CHUNK_HEADER_SIZE STRUCT_OFFSET(memarea_chunk_t, u)

#define CHUNK_SIZE 4096

/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};

/** How many chunks will we put into the freelist before freeing them? */
#define MAX_FREELIST_LEN 4
/** The number of memarea chunks currently in our freelist. */
static int freelist_len=0;
/** A linked list of unused memory area chunks.  Used to prevent us from
 * spinning in malloc/free loops. */
static memarea_chunk_t *freelist = NULL;

/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz, int freelist_ok)
{
  if (freelist && freelist_ok) {
    memarea_chunk_t *res = freelist;
    freelist = res->next_chunk;
    res->next_chunk = NULL;
    --freelist_len;
    return res;
  } else {
    size_t chunk_size = freelist_ok ? CHUNK_SIZE : sz;
    memarea_chunk_t *res = tor_malloc_roundup(&chunk_size);
    res->next_chunk = NULL;
    res->mem_size = chunk_size - CHUNK_HEADER_SIZE;
    res->next_mem = res->u.mem;
    tor_assert(res->next_mem+res->mem_size == ((char*)res)+chunk_size);
    tor_assert(realign_pointer(res->next_mem) == res->next_mem);
    return res;
  }
}

/** Release <b>chunk</b> from a memarea, either by adding it to the freelist
 * or by freeing it if the freelist is already too big. */
static void
chunk_free(memarea_chunk_t *chunk)
{
  if (freelist_len < MAX_FREELIST_LEN) {
    ++freelist_len;
    chunk->next_chunk = freelist;
    freelist = chunk;
    chunk->next_mem = chunk->u.mem;
  } else {
    tor_free(chunk);
  }
}
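
/* Freelist note (illustrative, not part of the original file): chunk_free()
 * caches at most MAX_FREELIST_LEN chunks here, resetting next_mem so a reused
 * chunk starts out empty, and frees anything beyond that outright.
 * alloc_chunk() only consults the cache when freelist_ok is nonzero, so
 * oversized requests always get a freshly allocated chunk sized to the
 * request. */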

/** Allocate and return a new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE, 1);
  return head;
}
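
/* Usage sketch (illustrative only; relies on the functions declared in
 * memarea.h):
 *
 *   memarea_t *area = memarea_new();
 *   void *buf = memarea_alloc(area, 128);        // carved from the first chunk
 *   char *copy = memarea_strdup(area, "hello");  // also backed by the area
 *   ...
 *   memarea_drop_all(area);  // frees buf, copy, and the area itself at once
 */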

/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area */
void
memarea_drop_all(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    chunk_free(chunk);
  }
  area->first = NULL; /* fail fast on any later use of this area */
  tor_free(area);
}

/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      chunk_free(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->u.mem;
}
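
/* Reuse sketch (illustrative only): memarea_clear() keeps the first chunk
 * around, so an area can be recycled across iterations without a malloc/free
 * round-trip each time.  The driver functions below are hypothetical:
 *
 *   memarea_t *area = memarea_new();
 *   while (have_more_documents()) {   // hypothetical loop condition
 *     parse_one_document(area);       // allocates freely from the area
 *     memarea_clear(area);            // all those pointers are now invalid
 *   }
 *   memarea_drop_all(area);
 */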

/** Remove all unused memarea chunks from the internal freelist. */
void
memarea_clear_freelist(void)
{
  memarea_chunk_t *chunk, *next;
  freelist_len = 0;
  for (chunk = freelist; chunk; chunk = next) {
    next = chunk->next_chunk;
    tor_free(chunk);
  }
  freelist = NULL;
}

/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->u.mem && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}

/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes.  <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  if (sz == 0)
    sz = 1;
  if (chunk->next_mem+sz > chunk->u.mem+chunk->mem_size) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big.  Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE, 0);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE, 1);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  // XXXX021 remove these once bug 930 is solved.
  tor_assert(chunk->next_mem >= chunk->u.mem);
  tor_assert(chunk->next_mem <= chunk->u.mem+chunk->mem_size);
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
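
/* Behavior sketch (illustrative only): with CHUNK_SIZE 4096, small requests
 * come out of the current chunk, while an oversized request gets a dedicated
 * chunk placed second in the list so the partially-used first chunk stays
 * available:
 *
 *   void *a = memarea_alloc(area, 100);   // fits in the current chunk
 *   void *b = memarea_alloc(area, 8000);  // too big: gets its own chunk
 *   void *c = memarea_alloc(area, 100);   // still carved from the first chunk
 *
 * All three pointers are aligned to MEMAREA_ALIGN, because next_mem is
 * realigned after every allocation. */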

/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}

/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}

/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}

/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln;
  char *result;
  const char *cp, *end = s+n;
  for (cp = s; cp < end && *cp; ++cp)
    ;
  /* cp now points to s+n, or to the 0 in the string. */
  ln = cp-s;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln]='\0';
  return result;
}

/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->u.mem);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->u.mem);
  }
  *allocated_out = a;
  *used_out = u;
}
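
/* Usage sketch (illustrative only): both counters include CHUNK_HEADER_SIZE
 * per chunk, so "used" approximates the real footprint of live allocations
 * rather than just the bytes handed out:
 *
 *   size_t allocated, used;
 *   memarea_get_stats(area, &allocated, &used);
 *   log_debug(LD_GENERAL, "memarea: %lu of %lu bytes used",
 *             (unsigned long)used, (unsigned long)allocated);
 */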

/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    tor_assert(chunk->next_mem >= chunk->u.mem);
    tor_assert(chunk->next_mem <=
               (char*) realign_pointer(chunk->u.mem+chunk->mem_size));
  }
}