/* Copyright (c) 2008-2018, The Tor Project, Inc. */
/* See LICENSE for licensing information */

/**
 * \file memarea.c
 *
 * \brief Implementation for memarea_t, an allocator for allocating lots of
 * small objects that will be freed all at once.
 */

#include "orconfig.h"
#include "lib/memarea/memarea.h"

#include <stdlib.h>
#include <string.h>

#include "lib/cc/torint.h"
#include "lib/arch/bytes.h"
#include "lib/container/smartlist.h"
#include "lib/log/torlog.h"
#include "lib/log/util_bug.h"
#include "lib/malloc/util_malloc.h"
#ifndef DISABLE_MEMORY_SENTINELS

/** If true, we try to detect any attempts to write beyond the length of a
 * memarea. */
#define USE_SENTINELS

/** All returned pointers should be aligned to the nearest multiple of this
 * value. */
#define MEMAREA_ALIGN SIZEOF_VOID_P

/** A value which, when masked out of a pointer, produces a maximally aligned
 * pointer. */
#if MEMAREA_ALIGN == 4
#define MEMAREA_ALIGN_MASK ((uintptr_t)3)
#elif MEMAREA_ALIGN == 8
#define MEMAREA_ALIGN_MASK ((uintptr_t)7)
#else
#error "void* is neither 4 nor 8 bytes long. I don't know how to align stuff."
#endif /* MEMAREA_ALIGN == 4 || ... */

#if defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER)
#define USE_ALIGNED_ATTRIBUTE
/** Name for the 'memory' member of a memory chunk. */
#define U_MEM mem
#else
#define U_MEM u.mem
#endif /* defined(__GNUC__) && defined(FLEXIBLE_ARRAY_MEMBER) */

#ifdef USE_SENTINELS
/** Magic value that we stick at the end of a memarea so we can make sure
 * there are no run-off-the-end bugs. */
#define SENTINEL_VAL 0x90806622u
/** How many bytes per area do we devote to the sentinel? */
#define SENTINEL_LEN sizeof(uint32_t)
/** Given a mem_area_chunk_t with SENTINEL_LEN extra bytes allocated at the
 * end, set those bytes. */
#define SET_SENTINEL(chunk)                                     \
  STMT_BEGIN                                                    \
  set_uint32( &(chunk)->U_MEM[chunk->mem_size], SENTINEL_VAL ); \
  STMT_END
/** Assert that the sentinel on a memarea is set correctly. */
#define CHECK_SENTINEL(chunk)                                           \
  STMT_BEGIN                                                            \
  uint32_t sent_val = get_uint32(&(chunk)->U_MEM[chunk->mem_size]);     \
  tor_assert(sent_val == SENTINEL_VAL);                                 \
  STMT_END
#else /* !(defined(USE_SENTINELS)) */
#define SENTINEL_LEN 0
#define SET_SENTINEL(chunk) STMT_NIL
#define CHECK_SENTINEL(chunk) STMT_NIL
#endif /* defined(USE_SENTINELS) */
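
/* Added illustration (not in the upstream file): how the sentinel catches a
 * chunk overrun. Here `ch` stands for some memarea_chunk_t; the stray write
 * below lands exactly on the first sentinel byte, so the next
 * CHECK_SENTINEL(ch) hits its tor_assert():
 *
 *   ch->U_MEM[ch->mem_size] = 0;   // off-the-end write: corrupts sentinel
 *   CHECK_SENTINEL(ch);            // now asserts, catching the bug
 */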

/** Increment <b>ptr</b> until it is aligned to MEMAREA_ALIGN. */
static inline void *
realign_pointer(void *ptr)
{
  uintptr_t x = (uintptr_t)ptr;
  x = (x+MEMAREA_ALIGN_MASK) & ~MEMAREA_ALIGN_MASK;
  /* Reinstate this if bug 930 ever reappears
  tor_assert(((void*)x) >= ptr);
  */
  return (void*)x;
}
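
/* Worked example (added for clarity): with MEMAREA_ALIGN == 8, the mask is
 * 7, so an input of 0x1005 realigns as
 *
 *   (0x1005 + 7) & ~7  ==  0x100c & ~7  ==  0x1008,
 *
 * the next multiple of 8 at or above the input; already-aligned values such
 * as 0x1008 map to themselves. */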

/** Implements part of a memarea. New memory is carved off from chunk->mem in
 * increasing order until a request is too big, at which point a new chunk is
 * allocated. */
typedef struct memarea_chunk_t {
  /** Next chunk in this area. Only kept around so we can free it. */
  struct memarea_chunk_t *next_chunk;
  size_t mem_size; /**< How much RAM is available in mem, total? */
  char *next_mem; /**< Next position in mem to allocate data at. If it's
                   * equal to mem+mem_size, this chunk is full. */
#ifdef USE_ALIGNED_ATTRIBUTE
  /** Actual content of the memory chunk. */
  char mem[FLEXIBLE_ARRAY_MEMBER] __attribute__((aligned(MEMAREA_ALIGN)));
#else
  union {
    char mem[1]; /**< Memory space in this chunk. */
    void *void_for_alignment_; /**< Dummy; used to make sure mem is aligned. */
  } u; /**< Union used to enforce alignment when we don't have support for
        * doing it right. */
#endif /* defined(USE_ALIGNED_ATTRIBUTE) */
} memarea_chunk_t;

/** How many bytes are needed for overhead before we get to the memory part
 * of a chunk? */
#define CHUNK_HEADER_SIZE offsetof(memarea_chunk_t, U_MEM)

/** What's the smallest that we'll allocate a chunk? */
#define CHUNK_SIZE 4096
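
/* Layout sketch (added; byte counts assume a typical LP64 build, where the
 * three header fields take 8 bytes each and CHUNK_HEADER_SIZE == 24):
 *
 *   [ next_chunk | mem_size | next_mem ][ mem: mem_size bytes ][ sentinel ]
 *   |<------ CHUNK_HEADER_SIZE ------->|                      |<--- 4 --->|
 *
 * Allocations are carved from the mem region left to right; the sentinel
 * exists only when USE_SENTINELS is defined. */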

/** A memarea_t is an allocation region for a set of small memory requests
 * that will all be freed at once. */
struct memarea_t {
  memarea_chunk_t *first; /**< Top of the chunk stack: never NULL. */
};

/** Helper: allocate a new memarea chunk of around <b>sz</b> bytes. */
static memarea_chunk_t *
alloc_chunk(size_t sz)
{
  tor_assert(sz < SIZE_T_CEILING);

  size_t chunk_size = sz < CHUNK_SIZE ? CHUNK_SIZE : sz;
  memarea_chunk_t *res;
  chunk_size += SENTINEL_LEN;
  res = tor_malloc(chunk_size);
  res->next_chunk = NULL;
  res->mem_size = chunk_size - CHUNK_HEADER_SIZE - SENTINEL_LEN;
  res->next_mem = res->U_MEM;
  tor_assert(res->next_mem+res->mem_size+SENTINEL_LEN ==
             ((char*)res)+chunk_size);
  tor_assert(realign_pointer(res->next_mem) == res->next_mem);
  SET_SENTINEL(res);
  return res;
}
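
/* Worked numbers (added; they assume CHUNK_HEADER_SIZE == 24 as on LP64):
 * alloc_chunk(4096) mallocs 4096 + SENTINEL_LEN == 4100 bytes and sets
 * mem_size = 4100 - 24 - 4 == 4072. For an oversized request, memarea_alloc()
 * below calls alloc_chunk(sz + CHUNK_HEADER_SIZE), which makes mem_size come
 * out to exactly sz. */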

/** Release <b>chunk</b> from a memarea. */
static void
memarea_chunk_free_unchecked(memarea_chunk_t *chunk)
{
  CHECK_SENTINEL(chunk);
  tor_free(chunk);
}

/** Allocate and return a new memarea. */
memarea_t *
memarea_new(void)
{
  memarea_t *head = tor_malloc(sizeof(memarea_t));
  head->first = alloc_chunk(CHUNK_SIZE);
  return head;
}
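
/* Typical lifecycle (an added sketch using only the functions in this file):
 *
 *   memarea_t *area = memarea_new();
 *   void *buf = memarea_alloc(area, 64);
 *   char *name = memarea_strdup(area, "example");
 *   ... use buf and name; no individual frees needed ...
 *   memarea_drop_all_(area);   // frees buf, name, and the area itself
 */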

/** Free <b>area</b>, invalidating all pointers returned from memarea_alloc()
 * and friends for this area. */
void
memarea_drop_all_(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  for (chunk = area->first; chunk; chunk = next) {
    next = chunk->next_chunk;
    memarea_chunk_free_unchecked(chunk);
  }
  area->first = NULL; /* fail fast on use-after-free */
  tor_free(area);
}

/** Forget about having allocated anything in <b>area</b>, and free some of
 * the backing storage associated with it, as appropriate. Invalidates all
 * pointers returned from memarea_alloc() for this area. */
void
memarea_clear(memarea_t *area)
{
  memarea_chunk_t *chunk, *next;
  if (area->first->next_chunk) {
    for (chunk = area->first->next_chunk; chunk; chunk = next) {
      next = chunk->next_chunk;
      memarea_chunk_free_unchecked(chunk);
    }
    area->first->next_chunk = NULL;
  }
  area->first->next_mem = area->first->U_MEM;
}

/** Return true iff <b>p</b> is in a range that has been returned by an
 * allocation from <b>area</b>. */
int
memarea_owns_ptr(const memarea_t *area, const void *p)
{
  memarea_chunk_t *chunk;
  const char *ptr = p;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    if (ptr >= chunk->U_MEM && ptr < chunk->next_mem)
      return 1;
  }
  return 0;
}

/** Return a pointer to a chunk of memory in <b>area</b> of at least <b>sz</b>
 * bytes. <b>sz</b> should be significantly smaller than the area's chunk
 * size, though we can deal if it isn't. */
void *
memarea_alloc(memarea_t *area, size_t sz)
{
  memarea_chunk_t *chunk = area->first;
  char *result;
  tor_assert(chunk);
  CHECK_SENTINEL(chunk);
  tor_assert(sz < SIZE_T_CEILING);
  if (sz == 0)
    sz = 1;
  tor_assert(chunk->next_mem <= chunk->U_MEM + chunk->mem_size);
  const size_t space_remaining =
    (chunk->U_MEM + chunk->mem_size) - chunk->next_mem;
  if (sz > space_remaining) {
    if (sz+CHUNK_HEADER_SIZE >= CHUNK_SIZE) {
      /* This allocation is too big. Stick it in a special chunk, and put
       * that chunk second in the list. */
      memarea_chunk_t *new_chunk = alloc_chunk(sz+CHUNK_HEADER_SIZE);
      new_chunk->next_chunk = chunk->next_chunk;
      chunk->next_chunk = new_chunk;
      chunk = new_chunk;
    } else {
      memarea_chunk_t *new_chunk = alloc_chunk(CHUNK_SIZE);
      new_chunk->next_chunk = chunk;
      area->first = chunk = new_chunk;
    }
    tor_assert(chunk->mem_size >= sz);
  }
  result = chunk->next_mem;
  chunk->next_mem = chunk->next_mem + sz;
  /* Reinstate these if bug 930 ever comes back
  tor_assert(chunk->next_mem >= chunk->U_MEM);
  tor_assert(chunk->next_mem <= chunk->U_MEM+chunk->mem_size);
  */
  chunk->next_mem = realign_pointer(chunk->next_mem);
  return result;
}
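
/* Added example of the two refill paths above: starting from a fresh area,
 * memarea_alloc(area, 8000) satisfies sz + CHUNK_HEADER_SIZE >= CHUNK_SIZE,
 * so it splices a dedicated oversized chunk in *second*, keeping the
 * mostly-empty first chunk on top for future small requests. A 100-byte
 * request that merely overflows the current chunk instead pushes a fresh
 * CHUNK_SIZE chunk onto the top of the stack. */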

/** As memarea_alloc(), but clears the memory it returns. */
void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = memarea_alloc(area, sz);
  memset(result, 0, sz);
  return result;
}

/** As memdup, but returns the memory from <b>area</b>. */
void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  char *result = memarea_alloc(area, n);
  memcpy(result, s, n);
  return result;
}

/** As strdup, but returns the memory from <b>area</b>. */
char *
memarea_strdup(memarea_t *area, const char *s)
{
  return memarea_memdup(area, s, strlen(s)+1);
}

/** As strndup, but returns the memory from <b>area</b>. */
char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = 0;
  char *result;
  tor_assert(n < SIZE_T_CEILING);
  for (ln = 0; ln < n && s[ln]; ++ln)
    ;
  result = memarea_alloc(area, ln+1);
  memcpy(result, s, ln);
  result[ln]='\0';
  return result;
}
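
/* Added example: memarea_strndup(area, "hello world", 5) copies five bytes
 * and NUL-terminates, yielding "hello". If <b>s</b> is shorter than
 * <b>n</b>, the loop above stops at s's own terminator, so the copy is
 * never longer than the source. */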

/** Set <b>allocated_out</b> to the number of bytes allocated in <b>area</b>,
 * and <b>used_out</b> to the number of bytes currently used. */
void
memarea_get_stats(memarea_t *area, size_t *allocated_out, size_t *used_out)
{
  size_t a = 0, u = 0;
  memarea_chunk_t *chunk;
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    a += CHUNK_HEADER_SIZE + chunk->mem_size;
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    u += CHUNK_HEADER_SIZE + (chunk->next_mem - chunk->U_MEM);
  }
  *allocated_out = a;
  *used_out = u;
}
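
/* Added usage sketch: the invariant that used bytes never exceed allocated
 * bytes can be checked like so.
 *
 *   size_t allocated = 0, used = 0;
 *   memarea_get_stats(area, &allocated, &used);
 *   tor_assert(used <= allocated);
 */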

/** Assert that <b>area</b> is okay. */
void
memarea_assert_ok(memarea_t *area)
{
  memarea_chunk_t *chunk;
  tor_assert(area->first);
  for (chunk = area->first; chunk; chunk = chunk->next_chunk) {
    CHECK_SENTINEL(chunk);
    tor_assert(chunk->next_mem >= chunk->U_MEM);
    tor_assert(chunk->next_mem <=
               (char*) realign_pointer(chunk->U_MEM+chunk->mem_size));
  }
}

#else /* !(!defined(DISABLE_MEMORY_SENTINELS)) */

/* When memory sentinels are disabled, the memarea API is implemented as a
 * smartlist of individually malloc'd pieces, so external memory checkers
 * can track each allocation separately. */

struct memarea_t {
  smartlist_t *pieces;
};

memarea_t *
memarea_new(void)
{
  memarea_t *ma = tor_malloc_zero(sizeof(memarea_t));
  ma->pieces = smartlist_new();
  return ma;
}

void
memarea_drop_all_(memarea_t *area)
{
  memarea_clear(area);
  smartlist_free(area->pieces);
  tor_free(area);
}

void
memarea_clear(memarea_t *area)
{
  SMARTLIST_FOREACH(area->pieces, void *, p, tor_free_(p));
  smartlist_clear(area->pieces);
}

int
memarea_owns_ptr(const memarea_t *area, const void *ptr)
{
  /* Note: in this mode we only recognize the first byte of each piece. */
  SMARTLIST_FOREACH(area->pieces, const void *, p, if (ptr == p) return 1;);
  return 0;
}

void *
memarea_alloc(memarea_t *area, size_t sz)
{
  void *result = tor_malloc(sz);
  smartlist_add(area->pieces, result);
  return result;
}

void *
memarea_alloc_zero(memarea_t *area, size_t sz)
{
  void *result = tor_malloc_zero(sz);
  smartlist_add(area->pieces, result);
  return result;
}

void *
memarea_memdup(memarea_t *area, const void *s, size_t n)
{
  void *r = memarea_alloc(area, n);
  memcpy(r, s, n);
  return r;
}

char *
memarea_strdup(memarea_t *area, const char *s)
{
  size_t n = strlen(s);
  char *r = memarea_alloc(area, n+1);
  memcpy(r, s, n);
  r[n] = 0;
  return r;
}

char *
memarea_strndup(memarea_t *area, const char *s, size_t n)
{
  size_t ln = strnlen(s, n);
  char *r = memarea_alloc(area, ln+1);
  memcpy(r, s, ln);
  r[ln] = 0;
  return r;
}

void
memarea_get_stats(memarea_t *area,
                  size_t *allocated_out, size_t *used_out)
{
  /* Stats are not meaningful in this mode; report small dummy values. */
  (void)area;
  *allocated_out = *used_out = 128;
}

void
memarea_assert_ok(memarea_t *area)
{
  (void)area;
}

#endif /* !defined(DISABLE_MEMORY_SENTINELS) */