// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.

#include "base/low_level_alloc.h"
#include "base/dynamic_annotations.h"
#include "base/spinlock.h"
#include "base/logging.h"
#include "malloc_hook-inl.h"
#include <gperftools/malloc_hook.h>
#include <errno.h>
#include <string.h>                     // for memset() in ArenaInit()
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <new>                          // for placement-new

// On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
// form of the name instead.
#ifndef MAP_ANONYMOUS
# define MAP_ANONYMOUS MAP_ANON
#endif
// A first-fit allocator with amortized logarithmic free() time.

LowLevelAlloc::PagesAllocator::~PagesAllocator() {
}

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

// We put this class-only struct in a namespace to avoid polluting the
// global namespace with this struct name (thus risking an ODR violation).
namespace low_level_alloc_internal {
  // This struct describes one allocated block, or one free block.
  struct AllocList {
    struct Header {
      intptr_t size;  // size of entire region, including this field. Must be
                      // first.  Valid in both allocated and unallocated blocks
      intptr_t magic; // kMagicAllocated or kMagicUnallocated xor this
      LowLevelAlloc::Arena *arena;  // pointer to parent arena
      void *dummy_for_alignment;    // aligns regions to 0 mod 2*sizeof(void*)
    } header;

    // Next two fields: in unallocated blocks: freelist skiplist data
    //                  in allocated blocks: overlaps with client data
    int levels;                 // levels in skiplist used
    AllocList *next[kMaxLevel]; // actually has levels elements.
                                // The AllocList node may not have room for
                                // all kMaxLevel entries.  See max_fit in
                                // LLA_SkiplistLevels()
  };
}
using low_level_alloc_internal::AllocList;
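
// Illustrative note (not part of the original source): on a typical LP64
// build Header is four 8-byte fields, so sizeof(AllocList::Header) == 32 and
// the client-visible block begins 32 bytes past the AllocList itself.  A
// minimal sanity sketch one could add, assuming COMPILE_ASSERT from
// base/basictypes.h is available in this tree:
//
//   COMPILE_ASSERT(sizeof(AllocList::Header) % (2 * sizeof(void*)) == 0,
//                  header_keeps_client_data_doubleword_aligned);
//
// This simply restates the comment on dummy_for_alignment above.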
// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) { // i == floor(size/2**result)
    result++;
  }
  //    floor(size / 2**result) <= base < floor(size / 2**(result-1))
  // => log2(size/(base+1)) <= result < 1+log2(size/base)
  // => result ~= log2(size/base)
  return result;
}

// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random() {
  static uint32 r = 1;         // no locking---it's not critical
  ANNOTATE_BENIGN_RACE(&r, "benign race, not critical.");
  int result = 1;
  while ((((r = r*1103515245 + 12345) >> 30) & 1) == 0) {
    result++;
  }
  return result;
}

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size.  Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist:  See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes.  "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, bool random) {
  // max_fit is the maximum number of levels that will fit in a node for the
  // given size.  We can't return more than max_fit, no matter what the
  // random number generator says.
  int max_fit = (size-OFFSETOF_MEMBER(AllocList, next)) / sizeof (AllocList *);
  int level = IntLog2(size, base) + (random? Random() : 1);
  if (level > max_fit) level = max_fit;
  if (level > kMaxLevel-1) level = kMaxLevel - 1;
  RAW_CHECK(level >= 1, "block not big enough for even one level");
  return level;
}
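
// Worked example (illustrative, not in the original source): with
// base == 64 (the arena min_size on a typical LP64 build) and a 1024-byte
// free block, IntLog2(1024, 64) == 4, so a deterministic caller
// (random == false) gets level 5, while a randomized caller gets 5 plus a
// geometrically distributed extra amount, capped by max_fit and kMaxLevel-1:
//
//   assert(LLA_SkiplistLevels(1024, 64, false) == 5);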
// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head,
                                     AllocList *e, AllocList **prev) {
  AllocList *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocList *n; (n = p->next[level]) != 0 && n < e; p = n) {
    }
    prev[level] = p;
  }
  return (head->levels == 0) ? 0 : prev[0]->next[0];
}

// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {
  LLA_SkiplistSearch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) { // extend prev pointers
    prev[head->levels] = head;                       //   to all *e's levels
  }
  for (int i = 0; i != e->levels; i++) {             // add element to list
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}

// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {
  AllocList *found = LLA_SkiplistSearch(head, e, prev);
  RAW_CHECK(e == found, "element not in freelist");
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == 0) {
    head->levels--;   // reduce head->levels if level unused
  }
}
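
// Illustrative note (not part of the original source): these three functions
// keep the free list as a skiplist sorted by block address.  A minimal usage
// sketch, assuming a node f whose header has already been filled in:
//
//   AllocList *prev[kMaxLevel];
//   f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
//   LLA_SkiplistInsert(&arena->freelist, f, prev);   // add to freelist
//   ...
//   LLA_SkiplistDelete(&arena->freelist, f, prev);   // remove again
//
// This is exactly the pattern used by AddToFreelist() and DoAllocWithArena()
// below.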
// ---------------------------------------------------------------------------
// Arena implementation

struct LowLevelAlloc::Arena {
  Arena() : mu(SpinLock::LINKER_INITIALIZED) {} // does nothing; for static init
  explicit Arena(int) : pagesize(0) {}          // set pagesize to zero explicitly
                                                // for non-static init

  SpinLock mu;              // protects freelist, allocation_count,
                            // pagesize, roundup, min_size
  AllocList freelist;       // head of free list; sorted by addr (under mu)
  int32 allocation_count;   // count of allocated blocks (under mu)
  int32 flags;              // flags passed to NewArena (ro after init)
  size_t pagesize;          // ==getpagesize()  (init under mu, then ro)
  size_t roundup;           // lowest power of 2 >= max(16,sizeof (AllocList::Header))
                            // (init under mu, then ro)
  size_t min_size;          // smallest allocation block size
                            // (init under mu, then ro)
  PagesAllocator *allocator;
};

// The default arena, which is used when 0 is passed instead of an Arena
// pointer.
static struct LowLevelAlloc::Arena default_arena;

// Non-malloc-hooked arenas: used only to allocate metadata for arenas that
// do not want malloc hook reporting, so that for them there's no malloc hook
// reporting even during arena creation.
static struct LowLevelAlloc::Arena unhooked_arena;
static struct LowLevelAlloc::Arena unhooked_async_sig_safe_arena;

namespace {

  class DefaultPagesAllocator : public LowLevelAlloc::PagesAllocator {
  public:
    virtual ~DefaultPagesAllocator() {}
    virtual void *MapPages(int32 flags, size_t size);
    virtual void UnMapPages(int32 flags, void *addr, size_t size);
  };

}

// magic numbers to identify allocated and unallocated blocks
static const intptr_t kMagicAllocated = 0x4c833e95;
static const intptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
  class SCOPED_LOCKABLE ArenaLock {
  public:
    explicit ArenaLock(LowLevelAlloc::Arena *arena)
        EXCLUSIVE_LOCK_FUNCTION(arena->mu)
        : left_(false), mask_valid_(false), arena_(arena) {
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
        // We've decided not to support async-signal-safe arena use until
        // there is a demonstrated need.  Here's how one could do it though
        // (would need to be made more portable).
#if 0
        sigset_t all;
        sigfillset(&all);
        this->mask_valid_ =
            (pthread_sigmask(SIG_BLOCK, &all, &this->mask_) == 0);
#else
        RAW_CHECK(false, "We do not yet support async-signal-safe arena.");
#endif
      }
      this->arena_->mu.Lock();
    }
    ~ArenaLock() { RAW_CHECK(this->left_, "haven't left Arena region"); }
    void Leave() /*UNLOCK_FUNCTION()*/ {
      this->arena_->mu.Unlock();
#if 0
      if (this->mask_valid_) {
        pthread_sigmask(SIG_SETMASK, &this->mask_, 0);
      }
#endif
      this->left_ = true;
    }
  private:
    bool left_;       // whether left region
    bool mask_valid_;
#if 0
    sigset_t mask_;   // old mask of blocked signals
#endif
    LowLevelAlloc::Arena *arena_;
    DISALLOW_COPY_AND_ASSIGN(ArenaLock);
  };
} // anonymous namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static intptr_t Magic(intptr_t magic, AllocList::Header *ptr) {
  return magic ^ reinterpret_cast<intptr_t>(ptr);
}
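
// Illustrative note (not part of the original source): because the header's
// own address is folded into the magic value, a header that has been copied
// or reused at a different address fails the check even if its bytes are
// intact.  A hedged sketch of the consistency test used throughout this file
// ("block" and "looks_allocated" are hypothetical names for this example):
//
//   AllocList::Header *h = &block->header;
//   bool looks_allocated = (h->magic == Magic(kMagicAllocated, h));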
// Initialize the fields of an Arena
static void ArenaInit(LowLevelAlloc::Arena *arena) {
  if (arena->pagesize == 0) {
    arena->pagesize = getpagesize();
    // Round up block sizes to a power of two close to the header size.
    arena->roundup = 16;
    while (arena->roundup < sizeof (arena->freelist.header)) {
      arena->roundup += arena->roundup;
    }
    // Don't allocate blocks less than twice the roundup size to avoid tiny
    // free blocks.
    arena->min_size = 2 * arena->roundup;
    arena->freelist.header.size = 0;
    arena->freelist.header.magic =
        Magic(kMagicUnallocated, &arena->freelist.header);
    arena->freelist.header.arena = arena;
    arena->freelist.levels = 0;
    memset(arena->freelist.next, 0, sizeof (arena->freelist.next));
    arena->allocation_count = 0;
    if (arena == &default_arena) {
      // Default arena should be hooked, e.g. for heap-checker to trace
      // pointer chains through objects in the default arena.
      arena->flags = LowLevelAlloc::kCallMallocHook;
    } else if (arena == &unhooked_async_sig_safe_arena) {
      arena->flags = LowLevelAlloc::kAsyncSignalSafe;
    } else {
      arena->flags = 0;   // other arenas' flags may be overridden by client,
                          // but unhooked_arena will have 0 in 'flags'.
    }
    arena->allocator = LowLevelAlloc::GetDefaultPagesAllocator();
  }
}
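
// Worked example (illustrative, not in the original source): on an LP64
// build sizeof(arena->freelist.header) is 32, so the doubling loop above
// takes roundup from 16 to 32 and min_size becomes 64.  On an ILP32 build
// the header is 16 bytes, roundup stays 16, and min_size is 32.  These are
// the "base" values later fed to LLA_SkiplistLevels().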
// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(int32 flags,
                                              Arena *meta_data_arena) {
  return NewArenaWithCustomAlloc(flags, meta_data_arena, NULL);
}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArenaWithCustomAlloc(int32 flags,
                                                             Arena *meta_data_arena,
                                                             PagesAllocator *allocator) {
  RAW_CHECK(meta_data_arena != 0, "must pass a valid arena");
  if (meta_data_arena == &default_arena) {
    if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
      meta_data_arena = &unhooked_async_sig_safe_arena;
    } else if ((flags & LowLevelAlloc::kCallMallocHook) == 0) {
      meta_data_arena = &unhooked_arena;
    }
  }
  // Arena(0) uses the constructor for non-static contexts
  Arena *result =
    new (AllocWithArena(sizeof (*result), meta_data_arena)) Arena(0);
  ArenaInit(result);
  result->flags = flags;
  if (allocator) {
    result->allocator = allocator;
  }
  return result;
}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {
  RAW_CHECK(arena != 0 && arena != &default_arena && arena != &unhooked_arena,
            "may not delete default arena");
  ArenaLock section(arena);
  bool empty = (arena->allocation_count == 0);
  section.Leave();
  if (empty) {
    while (arena->freelist.next[0] != 0) {
      AllocList *region = arena->freelist.next[0];
      size_t size = region->header.size;
      arena->freelist.next[0] = region->next[0];
      RAW_CHECK(region->header.magic ==
                Magic(kMagicUnallocated, &region->header),
                "bad magic number in DeleteArena()");
      RAW_CHECK(region->header.arena == arena,
                "bad arena pointer in DeleteArena()");
      RAW_CHECK(size % arena->pagesize == 0,
                "empty arena has non-page-aligned block size");
      RAW_CHECK(reinterpret_cast<intptr_t>(region) % arena->pagesize == 0,
                "empty arena has non-page-aligned block");
      int munmap_result;
      if ((arena->flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
        munmap_result = munmap(region, size);
      } else {
        munmap_result = MallocHook::UnhookedMUnmap(region, size);
      }
      RAW_CHECK(munmap_result == 0,
                "LowLevelAlloc::DeleteArena: munmap failed address");
    }
    Free(arena);
  }
  return empty;
}

// ---------------------------------------------------------------------------

// Return value rounded up to next multiple of align.
// align must be a power of two.
static intptr_t RoundUp(intptr_t addr, intptr_t align) {
  return (addr + align - 1) & ~(align - 1);
}
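
// Worked example (illustrative, not in the original source): because align
// is a power of two, the mask trick above needs no division or branching:
//
//   RoundUp(100, 16) == 112    // (100 + 15) & ~15
//   RoundUp(112, 16) == 112    // already-aligned values are unchanged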
// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L < arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {
  RAW_CHECK(i < prev->levels, "too few levels in Next()");
  AllocList *next = prev->next[i];
  if (next != 0) {
    RAW_CHECK(next->header.magic == Magic(kMagicUnallocated, &next->header),
              "bad magic number in Next()");
    RAW_CHECK(next->header.arena == arena,
              "bad arena pointer in Next()");
    if (prev != &arena->freelist) {
      RAW_CHECK(prev < next, "unordered freelist");
      RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                reinterpret_cast<char *>(next), "malformed freelist");
    }
  }
  return next;
}

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {
  AllocList *n = a->next[0];
  if (n != 0 && reinterpret_cast<char *>(a) + a->header.size ==
                    reinterpret_cast<char *>(n)) {
    LowLevelAlloc::Arena *arena = a->header.arena;
    a->header.size += n->header.size;
    n->header.magic = 0;
    n->header.arena = 0;
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, n, prev);
    LLA_SkiplistDelete(&arena->freelist, a, prev);
    a->levels = LLA_SkiplistLevels(a->header.size, arena->min_size, true);
    LLA_SkiplistInsert(&arena->freelist, a, prev);
  }
}

// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {
  AllocList *f = reinterpret_cast<AllocList *>(
                        reinterpret_cast<char *>(v) - sizeof (f->header));
  RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
            "bad magic number in AddToFreelist()");
  RAW_CHECK(f->header.arena == arena,
            "bad arena pointer in AddToFreelist()");
  f->levels = LLA_SkiplistLevels(f->header.size, arena->min_size, true);
  AllocList *prev[kMaxLevel];
  LLA_SkiplistInsert(&arena->freelist, f, prev);
  f->header.magic = Magic(kMagicUnallocated, &f->header);
  Coalesce(f);                  // maybe coalesce with successor
  Coalesce(prev[0]);            // maybe coalesce with predecessor
}
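
// Illustrative note (not part of the original source): freeing the middle of
// three contiguous blocks merges in both directions.  If blocks A, B and C
// are adjacent in memory with A and C already free, then AddToFreelist(B)
// first inserts B, Coalesce(B) absorbs C into B, and Coalesce(prev[0])
// (which is A) absorbs the combined B+C into A, leaving a single free block
// spanning all three.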
// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {
  if (v != 0) {
    AllocList *f = reinterpret_cast<AllocList *>(
                        reinterpret_cast<char *>(v) - sizeof (f->header));
    RAW_CHECK(f->header.magic == Magic(kMagicAllocated, &f->header),
              "bad magic number in Free()");
    LowLevelAlloc::Arena *arena = f->header.arena;
    if ((arena->flags & kCallMallocHook) != 0) {
      MallocHook::InvokeDeleteHook(v);
    }
    ArenaLock section(arena);
    AddToFreelist(v, arena);
    RAW_CHECK(arena->allocation_count > 0, "nothing in arena to free");
    arena->allocation_count--;
    section.Leave();
  }
}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
  void *result = 0;
  if (request != 0) {
    AllocList *s;       // will point to region that satisfies request
    ArenaLock section(arena);
    ArenaInit(arena);
    // round up with header
    size_t req_rnd = RoundUp(request + sizeof (s->header), arena->roundup);
    for (;;) {      // loop until we find a suitable region
      // find the minimum levels that a block of this size must have
      int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
      if (i < arena->freelist.levels) {   // potential blocks exist
        AllocList *before = &arena->freelist;  // predecessor of s
        while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
          before = s;
        }
        if (s != 0) {       // we found a region
          break;
        }
      }
      // we unlock before mmap() both because mmap() may call a callback hook,
      // and because it may be slow.
      arena->mu.Unlock();
      // mmap generous 64K chunks to decrease
      // the chances/impact of fragmentation:
      size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
      void *new_pages = arena->allocator->MapPages(arena->flags, new_pages_size);
      arena->mu.Lock();
      s = reinterpret_cast<AllocList *>(new_pages);
      s->header.size = new_pages_size;
      // Pretend the block is allocated; call AddToFreelist() to free it.
      s->header.magic = Magic(kMagicAllocated, &s->header);
      s->header.arena = arena;
      AddToFreelist(&s->levels, arena);   // insert new region into free list
    }
    AllocList *prev[kMaxLevel];
    LLA_SkiplistDelete(&arena->freelist, s, prev);    // remove from free list
    // s points to the first free region that's big enough
    if (req_rnd + arena->min_size <= s->header.size) {  // big enough to split
      AllocList *n = reinterpret_cast<AllocList *>
                        (req_rnd + reinterpret_cast<char *>(s));
      n->header.size = s->header.size - req_rnd;
      n->header.magic = Magic(kMagicAllocated, &n->header);
      n->header.arena = arena;
      s->header.size = req_rnd;
      AddToFreelist(&n->levels, arena);
    }
    s->header.magic = Magic(kMagicAllocated, &s->header);
    RAW_CHECK(s->header.arena == arena, "");
    arena->allocation_count++;
    section.Leave();
    result = &s->levels;
  }
  ANNOTATE_NEW_MEMORY(result, request);
  return result;
}
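
// Worked example (illustrative, not in the original source): on an LP64
// build (roundup 16, min_size 64) a 100-byte request becomes
// req_rnd = RoundUp(100 + 32, 16) == 144.  If the first-fit search returns a
// 1024-byte free region, 144 + 64 <= 1024, so the region is split: the
// caller keeps the first 144 bytes (32 of header plus 112 usable, of which
// 100 were asked for) and the trailing 880 bytes go back on the freelist.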
void *LowLevelAlloc::Alloc(size_t request) {
  void *result = DoAllocWithArena(request, &default_arena);
  if ((default_arena.flags & kCallMallocHook) != 0) {
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {
  RAW_CHECK(arena != 0, "must pass a valid arena");
  void *result = DoAllocWithArena(request, arena);
  if ((arena->flags & kCallMallocHook) != 0) {
    // this call must be directly in the user-called allocator function
    // for MallocHook::GetCallerStackTrace to work properly
    MallocHook::InvokeNewHook(result, request);
  }
  return result;
}
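
// Usage sketch (illustrative, not part of the original source) of the public
// interface defined above and declared in base/low_level_alloc.h:
//
//   LowLevelAlloc::Arena *arena =
//       LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
//   void *p = LowLevelAlloc::AllocWithArena(128, arena);
//   LowLevelAlloc::Free(p);                 // Free() finds the arena itself
//   RAW_CHECK(LowLevelAlloc::DeleteArena(arena), "arena still has blocks");
//
// Blocks allocated from the default arena via LowLevelAlloc::Alloc() are
// released the same way, with Free().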
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {
  return &default_arena;
}

static DefaultPagesAllocator *default_pages_allocator;
static union {
  char chars[sizeof(DefaultPagesAllocator)];
  void *ptr;
} debug_pages_allocator_space;

LowLevelAlloc::PagesAllocator *LowLevelAlloc::GetDefaultPagesAllocator(void) {
  if (default_pages_allocator) {
    return default_pages_allocator;
  }
  default_pages_allocator = new (debug_pages_allocator_space.chars) DefaultPagesAllocator();
  return default_pages_allocator;
}

void *DefaultPagesAllocator::MapPages(int32 flags, size_t size) {
  void *new_pages;
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) != 0) {
    new_pages = MallocHook::UnhookedMMap(0, size,
                                         PROT_WRITE|PROT_READ,
                                         MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  } else {
    new_pages = mmap(0, size,
                     PROT_WRITE|PROT_READ,
                     MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
  }
  RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
  return new_pages;
}

void DefaultPagesAllocator::UnMapPages(int32 flags, void *region, size_t size) {
  int munmap_result;
  if ((flags & LowLevelAlloc::kAsyncSignalSafe) == 0) {
    munmap_result = munmap(region, size);
  } else {
    munmap_result = MallocHook::UnhookedMUnmap(region, size);
  }
  RAW_CHECK(munmap_result == 0,
            "LowLevelAlloc::UnMapPages: munmap failed");
}