page_heap.cc

// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat <opensource@google.com>
#include <config.h>
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>                     // for PRIuPTR
#endif
#include <errno.h>                        // for ENOMEM, errno
#include <gperftools/malloc_extension.h>  // for MallocRange, etc
#include "base/basictypes.h"
#include "base/commandlineflags.h"
#include "page_heap.h"            // for PageHeap (class declaration)
#include "internal_logging.h"     // for ASSERT, TCMalloc_Printer, etc
#include "page_heap_allocator.h"  // for PageHeapAllocator
#include "static_vars.h"          // for Static
#include "system-alloc.h"         // for TCMalloc_SystemAlloc, etc
DEFINE_double(tcmalloc_release_rate,
              EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
              "Rate at which we release unused memory to the system. "
              "Zero means we never release memory back to the system. "
              "Increase this flag to return memory faster; decrease it "
              "to return memory slower. Reasonable rates are in the "
              "range [0,10]");

DEFINE_int64(tcmalloc_heap_limit_mb,
             EnvToInt("TCMALLOC_HEAP_LIMIT_MB", 0),
             "Limit total size of the process heap to the "
             "specified number of MiB. "
             "When we approach the limit the memory is released "
             "to the system more aggressively (more minor page faults). "
             "Zero means to allocate as long as the system allows.");

namespace tcmalloc {

PageHeap::PageHeap()
    : pagemap_(MetaDataAlloc),
      pagemap_cache_(0),
      scavenge_counter_(0),
      // Start scavenging at kMaxPages list
      release_index_(kMaxPages),
      aggressive_decommit_(false) {
  COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
  DLL_Init(&large_.normal);
  DLL_Init(&large_.returned);
  for (int i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i].normal);
    DLL_Init(&free_[i].returned);
  }
}
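
// Search the exact-size free lists for the smallest size >= n, checking each
// size's "normal" (committed) list before its "returned" (decommitted) list,
// and fall back to the large-span lists if nothing fits. Returns NULL when
// no suitable span is found.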
Span* PageHeap::SearchFreeAndLargeLists(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);

  // Find first size >= n that has a non-empty list
  for (Length s = n; s < kMaxPages; s++) {
    Span* ll = &free_[s].normal;
    // If we're lucky, ll is non-empty, meaning it has a suitable span.
    if (!DLL_IsEmpty(ll)) {
      ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
      return Carve(ll->next, n);
    }
    // Alternatively, maybe there's a usable returned span.
    ll = &free_[s].returned;
    if (!DLL_IsEmpty(ll)) {
      // We did not call EnsureLimit before, to avoid releasing a span
      // that would be taken back immediately.
      // Calling EnsureLimit here is not very expensive, as it fails only if
      // there are no more normal spans (and it fails efficiently)
      // or SystemRelease does not work (then there are probably no returned
      // spans).
      if (EnsureLimit(n)) {
        // ll may have become empty due to coalescing
        if (!DLL_IsEmpty(ll)) {
          ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
          return Carve(ll->next, n);
        }
      }
    }
  }
  // No luck in free lists, our last chance is in a larger class.
  return AllocLarge(n);  // May be NULL
}
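
// Minimum amount of heap growth between two forced coalesce-by-unmapping
// passes in New() below (see the comment there).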
static const size_t kForcedCoalesceInterval = 128*1024*1024;

Span* PageHeap::New(Length n) {
  ASSERT(Check());
  ASSERT(n > 0);
  Span* result = SearchFreeAndLargeLists(n);
  if (result != NULL)
    return result;

  if (stats_.free_bytes != 0 && stats_.unmapped_bytes != 0
      && stats_.free_bytes + stats_.unmapped_bytes >= stats_.system_bytes / 4
      && (stats_.system_bytes / kForcedCoalesceInterval
          != (stats_.system_bytes + (n << kPageShift)) / kForcedCoalesceInterval)) {
    // We're about to grow the heap, but there are lots of free pages.
    // tcmalloc's design decision to keep unmapped and free spans
    // separate and never coalesce them means that sometimes there
    // can be a free run of pages of sufficient size, but it consists of
    // "segments" of different types, so the page heap search cannot find
    // it. In order to prevent growing the heap and wasting memory in such
    // cases, we unmap all free pages so that all free spans are
    // maximally coalesced.
    //
    // We're also limiting the 'rate' of going into this path to be at
    // most once per 128 MiB of heap growth. Otherwise programs that
    // grow the heap frequently (which means by small amounts) could be
    // penalized with a higher count of minor page faults.
    //
    // See also large_heap_fragmentation_unittest.cc and
    // https://code.google.com/p/gperftools/issues/detail?id=368
    ReleaseAtLeastNPages(static_cast<Length>(0x7fffffff));

    // Then try again. If we are forced to grow the heap because of large-span
    // fragmentation and not because of the problem described above, then at
    // the very least we've just unmapped free but insufficiently big large
    // spans back to the OS. So in case of really unlucky memory fragmentation
    // we'll be consuming virtual address space, but not real memory.
    result = SearchFreeAndLargeLists(n);
    if (result != NULL) return result;
  }

  // Grow the heap and try again.
  if (!GrowHeap(n)) {
    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
    ASSERT(Check());
    // The underlying SysAllocator likely set ENOMEM, but we can get here
    // due to EnsureLimit, so we set it here too.
    //
    // Setting errno to ENOMEM here allows us to avoid dealing with it
    // in the fast path.
    errno = ENOMEM;
    return NULL;
  }
  return SearchFreeAndLargeLists(n);
}
Span* PageHeap::AllocLarge(Length n) {
  // Find the best span (closest to n in size).
  // The following loops implement address-ordered best-fit.
  Span* best = NULL;

  // Search through normal list
  for (Span* span = large_.normal.next;
       span != &large_.normal;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        ASSERT(best->location == Span::ON_NORMAL_FREELIST);
      }
    }
  }

  Span* bestNormal = best;

  // Search through released list in case it has a better fit
  for (Span* span = large_.returned.next;
       span != &large_.returned;
       span = span->next) {
    if (span->length >= n) {
      if ((best == NULL)
          || (span->length < best->length)
          || ((span->length == best->length) && (span->start < best->start))) {
        best = span;
        ASSERT(best->location == Span::ON_RETURNED_FREELIST);
      }
    }
  }

  if (best == bestNormal) {
    return best == NULL ? NULL : Carve(best, n);
  }

  // best comes from the returned list.
  if (EnsureLimit(n, false)) {
    return Carve(best, n);
  }

  if (EnsureLimit(n, true)) {
    // best could have been destroyed by coalescing.
    // bestNormal is not a best-fit, and it could have been destroyed as well.
    // We retry; the limit is already ensured:
    return AllocLarge(n);
  }

  // If bestNormal existed, EnsureLimit would have succeeded:
  ASSERT(bestNormal == NULL);
  // We are not allowed to take best from the returned list.
  return NULL;
}
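
// Split an in-use span into a leading piece of n pages (kept in *span) and a
// trailing leftover span, which is returned to the caller still marked
// IN_USE.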
Span* PageHeap::Split(Span* span, Length n) {
  ASSERT(0 < n);
  ASSERT(n < span->length);
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->sizeclass == 0);
  Event(span, 'T', n);

  const int extra = span->length - n;
  Span* leftover = NewSpan(span->start + n, extra);
  ASSERT(leftover->location == Span::IN_USE);
  Event(leftover, 'U', extra);
  RecordSpan(leftover);
  pagemap_.set(span->start + n - 1, span);  // Update map from pageid to span
  span->length = n;

  return leftover;
}
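
// CommitSpan/DecommitSpan map a span's pages in or out through the system
// allocator and keep stats_.committed_bytes in sync. DecommitSpan reports
// whether the release actually succeeded, since some systems do not support
// returning memory.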
void PageHeap::CommitSpan(Span* span) {
  TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift),
                        static_cast<size_t>(span->length << kPageShift));
  stats_.committed_bytes += span->length << kPageShift;
}

bool PageHeap::DecommitSpan(Span* span) {
  bool rv = TCMalloc_SystemRelease(reinterpret_cast<void*>(span->start << kPageShift),
                                   static_cast<size_t>(span->length << kPageShift));
  if (rv) {
    stats_.committed_bytes -= span->length << kPageShift;
  }
  return rv;
}
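
// Carve n pages out of a free span: remove the span from its free list, mark
// it IN_USE, give any leftover pages back to the free lists, and recommit
// the memory if the span came from the returned (decommitted) list.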
Span* PageHeap::Carve(Span* span, Length n) {
  ASSERT(n > 0);
  ASSERT(span->location != Span::IN_USE);
  const int old_location = span->location;
  RemoveFromFreeList(span);
  span->location = Span::IN_USE;
  Event(span, 'A', n);

  const int extra = span->length - n;
  ASSERT(extra >= 0);
  if (extra > 0) {
    Span* leftover = NewSpan(span->start + n, extra);
    leftover->location = old_location;
    Event(leftover, 'S', extra);
    RecordSpan(leftover);

    // The previous span of |leftover| was just split -- no need to
    // coalesce them. The next span of |leftover| was not previously coalesced
    // with |span|, i.e. it is NULL or has a location other than |old_location|.
#ifndef NDEBUG
    const PageID p = leftover->start;
    const Length len = leftover->length;
    Span* next = GetDescriptor(p+len);
    ASSERT(next == NULL ||
           next->location == Span::IN_USE ||
           next->location != leftover->location);
#endif

    PrependToFreeList(leftover);  // Skip coalescing - no candidates possible
    span->length = n;
    pagemap_.set(span->start + n - 1, span);
  }
  ASSERT(Check());
  if (old_location == Span::ON_RETURNED_FREELIST) {
    // We need to recommit this address space.
    CommitSpan(span);
  }
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length == n);
  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
  return span;
}
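
// Return an IN_USE span to the page heap: reset its metadata, merge it into
// the free lists (coalescing with neighbours when allowed), and give
// IncrementalScavenge() a chance to release memory back to the system.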
void PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(span->location == Span::IN_USE);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  const Length n = span->length;
  span->sizeclass = 0;
  span->sample = 0;
  span->location = Span::ON_NORMAL_FREELIST;
  Event(span, 'D', span->length);
  MergeIntoFreeList(span);  // Coalesces if possible
  IncrementalScavenge(n);
  ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
  ASSERT(Check());
}
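
// Two adjacent free spans may be coalesced if they are in the same state
// (both normal or both returned). With aggressive decommit enabled, any two
// free spans may be merged, because MergeIntoFreeList() decommits the merged
// span as a whole afterwards.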
bool PageHeap::MayMergeSpans(Span* span, Span* other) {
  if (aggressive_decommit_) {
    return other->location != Span::IN_USE;
  }
  return span->location == other->location;
}

void PageHeap::MergeIntoFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);

  // Coalesce -- we guarantee that "p" != 0, so no bounds checking
  // necessary. We do not bother resetting the stale pagemap
  // entries for the pieces we are merging together because we only
  // care about the pagemap entries for the boundaries.
  //
  // Note: depending on aggressive_decommit_ mode we allow only
  // similar spans to be coalesced.
  //
  // The following applies if aggressive_decommit_ is enabled:
  //
  // Note that the adjacent spans we merge into "span" may come out of a
  // "normal" (committed) list, and cleanly merge with our IN_USE span, which
  // is implicitly committed. If the adjacent spans are on the "returned"
  // (decommitted) list, then we must get both spans into the same state before
  // or after we coalesce them. The current code always decommits. This is
  // achieved by blindly decommitting the entire coalesced region, which may
  // include any combination of committed and decommitted spans, at the end of
  // the method.

  // TODO(jar): "Always decommit" causes some extra calls to commit when we are
  // called in GrowHeap() during an allocation :-/. We need to eval the cost of
  // that oscillation, and possibly do something to reduce it.

  // TODO(jar): We need a better strategy for deciding to commit, or decommit,
  // based on memory usage and free heap sizes.

  uint64_t temp_committed = 0;

  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && MayMergeSpans(span, prev)) {
    // Merge preceding span into this span
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    if (aggressive_decommit_ && prev->location == Span::ON_RETURNED_FREELIST) {
      // We're about to put the merged span onto the returned freelist and call
      // DecommitSpan() on it, which will mark the entire span, including this
      // one, as released and decrease stats_.committed_bytes by the size of
      // the merged span. To make the math work out we temporarily increase
      // the stats_.committed_bytes amount.
      temp_committed = prev->length << kPageShift;
    }
    RemoveFromFreeList(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && MayMergeSpans(span, next)) {
    // Merge next span into this span
    ASSERT(next->start == p+n);
    const Length len = next->length;
    if (aggressive_decommit_ && next->location == Span::ON_RETURNED_FREELIST) {
      // See the comment below 'if (prev->location ...' for explanation.
      temp_committed += next->length << kPageShift;
    }
    RemoveFromFreeList(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  if (aggressive_decommit_) {
    if (DecommitSpan(span)) {
      span->location = Span::ON_RETURNED_FREELIST;
      stats_.committed_bytes += temp_committed;
    } else {
      ASSERT(temp_committed == 0);
    }
  }
  PrependToFreeList(span);
}

void PageHeap::PrependToFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  SpanList* list = (span->length < kMaxPages) ? &free_[span->length] : &large_;
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes += (span->length << kPageShift);
    DLL_Prepend(&list->normal, span);
  } else {
    stats_.unmapped_bytes += (span->length << kPageShift);
    DLL_Prepend(&list->returned, span);
  }
}

void PageHeap::RemoveFromFreeList(Span* span) {
  ASSERT(span->location != Span::IN_USE);
  if (span->location == Span::ON_NORMAL_FREELIST) {
    stats_.free_bytes -= (span->length << kPageShift);
  } else {
    stats_.unmapped_bytes -= (span->length << kPageShift);
  }
  DLL_Remove(span);
}
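
// Called after every deallocation of n pages. Once enough pages have been
// freed since the last release, return at least one span to the system and
// compute how long to wait before releasing again. For example, with the
// default FLAGS_tcmalloc_release_rate of 1.0, releasing one page sets the
// counter so that roughly 1000 more pages must be freed before the next
// release; a rate of 10 shrinks that wait to about 100 pages.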
void PageHeap::IncrementalScavenge(Length n) {
  // Fast path; not yet time to release memory
  scavenge_counter_ -= n;
  if (scavenge_counter_ >= 0) return;  // Not yet time to scavenge

  const double rate = FLAGS_tcmalloc_release_rate;
  if (rate <= 1e-6) {
    // Tiny release rate means that releasing is disabled.
    scavenge_counter_ = kDefaultReleaseDelay;
    return;
  }

  Length released_pages = ReleaseAtLeastNPages(1);

  if (released_pages == 0) {
    // Nothing to scavenge, delay for a while.
    scavenge_counter_ = kDefaultReleaseDelay;
  } else {
    // Compute how long to wait until we return memory.
    // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
    // after releasing one page.
    const double mult = 1000.0 / rate;
    double wait = mult * static_cast<double>(released_pages);
    if (wait > kMaxReleaseDelay) {
      // Avoid overflow and bound to reasonable range.
      wait = kMaxReleaseDelay;
    }
    scavenge_counter_ = static_cast<int64_t>(wait);
  }
}

Length PageHeap::ReleaseLastNormalSpan(SpanList* slist) {
  Span* s = slist->normal.prev;
  ASSERT(s->location == Span::ON_NORMAL_FREELIST);

  if (DecommitSpan(s)) {
    RemoveFromFreeList(s);
    const Length n = s->length;
    s->location = Span::ON_RETURNED_FREELIST;
    MergeIntoFreeList(s);  // Coalesces if possible.
    return n;
  }

  return 0;
}

Length PageHeap::ReleaseAtLeastNPages(Length num_pages) {
  Length released_pages = 0;

  // Round robin through the lists of free spans, releasing the last
  // span in each list. Stop after releasing at least num_pages
  // or when there is nothing more to release.
  while (released_pages < num_pages && stats_.free_bytes > 0) {
    for (int i = 0; i < kMaxPages+1 && released_pages < num_pages;
         i++, release_index_++) {
      if (release_index_ > kMaxPages) release_index_ = 0;
      SpanList* slist = (release_index_ == kMaxPages) ?
          &large_ : &free_[release_index_];
      if (!DLL_IsEmpty(&slist->normal)) {
        Length released_len = ReleaseLastNormalSpan(slist);
        // Some systems do not support release
        if (released_len == 0) return released_pages;
        released_pages += released_len;
      }
    }
  }
  return released_pages;
}
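
// Check that committing n more pages would keep the heap under
// FLAGS_tcmalloc_heap_limit_mb, optionally releasing free pages first to
// make room. As a rough worked example (assuming the default 8 KiB pages,
// i.e. kPageShift == 13): TCMALLOC_HEAP_LIMIT_MB=64 gives a limit of
// 64*1024*1024 >> 13 = 8192 pages, and a request is allowed only while the
// committed page count plus n stays within that budget.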
bool PageHeap::EnsureLimit(Length n, bool withRelease)
{
  Length limit = (FLAGS_tcmalloc_heap_limit_mb*1024*1024) >> kPageShift;
  if (limit == 0) return true;  // there is no limit

  // We do not use stats_.system_bytes because it does not take
  // MetaDataAllocs into account.
  Length takenPages = TCMalloc_SystemTaken >> kPageShift;
  // XXX: takenPages may be slightly bigger than limit for two reasons:
  // * MetaDataAllocs ignore the limit (it is not easy to handle
  //   out of memory there)
  // * sys_alloc may round the allocation up to the huge page size,
  //   although a smaller limit was ensured

  ASSERT(takenPages >= stats_.unmapped_bytes >> kPageShift);
  takenPages -= stats_.unmapped_bytes >> kPageShift;

  if (takenPages + n > limit && withRelease) {
    takenPages -= ReleaseAtLeastNPages(takenPages + n - limit);
  }

  return takenPages + n <= limit;
}

void PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  // Associate span object with all interior pages as well
  ASSERT(span->location == Span::IN_USE);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = sc;
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}

void PageHeap::GetSmallSpanStats(SmallSpanStats* result) {
  for (int s = 0; s < kMaxPages; s++) {
    result->normal_length[s] = DLL_Length(&free_[s].normal);
    result->returned_length[s] = DLL_Length(&free_[s].returned);
  }
}

void PageHeap::GetLargeSpanStats(LargeSpanStats* result) {
  result->spans = 0;
  result->normal_pages = 0;
  result->returned_pages = 0;
  for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
    result->normal_pages += s->length;
    result->spans++;
  }
  for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
    result->returned_pages += s->length;
    result->spans++;
  }
}

bool PageHeap::GetNextRange(PageID start, base::MallocRange* r) {
  Span* span = reinterpret_cast<Span*>(pagemap_.Next(start));
  if (span == NULL) {
    return false;
  }
  r->address = span->start << kPageShift;
  r->length = span->length << kPageShift;
  r->fraction = 0;
  switch (span->location) {
    case Span::IN_USE:
      r->type = base::MallocRange::INUSE;
      r->fraction = 1;
      if (span->sizeclass > 0) {
        // Only some of the objects in this span may be in use.
        const size_t osize = Static::sizemap()->class_to_size(span->sizeclass);
        r->fraction = (1.0 * osize * span->refcount) / r->length;
      }
      break;
    case Span::ON_NORMAL_FREELIST:
      r->type = base::MallocRange::FREE;
      break;
    case Span::ON_RETURNED_FREELIST:
      r->type = base::MallocRange::UNMAPPED;
      break;
    default:
      r->type = base::MallocRange::UNKNOWN;
      break;
  }
  return true;
}
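
// Record the stack trace that triggered a heap growth so that
// MallocExtension's growth profile can report where the heap was grown.
// Traces are kept in a singly linked list threaded through the last stack
// slot of each StackTrace.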
static void RecordGrowth(size_t growth) {
  StackTrace* t = Static::stacktrace_allocator()->New();
  t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
  t->size = growth;
  t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(Static::growth_stacks());
  Static::set_growth_stacks(t);
}
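
// Ask the system allocator for at least n pages (rounded up to
// kMinSystemAlloc to amortize system-call overhead, with a retry at exactly
// n pages if the larger request fails), then register the new range with the
// pagemap and hand it to Delete() so it coalesces into the free lists.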
bool PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  if (n > kMaxValidPages) return false;
  Length ask = (n > kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  size_t actual_size;
  void* ptr = NULL;
  if (EnsureLimit(ask)) {
    ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
  }
  if (ptr == NULL) {
    if (n < ask) {
      // Try growing just "n" pages
      ask = n;
      if (EnsureLimit(ask)) {
        ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
      }
    }
    if (ptr == NULL) return false;
  }
  ask = actual_size >> kPageShift;
  RecordGrowth(ask << kPageShift);

  uint64_t old_system_bytes = stats_.system_bytes;
  stats_.system_bytes += (ask << kPageShift);
  stats_.committed_bytes += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // If we already have a lot of pages allocated, just preallocate a bunch of
  // memory for the page map. This prevents fragmentation by pagemap metadata
  // when a program keeps allocating and freeing large blocks.
  if (old_system_bytes < kPageMapBigAllocationThreshold
      && stats_.system_bytes >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Make sure pagemap_ has entries for all of the new pages.
  // Plus ensure one before and one after so coalescing code
  // does not need bounds-checking.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Pretend the new area is allocated and then Delete() it to cause
    // any necessary coalescing to occur.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(stats_.unmapped_bytes + stats_.committed_bytes == stats_.system_bytes);
    ASSERT(Check());
    return true;
  } else {
    // We could not allocate memory within "pagemap_"
    // TODO: Once we can return memory to the system, return the new span
    return false;
  }
}

bool PageHeap::Check() {
  ASSERT(free_[0].normal.next == &free_[0].normal);
  ASSERT(free_[0].returned.next == &free_[0].returned);
  return true;
}

bool PageHeap::CheckExpensive() {
  bool result = Check();
  CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
  CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
    CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
  }
  return result;
}

bool PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
                         int freelist) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->location == freelist);  // NORMAL or RETURNED
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}

}  // namespace tcmalloc