// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2008, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// ---
// Author: Sanjay Ghemawat <opensource@google.com>

#ifndef TCMALLOC_PAGE_HEAP_H_
#define TCMALLOC_PAGE_HEAP_H_

#include <config.h>
#include <stddef.h>                     // for size_t
#ifdef HAVE_STDINT_H
#include <stdint.h>                     // for uint64_t, int64_t, uint16_t
#endif
#include <gperftools/malloc_extension.h>
#include "base/basictypes.h"
#include "common.h"
#include "packed-cache-inl.h"
#include "pagemap.h"
#include "span.h"

// We need to dllexport PageHeap just for the unittest.  MSVC complains
// that we don't dllexport the PageHeap members, but we don't need to
// test those, so I just suppress this warning.
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4251)
#endif

// This #ifdef should almost never be set.  Set NO_TCMALLOC_SAMPLES if
// you're porting to a system where you really can't get a stacktrace.
// Because we control the definition of GetStackTrace, all clients of
// GetStackTrace should #include us rather than stacktrace.h.
#ifdef NO_TCMALLOC_SAMPLES
// We use #define so code compiles even if you #include stacktrace.h somehow.
# define GetStackTrace(stack, depth, skip)  (0)
#else
# include <gperftools/stacktrace.h>
#endif

namespace base {
struct MallocRange;
}

namespace tcmalloc {

// -------------------------------------------------------------------------
// Map from page-id to per-page data
// -------------------------------------------------------------------------

// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
// because sometimes the sizeclass is all the information we need.

// Selector class -- general selector uses 3-level map
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
  typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
};

// A two-level map for 32-bit machines
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
  typedef PackedCache<32-kPageShift, uint16_t> CacheType;
};
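
// PageHeap (below) instantiates these via MapSelector<kAddressBits>, so a
// 64-bit build gets the three-level TCMalloc_PageMap3 with 64-bit cache
// entries, while a 32-bit build gets the two-level TCMalloc_PageMap2 with
// 16-bit cache entries.  For illustration (this mirrors the typedefs
// inside PageHeap; kAddressBits and kPageShift come from common.h):
//
//   typedef MapSelector<kAddressBits>::Type      PageMap;       // radix page map
//   typedef MapSelector<kAddressBits>::CacheType PageMapCache;  // PageID -> sizeclass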

// -------------------------------------------------------------------------
// Page-level allocator
//  * Eager coalescing
//
// Heap for page-level allocation.  We allow allocating and freeing
// contiguous runs of pages (each such run is called a "span").
// -------------------------------------------------------------------------

class PERFTOOLS_DLL_DECL PageHeap {
 public:
  PageHeap();

  // Allocate a run of "n" pages.  Returns zero if out of memory.
  // Caller should not pass "n == 0" -- instead, n should have
  // been rounded up already.
  Span* New(Length n);

  // Delete the span "[p, p+n-1]" (i.e., p == span->start and
  // n == span->length).
  // REQUIRES: span was returned by earlier call to New() and
  //           has not yet been deleted.
  void Delete(Span* span);
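
  // Example use (a hypothetical caller; in tcmalloc proper these calls
  // are made with the page-heap lock held):
  //
  //   Span* span = heap->New(8);   // ask for a run of 8 pages
  //   if (span != NULL) {
  //     // pages [span->start, span->start + span->length) are ours
  //     heap->Delete(span);        // return the whole run
  //   }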

  // Mark an allocated span as being used for small objects of the
  // specified size-class.
  // REQUIRES: span was returned by an earlier call to New()
  //           and has not yet been deleted.
  void RegisterSizeClass(Span* span, size_t sc);

  // Split an allocated span into two spans: one of length "n" pages
  // followed by another span of length "span->length - n" pages.
  // Modifies "*span" to point to the first span of length "n" pages.
  // Returns a pointer to the second span.
  //
  // REQUIRES: "0 < n < span->length"
  // REQUIRES: span->location == IN_USE
  // REQUIRES: span->sizeclass == 0
  Span* Split(Span* span, Length n);
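
  // For example (hypothetical values):
  //
  //   Span* big  = heap->New(10);        // big spans 10 pages
  //   Span* rest = heap->Split(big, 3);  // big now spans 3 pages,
  //                                      // rest spans the other 7
  //   // Both spans are IN_USE and must each be Delete()d eventually.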

  // Return the descriptor for the specified page.  Returns NULL if
  // this PageID was not allocated previously.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }
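
  // For example, mapping a heap address back to its span (a sketch;
  // PageIDs are addresses shifted down by kPageShift):
  //
  //   PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  //   Span* span = heap->GetDescriptor(p);  // NULL if p was never allocated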

  // If this page heap is managing a range with starting page # >= start,
  // store info about the range in *r and return true.  Else return false.
  bool GetNextRange(PageID start, base::MallocRange* r);
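
  // This supports a full scan of managed memory, e.g. (a sketch, using
  // the address/length fields of base::MallocRange):
  //
  //   base::MallocRange r;
  //   PageID p = 0;
  //   while (heap->GetNextRange(p, &r)) {
  //     // ... inspect r ...
  //     p = (r.address + r.length) >> kPageShift;
  //   }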

  // Page heap statistics
  struct Stats {
    Stats() : system_bytes(0), free_bytes(0), unmapped_bytes(0),
              committed_bytes(0) {}
    uint64_t system_bytes;     // Total bytes allocated from system
    uint64_t free_bytes;       // Total bytes on normal freelists
    uint64_t unmapped_bytes;   // Total bytes on returned freelists
    uint64_t committed_bytes;  // Bytes committed, always <= system_bytes.
  };
  inline Stats stats() const { return stats_; }

  struct SmallSpanStats {
    // For each free list of small spans, the length (in spans) of the
    // normal and returned free lists for that size.
    int64 normal_length[kMaxPages];
    int64 returned_length[kMaxPages];
  };
  void GetSmallSpanStats(SmallSpanStats* result);

  // Stats for free large spans (i.e., spans of length >= kMaxPages,
  // which live on the "large_" list rather than the sized free lists).
  struct LargeSpanStats {
    int64 spans;           // Number of such spans
    int64 normal_pages;    // Combined page length of normal large spans
    int64 returned_pages;  // Combined page length of unmapped spans
  };
  void GetLargeSpanStats(LargeSpanStats* result);

  bool Check();
  // Like Check() but does some more comprehensive checking.
  bool CheckExpensive();
  bool CheckList(Span* list, Length min_pages, Length max_pages,
                 int freelist);  // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST

  // Try to release at least num_pages for reuse by the OS.  Returns
  // the actual number of pages released, which may be less than
  // num_pages if there weren't enough pages to release.  The result
  // may also be larger than num_pages since page_heap might decide to
  // release one large range instead of fragmenting it into two
  // smaller released and unreleased ranges.
  Length ReleaseAtLeastNPages(Length num_pages);
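
  // For example, with 4K pages a hypothetical caller wanting to give
  // roughly 1MB back to the OS would do:
  //
  //   Length released = heap->ReleaseAtLeastNPages(256);
  //   // released may be 0 (nothing free), < 256 (ran out of spans),
  //   // or > 256 (a larger span was released whole).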

  // Return 0 if we have no information, or else the correct sizeclass for p.
  // Reads and writes to pagemap_cache_ do not require locking.
  // The entries are 64 bits on 64-bit hardware and 16 bits on
  // 32-bit hardware, and we don't mind raciness as long as each read of
  // an entry yields a valid entry, not a partially updated entry.
  size_t GetSizeClassIfCached(PageID p) const {
    return pagemap_cache_.GetOrDefault(p, 0);
  }
  void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
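
  // A sketch of the intended fast path (the real lookup lives in the
  // allocator, not in this header):
  //
  //   size_t cl = heap->GetSizeClassIfCached(p);
  //   if (cl == 0) {                            // miss: fall back to pagemap
  //     const Span* span = heap->GetDescriptor(p);
  //     if (span != NULL && span->sizeclass != 0) {
  //       cl = span->sizeclass;
  //       heap->CacheSizeClass(p, cl);          // prime the cache
  //     }
  //   }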

  bool GetAggressiveDecommit(void) { return aggressive_decommit_; }
  void SetAggressiveDecommit(bool aggressive_decommit) {
    aggressive_decommit_ = aggressive_decommit;
  }

 private:
  // Allocates a big block of memory for the pagemap once we reach more than
  // 128MB
  static const size_t kPageMapBigAllocationThreshold = 128 << 20;

  // Minimum number of pages to fetch from system at a time.  Must be
  // significantly bigger than kBlockSize to amortize system-call
  // overhead, and also to reduce external fragmentation.  Also, we
  // should keep this value big because various incarnations of Linux
  // have small limits on the number of mmap() regions per
  // address-space.
  // REQUIRED: kMinSystemAlloc <= kMaxPages
  static const int kMinSystemAlloc = kMaxPages;

  // Never delay scavenging for more than the following number of
  // deallocated pages.  With 4K pages, this comes to 4GB of
  // deallocation.
  static const int kMaxReleaseDelay = 1 << 20;

  // If there is nothing to release, wait for so many pages before
  // scavenging again.  With 4K pages, this comes to 1GB of memory.
  static const int kDefaultReleaseDelay = 1 << 18;

  // Pick the appropriate map and cache types based on pointer size
  typedef MapSelector<kAddressBits>::Type PageMap;
  typedef MapSelector<kAddressBits>::CacheType PageMapCache;
  PageMap pagemap_;
  mutable PageMapCache pagemap_cache_;

  // We segregate spans of a given size into two circular linked
  // lists: one for normal spans, and one for spans whose memory
  // has been returned to the system.
  struct SpanList {
    Span normal;
    Span returned;
  };

  // List of free spans of length >= kMaxPages
  SpanList large_;

  // Array mapping from span length to a doubly linked list of free spans
  SpanList free_[kMaxPages];

  // Statistics on system, free, and unmapped bytes
  Stats stats_;

  Span* SearchFreeAndLargeLists(Length n);

  bool GrowHeap(Length n);

  // REQUIRES: span->length >= n
  // REQUIRES: span->location != IN_USE
  // Remove span from its free list, and move any leftover part of
  // span into appropriate free lists.  Also update "span" to have
  // length exactly "n" and mark it as non-free so it can be returned
  // to the client.  After all that, decrease free_pages_ by n and
  // return span.
  Span* Carve(Span* span, Length n);
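
  // Record the span in the pagemap.  Only the first and last pages need
  // entries here: coalescing in Delete() looks up just the pages
  // adjacent to a span's boundaries, and RegisterSizeClass() separately
  // fills in the interior pages of spans that back small objects.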
  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }

  // Allocate a large span of length == n.  If successful, returns a
  // span of exactly the specified length.  Else, returns NULL.
  Span* AllocLarge(Length n);

  // Coalesce span with neighboring spans if possible, prepend to
  // appropriate free list, and adjust stats.
  void MergeIntoFreeList(Span* span);

  // Commit the span.
  void CommitSpan(Span* span);

  // Decommit the span.
  bool DecommitSpan(Span* span);

  // Prepends span to appropriate free list, and adjusts stats.
  void PrependToFreeList(Span* span);

  // Removes span from its free list, and adjusts stats.
  void RemoveFromFreeList(Span* span);

  // Incrementally release some memory to the system.
  // IncrementalScavenge(n) is called whenever n pages are freed.
  void IncrementalScavenge(Length n);

  // Release the last span on the normal portion of this list.
  // Return the length of that span or zero if release failed.
  Length ReleaseLastNormalSpan(SpanList* slist);

  // Checks if we are allowed to take more memory from the system.
  // If limit is reached and allowRelease is true, tries to release
  // some unused spans.
  bool EnsureLimit(Length n, bool allowRelease = true);

  bool MayMergeSpans(Span* span, Span* other);

  // Number of pages to deallocate before doing more scavenging
  int64_t scavenge_counter_;

  // Index of last free list where we released memory to the OS.
  int release_index_;

  bool aggressive_decommit_;
};

}  // namespace tcmalloc

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#endif  // TCMALLOC_PAGE_HEAP_H_