// heap-profile-table.h
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//
#ifndef BASE_HEAP_PROFILE_TABLE_H_
#define BASE_HEAP_PROFILE_TABLE_H_

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/logging.h"   // for RawFD
#include "heap-profile-stats.h"
// Table to maintain a heap profile data inside,
// i.e. the set of currently active heap memory allocations.
// Not thread-safe and non-reentrant:
// each instance object must be used by one thread at a time,
// without self-recursion.
//
// TODO(maxim): add a unittest for this class.
  47. class HeapProfileTable {
  48. public:
  49. // Extension to be used for heap pforile files.
  50. static const char kFileExt[];
  51. // Longest stack trace we record.
  52. static const int kMaxStackDepth = 32;
  53. // data types ----------------------------
  54. // Profile stats.
  55. typedef HeapProfileStats Stats;
  56. // Info we can return about an allocation.
  57. struct AllocInfo {
  58. size_t object_size; // size of the allocation
  59. const void* const* call_stack; // call stack that made the allocation call
  60. int stack_depth; // depth of call_stack
  61. bool live;
  62. bool ignored;
  63. };
  64. // Info we return about an allocation context.
  65. // An allocation context is a unique caller stack trace
  66. // of an allocation operation.
  67. struct AllocContextInfo : public Stats {
  68. int stack_depth; // Depth of stack trace
  69. const void* const* call_stack; // Stack trace
  70. };
  71. // Memory (de)allocator interface we'll use.
  72. typedef void* (*Allocator)(size_t size);
  73. typedef void (*DeAllocator)(void* ptr);
  74. // interface ---------------------------
  75. HeapProfileTable(Allocator alloc, DeAllocator dealloc, bool profile_mmap);
  76. ~HeapProfileTable();
  77. // Collect the stack trace for the function that asked to do the
  78. // allocation for passing to RecordAlloc() below.
  79. //
  80. // The stack trace is stored in 'stack'. The stack depth is returned.
  81. //
  82. // 'skip_count' gives the number of stack frames between this call
  83. // and the memory allocation function.
  84. static int GetCallerStackTrace(int skip_count, void* stack[kMaxStackDepth]);
  85. // Record an allocation at 'ptr' of 'bytes' bytes. 'stack_depth'
  86. // and 'call_stack' identifying the function that requested the
  87. // allocation. They can be generated using GetCallerStackTrace() above.
  88. void RecordAlloc(const void* ptr, size_t bytes,
  89. int stack_depth, const void* const call_stack[]);
  90. // Record the deallocation of memory at 'ptr'.
  91. void RecordFree(const void* ptr);
  92. // Return true iff we have recorded an allocation at 'ptr'.
  93. // If yes, fill *object_size with the allocation byte size.
  94. bool FindAlloc(const void* ptr, size_t* object_size) const;
  95. // Same as FindAlloc, but fills all of *info.
  96. bool FindAllocDetails(const void* ptr, AllocInfo* info) const;
  97. // Return true iff "ptr" points into a recorded allocation
  98. // If yes, fill *object_ptr with the actual allocation address
  99. // and *object_size with the allocation byte size.
  100. // max_size specifies largest currently possible allocation size.
  101. bool FindInsideAlloc(const void* ptr, size_t max_size,
  102. const void** object_ptr, size_t* object_size) const;
  103. // If "ptr" points to a recorded allocation and it's not marked as live
  104. // mark it as live and return true. Else return false.
  105. // All allocations start as non-live.
  106. bool MarkAsLive(const void* ptr);
  107. // If "ptr" points to a recorded allocation, mark it as "ignored".
  108. // Ignored objects are treated like other objects, except that they
  109. // are skipped in heap checking reports.
  110. void MarkAsIgnored(const void* ptr);
  111. // Return current total (de)allocation statistics. It doesn't contain
  112. // mmap'ed regions.
  113. const Stats& total() const { return total_; }
  114. // Allocation data iteration callback: gets passed object pointer and
  115. // fully-filled AllocInfo.
  116. typedef void (*AllocIterator)(const void* ptr, const AllocInfo& info);
  117. // Iterate over the allocation profile data calling "callback"
  118. // for every allocation.
  119. void IterateAllocs(AllocIterator callback) const {
  120. address_map_->Iterate(MapArgsAllocIterator, callback);
  121. }
  122. // Allocation context profile data iteration callback
  123. typedef void (*AllocContextIterator)(const AllocContextInfo& info);
  124. // Iterate over the allocation context profile data calling "callback"
  125. // for every allocation context. Allocation contexts are ordered by the
  126. // size of allocated space.
  127. void IterateOrderedAllocContexts(AllocContextIterator callback) const;
  128. // Fill profile data into buffer 'buf' of size 'size'
  129. // and return the actual size occupied by the dump in 'buf'.
  130. // The profile buckets are dumped in the decreasing order
  131. // of currently allocated bytes.
  132. // We do not provision for 0-terminating 'buf'.
  133. int FillOrderedProfile(char buf[], int size) const;
  134. // Cleanup any old profile files matching prefix + ".*" + kFileExt.
  135. static void CleanupOldProfiles(const char* prefix);
  136. // Return a snapshot of the current contents of *this.
  137. // Caller must call ReleaseSnapshot() on result when no longer needed.
  138. // The result is only valid while this exists and until
  139. // the snapshot is discarded by calling ReleaseSnapshot().
  140. class Snapshot;
  141. Snapshot* TakeSnapshot();
  142. // Release a previously taken snapshot. snapshot must not
  143. // be used after this call.
  144. void ReleaseSnapshot(Snapshot* snapshot);
  145. // Return a snapshot of every non-live, non-ignored object in *this.
  146. // If "base" is non-NULL, skip any objects present in "base".
  147. // As a side-effect, clears the "live" bit on every live object in *this.
  148. // Caller must call ReleaseSnapshot() on result when no longer needed.
  149. Snapshot* NonLiveSnapshot(Snapshot* base);
  150. private:
  151. // data types ----------------------------
  152. // Hash table bucket to hold (de)allocation stats
  153. // for a given allocation call stack trace.
  154. typedef HeapProfileBucket Bucket;
  155. // Info stored in the address map
  156. struct AllocValue {
  157. // Access to the stack-trace bucket
  158. Bucket* bucket() const {
  159. return reinterpret_cast<Bucket*>(bucket_rep & ~uintptr_t(kMask));
  160. }
  161. // This also does set_live(false).
  162. void set_bucket(Bucket* b) { bucket_rep = reinterpret_cast<uintptr_t>(b); }
  163. size_t bytes; // Number of bytes in this allocation
  164. // Access to the allocation liveness flag (for leak checking)
  165. bool live() const { return bucket_rep & kLive; }
  166. void set_live(bool l) {
  167. bucket_rep = (bucket_rep & ~uintptr_t(kLive)) | (l ? kLive : 0);
  168. }
  169. // Should this allocation be ignored if it looks like a leak?
  170. bool ignore() const { return bucket_rep & kIgnore; }
  171. void set_ignore(bool r) {
  172. bucket_rep = (bucket_rep & ~uintptr_t(kIgnore)) | (r ? kIgnore : 0);
  173. }
  174. private:
  175. // We store a few bits in the bottom bits of bucket_rep.
  176. // (Alignment is at least four, so we have at least two bits.)
  177. static const int kLive = 1;
  178. static const int kIgnore = 2;
  179. static const int kMask = kLive | kIgnore;
  180. uintptr_t bucket_rep;
  181. };
  182. // helper for FindInsideAlloc
  183. static size_t AllocValueSize(const AllocValue& v) { return v.bytes; }
  184. typedef AddressMap<AllocValue> AllocationMap;
  185. // Arguments that need to be passed DumpBucketIterator callback below.
  186. struct BufferArgs {
  187. BufferArgs(char* buf_arg, int buflen_arg, int bufsize_arg)
  188. : buf(buf_arg),
  189. buflen(buflen_arg),
  190. bufsize(bufsize_arg) {
  191. }
  192. char* buf;
  193. int buflen;
  194. int bufsize;
  195. DISALLOW_COPY_AND_ASSIGN(BufferArgs);
  196. };
  197. // Arguments that need to be passed DumpNonLiveIterator callback below.
  198. struct DumpArgs {
  199. DumpArgs(RawFD fd_arg, Stats* profile_stats_arg)
  200. : fd(fd_arg),
  201. profile_stats(profile_stats_arg) {
  202. }
  203. RawFD fd; // file to write to
  204. Stats* profile_stats; // stats to update (may be NULL)
  205. };
  206. // helpers ----------------------------
  207. // Unparse bucket b and print its portion of profile dump into buf.
  208. // We return the amount of space in buf that we use. We start printing
  209. // at buf + buflen, and promise not to go beyond buf + bufsize.
  210. // We do not provision for 0-terminating 'buf'.
  211. //
  212. // If profile_stats is non-NULL, we update *profile_stats by
  213. // counting bucket b.
  214. //
  215. // "extra" is appended to the unparsed bucket. Typically it is empty,
  216. // but may be set to something like " heapprofile" for the total
  217. // bucket to indicate the type of the profile.
  218. static int UnparseBucket(const Bucket& b,
  219. char* buf, int buflen, int bufsize,
  220. const char* extra,
  221. Stats* profile_stats);
  222. // Get the bucket for the caller stack trace 'key' of depth 'depth'
  223. // creating the bucket if needed.
  224. Bucket* GetBucket(int depth, const void* const key[]);
  225. // Helper for IterateAllocs to do callback signature conversion
  226. // from AllocationMap::Iterate to AllocIterator.
  227. static void MapArgsAllocIterator(const void* ptr, AllocValue* v,
  228. AllocIterator callback) {
  229. AllocInfo info;
  230. info.object_size = v->bytes;
  231. info.call_stack = v->bucket()->stack;
  232. info.stack_depth = v->bucket()->depth;
  233. info.live = v->live();
  234. info.ignored = v->ignore();
  235. callback(ptr, info);
  236. }
  237. // Helper to dump a bucket.
  238. inline static void DumpBucketIterator(const Bucket* bucket,
  239. BufferArgs* args);
  240. // Helper for DumpNonLiveProfile to do object-granularity
  241. // heap profile dumping. It gets passed to AllocationMap::Iterate.
  242. inline static void DumpNonLiveIterator(const void* ptr, AllocValue* v,
  243. const DumpArgs& args);
  244. // Helper for IterateOrderedAllocContexts and FillOrderedProfile.
  245. // Creates a sorted list of Buckets whose length is num_buckets_.
  246. // The caller is responsible for deallocating the returned list.
  247. Bucket** MakeSortedBucketList() const;
  248. // Helper for TakeSnapshot. Saves object to snapshot.
  249. static void AddToSnapshot(const void* ptr, AllocValue* v, Snapshot* s);
  250. // Arguments passed to AddIfNonLive
  251. struct AddNonLiveArgs {
  252. Snapshot* dest;
  253. Snapshot* base;
  254. };
  255. // Helper for NonLiveSnapshot. Adds the object to the destination
  256. // snapshot if it is non-live.
  257. static void AddIfNonLive(const void* ptr, AllocValue* v,
  258. AddNonLiveArgs* arg);
  259. // Write contents of "*allocations" as a heap profile to
  260. // "file_name". "total" must contain the total of all entries in
  261. // "*allocations".
  262. static bool WriteProfile(const char* file_name,
  263. const Bucket& total,
  264. AllocationMap* allocations);
  265. // data ----------------------------
  266. // Memory (de)allocator that we use.
  267. Allocator alloc_;
  268. DeAllocator dealloc_;
  269. // Overall profile stats; we use only the Stats part,
  270. // but make it a Bucket to pass to UnparseBucket.
  271. Bucket total_;
  272. bool profile_mmap_;
  273. // Bucket hash table for malloc.
  274. // We hand-craft one instead of using one of the pre-written
  275. // ones because we do not want to use malloc when operating on the table.
  276. // It is only few lines of code, so no big deal.
  277. Bucket** bucket_table_;
  278. int num_buckets_;
  279. // Map of all currently allocated objects and mapped regions we know about.
  280. AllocationMap* address_map_;
  281. DISALLOW_COPY_AND_ASSIGN(HeapProfileTable);
  282. };
  283. class HeapProfileTable::Snapshot {
  284. public:
  285. const Stats& total() const { return total_; }
  286. // Report anything in this snapshot as a leak.
  287. // May use new/delete for temporary storage.
  288. // If should_symbolize is true, will fork (which is not threadsafe)
  289. // to turn addresses into symbol names. Set to false for maximum safety.
  290. // Also writes a heap profile to "filename" that contains
  291. // all of the objects in this snapshot.
  292. void ReportLeaks(const char* checker_name, const char* filename,
  293. bool should_symbolize);
  294. // Report the addresses of all leaked objects.
  295. // May use new/delete for temporary storage.
  296. void ReportIndividualObjects();
  297. bool Empty() const {
  298. return (total_.allocs == 0) && (total_.alloc_size == 0);
  299. }
  300. private:
  301. friend class HeapProfileTable;
  302. // Total count/size are stored in a Bucket so we can reuse UnparseBucket
  303. Bucket total_;
  304. // We share the Buckets managed by the parent table, but have our
  305. // own object->bucket map.
  306. AllocationMap map_;
  307. Snapshot(Allocator alloc, DeAllocator dealloc) : map_(alloc, dealloc) {
  308. memset(&total_, 0, sizeof(total_));
  309. }
  310. // Callback used to populate a Snapshot object with entries found
  311. // in another allocation map.
  312. inline void Add(const void* ptr, const AllocValue& v) {
  313. map_.Insert(ptr, v);
  314. total_.allocs++;
  315. total_.alloc_size += v.bytes;
  316. }
  317. // Helpers for sorting and generating leak reports
  318. struct Entry;
  319. struct ReportState;
  320. static void ReportCallback(const void* ptr, AllocValue* v, ReportState*);
  321. static void ReportObject(const void* ptr, AllocValue* v, char*);
  322. DISALLOW_COPY_AND_ASSIGN(Snapshot);
  323. };
#endif  // BASE_HEAP_PROFILE_TABLE_H_