heap-profile-table.cc

// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//         Maxim Lifantsev (refactoring)
//

#include <config.h>

#ifdef HAVE_UNISTD_H
#include <unistd.h>   // for write()
#endif
#include <fcntl.h>    // for open()
#ifdef HAVE_GLOB_H
#include <glob.h>
#ifndef GLOB_NOMATCH  // true on some old cygwins
# define GLOB_NOMATCH 0
#endif
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h> // for PRIxPTR
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif
#include <errno.h>
#include <stdarg.h>
#include <string>
#include <map>
#include <algorithm>  // for sort(), equal(), and copy()

#include "heap-profile-table.h"

#include "base/logging.h"
#include "raw_printer.h"
#include "symbolize.h"
#include <gperftools/stacktrace.h>
#include <gperftools/malloc_hook.h>
#include "memory_region_map.h"
#include "base/commandlineflags.h"
#include "base/logging.h"    // for the RawFD I/O commands
#include "base/sysinfo.h"

using std::sort;
using std::equal;
using std::copy;
using std::string;
using std::map;

using tcmalloc::FillProcSelfMaps;   // from sysinfo.h
using tcmalloc::DumpProcSelfMaps;   // from sysinfo.h

//----------------------------------------------------------------------

DEFINE_bool(cleanup_old_heap_profiles,
            EnvToBool("HEAP_PROFILE_CLEANUP", true),
            "At initialization time, delete old heap profiles.");
DEFINE_int32(heap_check_max_leaks,
             EnvToInt("HEAP_CHECK_MAX_LEAKS", 20),
             "The maximum number of leak reports to print.");

//----------------------------------------------------------------------

// header of the dumped heap profile
static const char kProfileHeader[] = "heap profile: ";
static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
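
// Taken together, FillOrderedProfile() and WriteProfile() below produce
// dumps of roughly this shape (an illustrative sketch, not captured output):
//
//   heap profile: <total bucket stats> @ heapprofile
//   <one bucket line per stack trace, largest in-use space first>
//
//   MAPPED_LIBRARIES:
//   <contents of /proc/self/maps>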

//----------------------------------------------------------------------

const char HeapProfileTable::kFileExt[] = ".heap";

//----------------------------------------------------------------------

static const int kHashTableSize = 179999;   // Size for bucket_table_.
/*static*/ const int HeapProfileTable::kMaxStackDepth;

//----------------------------------------------------------------------

// We strip out a different number of stack frames in debug mode
// because less inlining happens in that case
#ifdef NDEBUG
static const int kStripFrames = 2;
#else
static const int kStripFrames = 3;
#endif

// For sorting Stats or Buckets by in-use space
static bool ByAllocatedSpace(HeapProfileTable::Stats* a,
                             HeapProfileTable::Stats* b) {
  // Return true iff "a" has more allocated space than "b"
  return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size);
}

//----------------------------------------------------------------------

HeapProfileTable::HeapProfileTable(Allocator alloc,
                                   DeAllocator dealloc,
                                   bool profile_mmap)
    : alloc_(alloc),
      dealloc_(dealloc),
      profile_mmap_(profile_mmap),
      bucket_table_(NULL),
      num_buckets_(0),
      address_map_(NULL) {
  // Make a hash table for buckets.
  const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
  bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
  memset(bucket_table_, 0, table_bytes);

  // Make an allocation map.
  address_map_ =
      new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);

  // Initialize.
  memset(&total_, 0, sizeof(total_));
  num_buckets_ = 0;
}

HeapProfileTable::~HeapProfileTable() {
  // Free the allocation map.
  address_map_->~AllocationMap();
  dealloc_(address_map_);
  address_map_ = NULL;

  // Free the hash table.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }
  dealloc_(bucket_table_);
  bucket_table_ = NULL;
}

HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
                                                      const void* const key[]) {
  // Make hash-value
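  // (a shift-add-xor mixer in the style of Jenkins' one-at-a-time hash)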
  uintptr_t h = 0;
  for (int i = 0; i < depth; i++) {
    h += reinterpret_cast<uintptr_t>(key[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  h += h << 3;
  h ^= h >> 11;

  // Lookup stack trace in table
  unsigned int buck = ((unsigned int) h) % kHashTableSize;
  for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) {
    if ((b->hash == h) &&
        (b->depth == depth) &&
        equal(key, key + depth, b->stack)) {
      return b;
    }
  }

  // Create new bucket
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  copy(key, key + depth, kcopy);
  Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(b, 0, sizeof(*b));
  b->hash  = h;
  b->depth = depth;
  b->stack = kcopy;
  b->next  = bucket_table_[buck];
  bucket_table_[buck] = b;
  num_buckets_++;
  return b;
}

int HeapProfileTable::GetCallerStackTrace(
    int skip_count, void* stack[kMaxStackDepth]) {
  return MallocHook::GetCallerStackTrace(
      stack, kMaxStackDepth, kStripFrames + skip_count + 1);
}
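
// (The +1 above presumably skips GetCallerStackTrace's own frame, on top
// of the profiler-internal frames accounted for by kStripFrames.)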

void HeapProfileTable::RecordAlloc(
    const void* ptr, size_t bytes, int stack_depth,
    const void* const call_stack[]) {
  Bucket* b = GetBucket(stack_depth, call_stack);
  b->allocs++;
  b->alloc_size += bytes;
  total_.allocs++;
  total_.alloc_size += bytes;

  AllocValue v;
  v.set_bucket(b);  // also did set_live(false); set_ignore(false)
  v.bytes = bytes;
  address_map_->Insert(ptr, v);
}

void HeapProfileTable::RecordFree(const void* ptr) {
  AllocValue v;
  if (address_map_->FindAndRemove(ptr, &v)) {
    Bucket* b = v.bucket();
    b->frees++;
    b->free_size += v.bytes;
    total_.frees++;
    total_.free_size += v.bytes;
  }
}

bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::FindAllocDetails(const void* ptr,
                                        AllocInfo* info) const {
  const AllocValue* alloc_value = address_map_->Find(ptr);
  if (alloc_value != NULL) {
    info->object_size = alloc_value->bytes;
    info->call_stack = alloc_value->bucket()->stack;
    info->stack_depth = alloc_value->bucket()->depth;
  }
  return alloc_value != NULL;
}

bool HeapProfileTable::FindInsideAlloc(const void* ptr,
                                       size_t max_size,
                                       const void** object_ptr,
                                       size_t* object_size) const {
  const AllocValue* alloc_value =
      address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr);
  if (alloc_value != NULL) *object_size = alloc_value->bytes;
  return alloc_value != NULL;
}

bool HeapProfileTable::MarkAsLive(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc && !alloc->live()) {
    alloc->set_live(true);
    return true;
  }
  return false;
}

void HeapProfileTable::MarkAsIgnored(const void* ptr) {
  AllocValue* alloc = address_map_->FindMutable(ptr);
  if (alloc) {
    alloc->set_ignore(true);
  }
}

// We'd be happier using snprintfer, but we avoid it to reduce dependencies.
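// Each bucket is emitted as one text line of the form
//   <live objs>: <live bytes> [<total allocs>: <total bytes>] @<extra> <pc> <pc> ...
// for example (hypothetical values):
//       12:     4096 [     40:    16384] @ 0x0040f1a2 0x004020b3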
int HeapProfileTable::UnparseBucket(const Bucket& b,
                                    char* buf, int buflen, int bufsize,
                                    const char* extra,
                                    Stats* profile_stats) {
  if (profile_stats != NULL) {
    profile_stats->allocs += b.allocs;
    profile_stats->alloc_size += b.alloc_size;
    profile_stats->frees += b.frees;
    profile_stats->free_size += b.free_size;
  }
  int printed =
      snprintf(buf + buflen, bufsize - buflen,
               "%6d: %8" PRId64 " [%6d: %8" PRId64 "] @%s",
               b.allocs - b.frees,
               b.alloc_size - b.free_size,
               b.allocs,
               b.alloc_size,
               extra);
  // If it looks like the snprintf failed, ignore the fact we printed anything
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;

  for (int d = 0; d < b.depth; d++) {
    printed = snprintf(buf + buflen, bufsize - buflen, " 0x%08" PRIxPTR,
                       reinterpret_cast<uintptr_t>(b.stack[d]));
    if (printed < 0 || printed >= bufsize - buflen) return buflen;
    buflen += printed;
  }
  printed = snprintf(buf + buflen, bufsize - buflen, "\n");
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  buflen += printed;
  return buflen;
}

HeapProfileTable::Bucket**
HeapProfileTable::MakeSortedBucketList() const {
  Bucket** list =
      static_cast<Bucket**>(alloc_(sizeof(Bucket*) * num_buckets_));

  int bucket_count = 0;
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) {
      list[bucket_count++] = curr;
    }
  }
  RAW_DCHECK(bucket_count == num_buckets_, "");

  sort(list, list + num_buckets_, ByAllocatedSpace);
  return list;
}

void HeapProfileTable::IterateOrderedAllocContexts(
    AllocContextIterator callback) const {
  Bucket** list = MakeSortedBucketList();
  AllocContextInfo info;
  for (int i = 0; i < num_buckets_; ++i) {
    *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
    info.stack_depth = list[i]->depth;
    info.call_stack = list[i]->stack;
    callback(info);
  }
  dealloc_(list);
}

int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
  Bucket** list = MakeSortedBucketList();

  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
  // In case buf is too small, we'd rather leave out the last
  // buckets than leave out the /proc/self/maps info.  To ensure that,
  // we actually print the /proc/self/maps info first, then move it to
  // the end of the buffer, then write the bucket info into whatever
  // is remaining, and then move the maps info one last time to close
  // any gaps.  Whew!
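  //
  // Schematically, the buffer evolves like this (sizes not to scale):
  //   [maps info][...........free...........]   after the first snprintf
  //   [...........free...........][maps info]   after moving maps to the end
  //   [header + buckets][..gap..][maps info]    after writing the buckets
  //   [header + buckets][maps info]             after closing the gap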
  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
  if (map_length < 0 || map_length >= size) {
    dealloc_(list);
    return 0;
  }
  bool dummy;   // "wrote_all" -- did /proc/self/maps fit in its entirety?
  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
  RAW_DCHECK(map_length <= size, "");
  char* const map_start = buf + size - map_length;      // move to end
  memmove(map_start, buf, map_length);
  size -= map_length;

  Stats stats;
  memset(&stats, 0, sizeof(stats));
  int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
  if (bucket_length < 0 || bucket_length >= size) {
    dealloc_(list);
    return 0;
  }
  bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                " heapprofile", &stats);

  // Dump the mmap list first.
  if (profile_mmap_) {
    BufferArgs buffer(buf, bucket_length, size);
    MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
    bucket_length = buffer.buflen;
  }

  for (int i = 0; i < num_buckets_; i++) {
    bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                  &stats);
  }
  RAW_DCHECK(bucket_length < size, "");

  dealloc_(list);

  RAW_DCHECK(buf + bucket_length <= map_start, "");
  memmove(buf + bucket_length, map_start, map_length);  // close the gap

  return bucket_length + map_length;
}

// static
void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                          BufferArgs* args) {
  args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
                               "", NULL);
}

inline
void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
                                           const DumpArgs& args) {
  if (v->live()) {
    v->set_live(false);
    return;
  }
  if (v->ignore()) {
    return;
  }
  Bucket b;
  memset(&b, 0, sizeof(b));
  b.allocs = 1;
  b.alloc_size = v->bytes;
  b.depth = v->bucket()->depth;
  b.stack = v->bucket()->stack;
  char buf[1024];
  int len = UnparseBucket(b, buf, 0, sizeof(buf), "", args.profile_stats);
  RawWrite(args.fd, buf, len);
}
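
// Note that a non-live dump doubles as a reset: DumpNonLiveIterator above
// clears the live bit of every live entry it skips, so objects must be
// re-marked live (via MarkAsLive) before the next pass.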

// Callback from NonLiveSnapshot; adds the entry to arg->dest
// if the entry is not live and is not present in arg->base.
void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v,
                                    AddNonLiveArgs* arg) {
  if (v->live()) {
    v->set_live(false);
  } else {
    if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) {
      // Present in arg->base, so do not save.
    } else {
      arg->dest->Add(ptr, *v);
    }
  }
}

bool HeapProfileTable::WriteProfile(const char* file_name,
                                    const Bucket& total,
                                    AllocationMap* allocations) {
  RAW_VLOG(1, "Dumping non-live heap profile to %s", file_name);
  RawFD fd = RawOpenForWriting(file_name);
  if (fd != kIllegalRawFD) {
    RawWrite(fd, kProfileHeader, strlen(kProfileHeader));
    char buf[512];
    int len = UnparseBucket(total, buf, 0, sizeof(buf), " heapprofile",
                            NULL);
    RawWrite(fd, buf, len);
    const DumpArgs args(fd, NULL);
    allocations->Iterate<const DumpArgs&>(DumpNonLiveIterator, args);
    RawWrite(fd, kProcSelfMapsHeader, strlen(kProcSelfMapsHeader));
    DumpProcSelfMaps(fd);
    RawClose(fd);
    return true;
  } else {
    RAW_LOG(ERROR, "Failed dumping filtered heap profile to %s", file_name);
    return false;
  }
}

void HeapProfileTable::CleanupOldProfiles(const char* prefix) {
  if (!FLAGS_cleanup_old_heap_profiles)
    return;
  string pattern = string(prefix) + ".*" + kFileExt;
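  // For example, with prefix "/tmp/myprog" the pattern is
  // "/tmp/myprog.*.heap", matching dumps such as "/tmp/myprog.0001.heap"
  // (the path and file names here are illustrative).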
#if defined(HAVE_GLOB_H)
  glob_t g;
  const int r = glob(pattern.c_str(), GLOB_ERR, NULL, &g);
  if (r == 0 || r == GLOB_NOMATCH) {
    const int prefix_length = strlen(prefix);
    for (int i = 0; i < g.gl_pathc; i++) {
      const char* fname = g.gl_pathv[i];
      if ((strlen(fname) >= prefix_length) &&
          (memcmp(fname, prefix, prefix_length) == 0)) {
        RAW_VLOG(1, "Removing old heap profile %s", fname);
        unlink(fname);
      }
    }
  }
  globfree(&g);
#else   /* HAVE_GLOB_H */
  RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())");
#endif
}

HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() {
  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  address_map_->Iterate(AddToSnapshot, s);
  return s;
}

void HeapProfileTable::ReleaseSnapshot(Snapshot* s) {
  s->~Snapshot();
  dealloc_(s);
}

// Callback from TakeSnapshot; adds a single entry to snapshot
void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v,
                                     Snapshot* snapshot) {
  snapshot->Add(ptr, *v);
}

HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot(
    Snapshot* base) {
  RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n",
           int(total_.allocs - total_.frees),
           int(total_.alloc_size - total_.free_size));

  Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_);
  AddNonLiveArgs args;
  args.dest = s;
  args.base = base;
  address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args);
  RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n",
           int(s->total_.allocs - s->total_.frees),
           int(s->total_.alloc_size - s->total_.free_size));
  return s;
}

// Information kept per unique bucket seen
struct HeapProfileTable::Snapshot::Entry {
  int count;
  int bytes;
  Bucket* bucket;
  Entry() : count(0), bytes(0) { }

  // Order by decreasing bytes
  bool operator<(const Entry& x) const {
    return this->bytes > x.bytes;
  }
};

// State used to generate leak report.  We keep a mapping from Bucket pointer
// to the collected stats for that bucket.
struct HeapProfileTable::Snapshot::ReportState {
  map<Bucket*, Entry> buckets_;
};

// Callback from ReportLeaks; updates ReportState.
void HeapProfileTable::Snapshot::ReportCallback(const void* ptr,
                                                AllocValue* v,
                                                ReportState* state) {
  Entry* e = &state->buckets_[v->bucket()];  // Creates empty Entry first time
  e->bucket = v->bucket();
  e->count++;
  e->bytes += v->bytes;
}

void HeapProfileTable::Snapshot::ReportLeaks(const char* checker_name,
                                             const char* filename,
                                             bool should_symbolize) {
  // This is only used by the heap leak checker, but is intimately
  // tied to the allocation map that belongs in this module and is
  // therefore placed here.
  RAW_LOG(ERROR, "Leak check %s detected leaks of %" PRIuS " bytes "
          "in %" PRIuS " objects",
          checker_name,
          size_t(total_.alloc_size),
          size_t(total_.allocs));

  // Group objects by Bucket
  ReportState state;
  map_.Iterate(&ReportCallback, &state);

  // Sort buckets by decreasing leaked size
  const int n = state.buckets_.size();
  Entry* entries = new Entry[n];
  int dst = 0;
  for (map<Bucket*,Entry>::const_iterator iter = state.buckets_.begin();
       iter != state.buckets_.end();
       ++iter) {
    entries[dst++] = iter->second;
  }
  sort(entries, entries + n);

  // Report a bounded number of leaks to keep the leak report from
  // growing too long.
  const int to_report =
      (FLAGS_heap_check_max_leaks > 0 &&
       n > FLAGS_heap_check_max_leaks) ? FLAGS_heap_check_max_leaks : n;
  RAW_LOG(ERROR, "The %d largest leaks:", to_report);

  // Print
  SymbolTable symbolization_table;
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    for (int j = 0; j < e.bucket->depth; j++) {
      symbolization_table.Add(e.bucket->stack[j]);
    }
  }
  static const int kBufSize = 2<<10;
  char buffer[kBufSize];
  if (should_symbolize)
    symbolization_table.Symbolize();
  for (int i = 0; i < to_report; i++) {
    const Entry& e = entries[i];
    base::RawPrinter printer(buffer, kBufSize);
    printer.Printf("Leak of %d bytes in %d objects allocated from:\n",
                   e.bytes, e.count);
    for (int j = 0; j < e.bucket->depth; j++) {
      const void* pc = e.bucket->stack[j];
      printer.Printf("\t@ %" PRIxPTR " %s\n",
          reinterpret_cast<uintptr_t>(pc), symbolization_table.GetSymbol(pc));
    }
    RAW_LOG(ERROR, "%s", buffer);
  }

  if (to_report < n) {
    RAW_LOG(ERROR, "Skipping leaks numbered %d..%d",
            to_report, n-1);
  }
  delete[] entries;

  // TODO: Dump the sorted Entry list instead of dumping raw data?
  // (should be much shorter)
  if (!HeapProfileTable::WriteProfile(filename, total_, &map_)) {
    RAW_LOG(ERROR, "Could not write pprof profile to %s", filename);
  }
}

void HeapProfileTable::Snapshot::ReportObject(const void* ptr,
                                              AllocValue* v,
                                              char* unused) {
  // Perhaps also log the allocation stack trace (unsymbolized)
  // on this line in case somebody finds it useful.
  RAW_LOG(ERROR, "leaked %" PRIuS " byte object %p", v->bytes, ptr);
}

void HeapProfileTable::Snapshot::ReportIndividualObjects() {
  char unused;
  map_.Iterate(ReportObject, &unused);
}