heap-profiler.cc 22 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620
  1. // -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
  2. // Copyright (c) 2005, Google Inc.
  3. // All rights reserved.
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are
  7. // met:
  8. //
  9. // * Redistributions of source code must retain the above copyright
  10. // notice, this list of conditions and the following disclaimer.
  11. // * Redistributions in binary form must reproduce the above
  12. // copyright notice, this list of conditions and the following disclaimer
  13. // in the documentation and/or other materials provided with the
  14. // distribution.
  15. // * Neither the name of Google Inc. nor the names of its
  16. // contributors may be used to endorse or promote products derived from
  17. // this software without specific prior written permission.
  18. //
  19. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. // ---
  31. // Author: Sanjay Ghemawat
  32. //
  33. // TODO: Log large allocations
  34. #include <config.h>
  35. #include <stddef.h>
  36. #include <stdio.h>
  37. #include <stdlib.h>
  38. #ifdef HAVE_UNISTD_H
  39. #include <unistd.h>
  40. #endif
  41. #ifdef HAVE_INTTYPES_H
  42. #include <inttypes.h>
  43. #endif
  44. #ifdef HAVE_FCNTL_H
  45. #include <fcntl.h> // for open()
  46. #endif
  47. #ifdef HAVE_MMAP
  48. #include <sys/mman.h>
  49. #endif
  50. #include <errno.h>
  51. #include <assert.h>
  52. #include <sys/types.h>
  53. #include <signal.h>
  54. #include <algorithm>
  55. #include <string>
  56. #include <gperftools/heap-profiler.h>
  57. #include "base/logging.h"
  58. #include "base/basictypes.h" // for PRId64, among other things
  59. #include "base/googleinit.h"
  60. #include "base/commandlineflags.h"
  61. #include "malloc_hook-inl.h"
  62. #include "tcmalloc_guard.h"
  63. #include <gperftools/malloc_hook.h>
  64. #include <gperftools/malloc_extension.h>
  65. #include "base/spinlock.h"
  66. #include "base/low_level_alloc.h"
  67. #include "base/sysinfo.h" // for GetUniquePathFromEnv()
  68. #include "heap-profile-table.h"
  69. #include "memory_region_map.h"
  70. #ifndef PATH_MAX
  71. #ifdef MAXPATHLEN
  72. #define PATH_MAX MAXPATHLEN
  73. #else
  74. #define PATH_MAX 4096 // seems conservative for max filename len!
  75. #endif
  76. #endif
  77. using STL_NAMESPACE::string;
  78. using STL_NAMESPACE::sort;
//----------------------------------------------------------------------
// Flags that control heap-profiling
//
// The thread-safety of the profiler depends on these being immutable
// after main starts, so don't change them.
//----------------------------------------------------------------------

// Each flag's default can be overridden through an environment variable,
// so profiling behavior is configurable without recompiling.
DEFINE_int64(heap_profile_allocation_interval,
             EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes allocated by the program since "
             "the last dump.");
DEFINE_int64(heap_profile_deallocation_interval,
             EnvToInt64("HEAP_PROFILE_DEALLOCATION_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes deallocated by the program "
             "since the last dump.");
// We could also add flags that report whenever inuse_bytes changes by
// X or -X, but there hasn't been a need for that yet, so we haven't.
DEFINE_int64(heap_profile_inuse_interval,
             EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
             "If non-zero, dump heap profiling information whenever "
             "the high-water memory usage mark increases by the specified "
             "number of bytes.");
DEFINE_int64(heap_profile_time_interval,
             EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of seconds since the last dump.");
DEFINE_bool(mmap_log,
            EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
            "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
            EnvToBool("HEAP_PROFILE_MMAP", false),
            "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
DEFINE_bool(only_mmap_profile,
            EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
            "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
            "do not profile malloc/new/etc");
//----------------------------------------------------------------------
// Locking
//----------------------------------------------------------------------

// A pthread_mutex has way too much lock contention to be used here.
//
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);

//----------------------------------------------------------------------
// Simple allocator for heap profiler's internal memory
//----------------------------------------------------------------------

// Private arena for the profiler's own bookkeeping; created in
// HeapProfilerStart and destroyed in HeapProfilerStop.  Allocating from
// the profiled heap instead would recurse into our own hooks.
static LowLevelAlloc::Arena *heap_profiler_memory;

// Allocates "bytes" from the profiler's private arena.
static void* ProfilerMalloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
}

// Returns memory obtained via ProfilerMalloc to the arena.
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}

// We use buffers of this size in DoGetHeapProfile.
static const int kProfileBufferSize = 1 << 20;

// This is a last-ditch buffer we use in DumpProfileLocked in case we
// can't allocate more memory from ProfilerMalloc.  We expect this
// will be used by HeapProfileEndWriter when the application has to
// exit due to out-of-memory.  This buffer is allocated in
// HeapProfilerStart.  Access to this must be protected by heap_lock.
static char* global_profiler_buffer = NULL;

//----------------------------------------------------------------------
// Profiling control/state data
//----------------------------------------------------------------------

// Access to all of these is protected by heap_lock.
static bool is_on = false;            // If are on as a subsytem.
static bool dumping = false;          // Dumping status to prevent recursion
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet)
static int dump_count = 0;            // How many dumps so far
static int64 last_dump_alloc = 0;     // alloc_size when did we last dump
static int64 last_dump_free = 0;      // free_size when did we last dump
static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump
static int64 last_dump_time = 0;      // The time of the last dump
static HeapProfileTable* heap_profile = NULL;  // the heap profile table
  158. //----------------------------------------------------------------------
  159. // Profile generation
  160. //----------------------------------------------------------------------
  161. // Input must be a buffer of size at least 1MB.
  162. static char* DoGetHeapProfileLocked(char* buf, int buflen) {
  163. // We used to be smarter about estimating the required memory and
  164. // then capping it to 1MB and generating the profile into that.
  165. if (buf == NULL || buflen < 1)
  166. return NULL;
  167. RAW_DCHECK(heap_lock.IsHeld(), "");
  168. int bytes_written = 0;
  169. if (is_on) {
  170. HeapProfileTable::Stats const stats = heap_profile->total();
  171. (void)stats; // avoid an unused-variable warning in non-debug mode.
  172. bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
  173. // FillOrderedProfile should not reduce the set of active mmap-ed regions,
  174. // hence MemoryRegionMap will let us remove everything we've added above:
  175. RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
  176. // if this fails, we somehow removed by FillOrderedProfile
  177. // more than we have added.
  178. }
  179. buf[bytes_written] = '\0';
  180. RAW_DCHECK(bytes_written == strlen(buf), "");
  181. return buf;
  182. }
  183. extern "C" char* GetHeapProfile() {
  184. // Use normal malloc: we return the profile to the user to free it:
  185. char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
  186. SpinLockHolder l(&heap_lock);
  187. return DoGetHeapProfileLocked(buffer, kProfileBufferSize);
  188. }
  189. // defined below
  190. static void NewHook(const void* ptr, size_t size);
  191. static void DeleteHook(const void* ptr);
  192. // Helper for HeapProfilerDump.
  193. static void DumpProfileLocked(const char* reason) {
  194. RAW_DCHECK(heap_lock.IsHeld(), "");
  195. RAW_DCHECK(is_on, "");
  196. RAW_DCHECK(!dumping, "");
  197. if (filename_prefix == NULL) return; // we do not yet need dumping
  198. dumping = true;
  199. // Make file name
  200. char file_name[1000];
  201. dump_count++;
  202. snprintf(file_name, sizeof(file_name), "%s.%04d%s",
  203. filename_prefix, dump_count, HeapProfileTable::kFileExt);
  204. // Dump the profile
  205. RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
  206. // We must use file routines that don't access memory, since we hold
  207. // a memory lock now.
  208. RawFD fd = RawOpenForWriting(file_name);
  209. if (fd == kIllegalRawFD) {
  210. RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
  211. dumping = false;
  212. return;
  213. }
  214. // This case may be impossible, but it's best to be safe.
  215. // It's safe to use the global buffer: we're protected by heap_lock.
  216. if (global_profiler_buffer == NULL) {
  217. global_profiler_buffer =
  218. reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
  219. }
  220. char* profile = DoGetHeapProfileLocked(global_profiler_buffer,
  221. kProfileBufferSize);
  222. RawWrite(fd, profile, strlen(profile));
  223. RawClose(fd);
  224. dumping = false;
  225. }
//----------------------------------------------------------------------
// Profile collection
//----------------------------------------------------------------------

// Dump a profile after either an allocation or deallocation, if
// the memory use has changed enough since the last dump.
// Caller must hold heap_lock.  The "dumping" flag suppresses re-entry
// while DumpProfileLocked is in progress.
static void MaybeDumpProfileLocked() {
  if (!dumping) {
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;
    bool need_to_dump = false;
    char buf[128];  // human-readable reason passed to DumpProfileLocked
    int64 current_time = time(NULL);
    // The four triggers are checked in priority order; at most one
    // fires per call.  Note: the time trigger resets last_dump_time
    // here, whereas the byte counters are reset only after the dump.
    if (FLAGS_heap_profile_allocation_interval > 0 &&
        total.alloc_size >=
        last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB allocated cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.alloc_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
               total.free_size >=
               last_dump_free + FLAGS_heap_profile_deallocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB freed cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.free_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_inuse_interval > 0 &&
               inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " MB currently in use",
               inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_time_interval > 0 &&
               current_time - last_dump_time >=
               FLAGS_heap_profile_time_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump",
               current_time - last_dump_time);
      need_to_dump = true;
      last_dump_time = current_time;
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);
      // Re-baseline the byte-count triggers at the post-dump totals.
      last_dump_alloc = total.alloc_size;
      last_dump_free = total.free_size;
      if (inuse_bytes > high_water_mark)
        high_water_mark = inuse_bytes;
    }
  }
}
  275. // Record an allocation in the profile.
  276. static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
  277. // Take the stack trace outside the critical section.
  278. void* stack[HeapProfileTable::kMaxStackDepth];
  279. int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack);
  280. SpinLockHolder l(&heap_lock);
  281. if (is_on) {
  282. heap_profile->RecordAlloc(ptr, bytes, depth, stack);
  283. MaybeDumpProfileLocked();
  284. }
  285. }
  286. // Record a deallocation in the profile.
  287. static void RecordFree(const void* ptr) {
  288. SpinLockHolder l(&heap_lock);
  289. if (is_on) {
  290. heap_profile->RecordFree(ptr);
  291. MaybeDumpProfileLocked();
  292. }
  293. }
  294. //----------------------------------------------------------------------
  295. // Allocation/deallocation hooks for MallocHook
  296. //----------------------------------------------------------------------
  297. // static
  298. void NewHook(const void* ptr, size_t size) {
  299. if (ptr != NULL) RecordAlloc(ptr, size, 0);
  300. }
  301. // static
  302. void DeleteHook(const void* ptr) {
  303. if (ptr != NULL) RecordFree(ptr);
  304. }
// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
// Callback for DumpStackTrace: forwards one pre-formatted line to RAW_LOG.
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif
// MallocHook callback: logs each mmap() call when --mmap_log is set.
// This only logs; region accounting is done elsewhere (MemoryRegionMap
// is initialized for that in HeapProfilerStart).
static void MmapHook(const void* result, const void* start, size_t size,
                     int prot, int flags, int fd, off_t offset) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mmap(start=0x%" PRIxPTR ", len=%" PRIuS ", prot=0x%x, flags=0x%x, "
            "fd=%d, offset=0x%x) = 0x%" PRIxPTR "",
            (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
            (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
// MallocHook callback: logs each mremap() call when --mmap_log is set.
static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%" PRIxPTR ", old_size=%" PRIuS ", "
            "new_size=%" PRIuS ", flags=0x%x, new_addr=0x%" PRIxPTR ") = "
            "0x%" PRIxPTR "",
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
// MallocHook callback: logs each munmap() call when --mmap_log is set.
static void MunmapHook(const void* ptr, size_t size) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxS not just '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%" PRIuS ")",
            (uintptr_t) ptr, size);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
// MallocHook callback: logs each sbrk() call when --mmap_log is set.
static void SbrkHook(const void* result, ptrdiff_t increment) {
  if (FLAGS_mmap_log) {  // log it
    RAW_LOG(INFO, "sbrk(inc=%" PRIdS ") = 0x%" PRIxPTR "",
            increment, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}
//----------------------------------------------------------------------
// Starting/stopping/dumping
//----------------------------------------------------------------------

// Turns heap profiling on.  Profile files are named "<prefix>.NNNN<ext>".
// No-op if profiling is already on.  Setup order matters throughout;
// see the inline comments.
extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);
  if (is_on) return;
  is_on = true;
  RAW_VLOG(0, "Starting tracking the heap");
  // This should be done before the hooks are set up, since it should
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();
  if (FLAGS_only_mmap_profile) {
    // "only mmap" implies "mmap": force the broader flag on.
    FLAGS_mmap_profile = true;
  }
  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
    // call stack traces of at least size kMaxStackDepth:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth,
                          /* use_buckets */ true);
  }
  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging:
    RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
    RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  }
  // The private arena must exist before any ProfilerMalloc call below.
  heap_profiler_memory =
      LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
  // Reserve space now for the heap profiler, so we can still write a
  // heap profile even if the application runs out of memory.
  global_profiler_buffer =
      reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
  // Placement-new the table into arena memory; torn down in
  // HeapProfilerStop with an explicit destructor call + ProfilerFree.
  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
      HeapProfileTable(ProfilerMalloc, ProfilerFree, FLAGS_mmap_profile);
  last_dump_alloc = 0;
  last_dump_free = 0;
  high_water_mark = 0;
  last_dump_time = 0;
  // We do not reset dump_count so if the user does a sequence of
  // HeapProfilerStart/HeapProfileStop, we will get a continuous
  // sequence of profiles.
  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free.
    RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
  }
  // Copy filename prefix into arena memory so it survives the caller.
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';
}
  421. extern "C" int IsHeapProfilerRunning() {
  422. SpinLockHolder l(&heap_lock);
  423. return is_on ? 1 : 0; // return an int, because C code doesn't have bool
  424. }
  425. extern "C" void HeapProfilerStop() {
  426. SpinLockHolder l(&heap_lock);
  427. if (!is_on) return;
  428. if (FLAGS_only_mmap_profile == false) {
  429. // Unset our new/delete hooks, checking they were set:
  430. RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
  431. RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
  432. }
  433. if (FLAGS_mmap_log) {
  434. // Restore mmap/sbrk hooks, checking that our hooks were set:
  435. RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
  436. RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
  437. RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
  438. RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  439. }
  440. // free profile
  441. heap_profile->~HeapProfileTable();
  442. ProfilerFree(heap_profile);
  443. heap_profile = NULL;
  444. // free output-buffer memory
  445. ProfilerFree(global_profiler_buffer);
  446. // free prefix
  447. ProfilerFree(filename_prefix);
  448. filename_prefix = NULL;
  449. if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
  450. RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
  451. }
  452. if (FLAGS_mmap_profile) {
  453. MemoryRegionMap::Shutdown();
  454. }
  455. is_on = false;
  456. }
  457. extern "C" void HeapProfilerDump(const char *reason) {
  458. SpinLockHolder l(&heap_lock);
  459. if (is_on && !dumping) {
  460. DumpProfileLocked(reason);
  461. }
  462. }
// Signal handler that is registered when a user selectable signal
// number is defined in the environment variable HEAPPROFILESIGNAL.
static void HeapProfilerDumpSignal(int signal_number) {
  (void)signal_number;
  // TryLock, not Lock: we may be interrupting a thread that already
  // holds heap_lock, and blocking here would deadlock.  If the lock is
  // busy, this dump request is simply dropped.
  if (!heap_lock.TryLock()) {
    return;
  }
  if (is_on && !dumping) {
    DumpProfileLocked("signal");
  }
  heap_lock.Unlock();
}
  475. //----------------------------------------------------------------------
  476. // Initialization/finalization code
  477. //----------------------------------------------------------------------
  478. // Initialization code
  479. static void HeapProfilerInit() {
  480. // Everything after this point is for setting up the profiler based on envvar
  481. char fname[PATH_MAX];
  482. if (!GetUniquePathFromEnv("HEAPPROFILE", fname)) {
  483. return;
  484. }
  485. // We do a uid check so we don't write out files in a setuid executable.
  486. #ifdef HAVE_GETEUID
  487. if (getuid() != geteuid()) {
  488. RAW_LOG(WARNING, ("HeapProfiler: ignoring HEAPPROFILE because "
  489. "program seems to be setuid\n"));
  490. return;
  491. }
  492. #endif
  493. char *signal_number_str = getenv("HEAPPROFILESIGNAL");
  494. if (signal_number_str != NULL) {
  495. long int signal_number = strtol(signal_number_str, NULL, 10);
  496. intptr_t old_signal_handler = reinterpret_cast<intptr_t>(signal(signal_number, HeapProfilerDumpSignal));
  497. if (old_signal_handler == reinterpret_cast<intptr_t>(SIG_ERR)) {
  498. RAW_LOG(FATAL, "Failed to set signal. Perhaps signal number %s is invalid\n", signal_number_str);
  499. } else if (old_signal_handler == 0) {
  500. RAW_LOG(INFO,"Using signal %d as heap profiling switch", signal_number);
  501. } else {
  502. RAW_LOG(FATAL, "Signal %d already in use\n", signal_number);
  503. }
  504. }
  505. HeapProfileTable::CleanupOldProfiles(fname);
  506. HeapProfilerStart(fname);
  507. }
// class used for finalization -- dumps the heap-profile at program exit
struct HeapProfileEndWriter {
  ~HeapProfileEndWriter() {
    char buf[128];  // dump "reason" message handed to HeapProfilerDump
    if (heap_profile) {
      const HeapProfileTable::Stats& total = heap_profile->total();
      const int64 inuse_bytes = total.alloc_size - total.free_size;
      // Report in the largest unit (MB > kB > bytes) that is non-zero.
      if ((inuse_bytes >> 20) > 0) {
        snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " MB in use"),
                 inuse_bytes >> 20);
      } else if ((inuse_bytes >> 10) > 0) {
        snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " kB in use"),
                 inuse_bytes >> 10);
      } else {
        snprintf(buf, sizeof(buf), ("Exiting, %" PRId64 " bytes in use"),
                 inuse_bytes);
      }
    } else {
      snprintf(buf, sizeof(buf), ("Exiting"));
    }
    // HeapProfilerDump is itself a no-op unless profiling is on.
    HeapProfilerDump(buf);
  }
};
// We want to make sure tcmalloc is up and running before starting the profiler
static const TCMallocGuard tcmalloc_initializer;
REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
// Static object whose destructor runs at program exit and emits a
// final "Exiting" heap dump (see HeapProfileEndWriter above).
static HeapProfileEndWriter heap_profile_end_writer;