heap-checker_unittest.cc 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538
  1. // -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
  2. // Copyright (c) 2005, Google Inc.
  3. // All rights reserved.
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are
  7. // met:
  8. //
  9. // * Redistributions of source code must retain the above copyright
  10. // notice, this list of conditions and the following disclaimer.
  11. // * Redistributions in binary form must reproduce the above
  12. // copyright notice, this list of conditions and the following disclaimer
  13. // in the documentation and/or other materials provided with the
  14. // distribution.
  15. // * Neither the name of Google Inc. nor the names of its
  16. // contributors may be used to endorse or promote products derived from
  17. // this software without specific prior written permission.
  18. //
  19. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. // ---
  31. // Author: Maxim Lifantsev
  32. //
  33. // Running:
  34. // ./heap-checker_unittest
  35. //
  36. // If the unittest crashes because it can't find pprof, try:
  37. // PPROF_PATH=/usr/local/someplace/bin/pprof ./heap-checker_unittest
  38. //
  39. // To test that the whole-program heap checker will actually cause a leak, try:
  40. // HEAPCHECK_TEST_LEAK= ./heap-checker_unittest
  41. // HEAPCHECK_TEST_LOOP_LEAK= ./heap-checker_unittest
  42. //
  43. // Note: Both of the above commands *should* abort with an error message.
  44. // CAVEAT: Do not use vector<> and string on-heap objects in this test,
  45. // otherwise the test can sometimes fail for tricky leak checks
  46. // when we want some allocated object not to be found live by the heap checker.
  47. // This can happen with memory allocators like tcmalloc that can allocate
  48. // heap objects back to back without any book-keeping data in between.
  49. // What happens is that end-of-storage pointers of a live vector
  50. // (or a string depending on the STL implementation used)
  51. // can happen to point to that other heap-allocated
  52. // object that is not reachable otherwise and that
  53. // we don't want to be reachable.
  54. //
  55. // The implication of this for real leak checking
  56. // is just one more chance for the liveness flood to be inexact
  57. // (see the comment in our .h file).
  58. #include "config_for_unittests.h"
  59. #ifdef HAVE_POLL_H
  60. #include <poll.h>
  61. #endif
  62. #if defined HAVE_STDINT_H
  63. #include <stdint.h> // to get uint16_t (ISO naming madness)
  64. #elif defined HAVE_INTTYPES_H
  65. #include <inttypes.h> // another place uint16_t might be defined
  66. #endif
  67. #include <sys/types.h>
  68. #include <stdlib.h>
  69. #include <errno.h> // errno
  70. #ifdef HAVE_UNISTD_H
  71. #include <unistd.h> // for sleep(), geteuid()
  72. #endif
  73. #ifdef HAVE_MMAP
  74. #include <sys/mman.h>
  75. #endif
  76. #include <fcntl.h> // for open(), close()
  77. #ifdef HAVE_EXECINFO_H
  78. #include <execinfo.h> // backtrace
  79. #endif
  80. #ifdef HAVE_GRP_H
  81. #include <grp.h> // getgrent, getgrnam
  82. #endif
  83. #ifdef HAVE_PWD_H
  84. #include <pwd.h>
  85. #endif
  86. #include <algorithm>
  87. #include <iostream> // for cout
  88. #include <iomanip> // for hex
  89. #include <list>
  90. #include <map>
  91. #include <memory>
  92. #include <set>
  93. #include <string>
  94. #include <vector>
  95. #include "base/commandlineflags.h"
  96. #include "base/googleinit.h"
  97. #include "base/logging.h"
  98. #include "base/commandlineflags.h"
  99. #include "base/thread_lister.h"
  100. #include <gperftools/heap-checker.h>
  101. #include "memory_region_map.h"
  102. #include <gperftools/malloc_extension.h>
  103. #include <gperftools/stacktrace.h>
  104. // On systems (like freebsd) that don't define MAP_ANONYMOUS, use the old
  105. // form of the name instead.
  106. #ifndef MAP_ANONYMOUS
  107. # define MAP_ANONYMOUS MAP_ANON
  108. #endif
  109. using namespace std;
// ========================================================================= //

// TODO(maxim): write a shell script to test that these indeed crash us
// (i.e. we do detect leaks)
// Maybe add more such crash tests.

// Test flags.  Each default can be overridden at run time via the
// corresponding HEAP_CHECKER_TEST_* environment variable (see the
// EnvToBool calls below).

// Intentionally leak an object so the whole-program check aborts.
DEFINE_bool(test_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_LEAK", false),
            "If should cause a leak crash");
// Intentionally leak a cycle of objects so the check aborts.
DEFINE_bool(test_loop_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_LOOP_LEAK", false),
            "If should cause a looped leak crash");
DEFINE_bool(test_register_leak,
            EnvToBool("HEAP_CHECKER_TEST_TEST_REGISTER_LEAK", false),
            "If should cause a leak crash by hiding a pointer "
            "that is only in a register");
DEFINE_bool(test_cancel_global_check,
            EnvToBool("HEAP_CHECKER_TEST_TEST_CANCEL_GLOBAL_CHECK", false),
            "If should test HeapLeakChecker::CancelGlobalCheck "
            "when --test_leak or --test_loop_leak are given; "
            "the test should not fail then");
DEFINE_bool(maybe_stripped,
            EnvToBool("HEAP_CHECKER_TEST_MAYBE_STRIPPED", true),
            "If we think we can be a stripped binary");
DEFINE_bool(interfering_threads,
            EnvToBool("HEAP_CHECKER_TEST_INTERFERING_THREADS", true),
            "If we should use threads trying "
            "to interfere with leak checking");
DEFINE_bool(hoarding_threads,
            EnvToBool("HEAP_CHECKER_TEST_HOARDING_THREADS", true),
            "If threads (usually the manager thread) are known "
            "to retain some old state in their global buffers, "
            "so that it's hard to force leaks when threads are around");
// TODO(maxim): Change the default to false
// when the standard environment uses NPTL threads:
// they do not seem to have this problem.
DEFINE_bool(no_threads,
            EnvToBool("HEAP_CHECKER_TEST_NO_THREADS", false),
            "If we should not use any threads");
// This is used so we can make can_create_leaks_reliably true
// for any pthread implementation and test with that.
DECLARE_int64(heap_check_max_pointer_offset);  // in heap-checker.cc
DECLARE_string(heap_check);  // in heap-checker.cc

#define WARN_IF(cond, msg) LOG_IF(WARNING, cond, msg)

// This is an evil macro!  Be very careful using it...
#undef VLOG  // and we start by evilling overriding logging.h VLOG
#define VLOG(lvl) if (FLAGS_verbose >= (lvl)) cout << "\n"
// This is, likewise, evil
#define LOGF VLOG(INFO)

static void RunHeapBusyThreads();  // below
// Minimal closure interface: a deferred unit of work.  Concrete
// CallbackN adapters below delete themselves at the end of Run().
class Closure {
 public:
  virtual ~Closure() { }
  virtual void Run() = 0;
};
// Adapter: zero-argument function -> Closure.  One-shot: Run() invokes
// the function and then deletes this object, so instances must be
// heap-allocated (see NewCallback below).
class Callback0 : public Closure {
 public:
  typedef void (*FunctionSignature)();
  inline Callback0(FunctionSignature f) : f_(f) {}
  virtual void Run() { (*f_)(); delete this; }
 private:
  FunctionSignature f_;
};
  171. template <class P1> class Callback1 : public Closure {
  172. public:
  173. typedef void (*FunctionSignature)(P1);
  174. inline Callback1<P1>(FunctionSignature f, P1 p1) : f_(f), p1_(p1) {}
  175. virtual void Run() { (*f_)(p1_); delete this; }
  176. private:
  177. FunctionSignature f_;
  178. P1 p1_;
  179. };
  180. template <class P1, class P2> class Callback2 : public Closure {
  181. public:
  182. typedef void (*FunctionSignature)(P1,P2);
  183. inline Callback2<P1,P2>(FunctionSignature f, P1 p1, P2 p2) : f_(f), p1_(p1), p2_(p2) {}
  184. virtual void Run() { (*f_)(p1_, p2_); delete this; }
  185. private:
  186. FunctionSignature f_;
  187. P1 p1_;
  188. P2 p2_;
  189. };
  190. inline Callback0* NewCallback(void (*function)()) {
  191. return new Callback0(function);
  192. }
  193. template <class P1>
  194. inline Callback1<P1>* NewCallback(void (*function)(P1), P1 p1) {
  195. return new Callback1<P1>(function, p1);
  196. }
  197. template <class P1, class P2>
  198. inline Callback2<P1,P2>* NewCallback(void (*function)(P1,P2), P1 p1, P2 p2) {
  199. return new Callback2<P1,P2>(function, p1, p2);
  200. }
// Set to true at end of main, so threads know.  Not entirely thread-safe!,
// but probably good enough.
static bool g_have_exited_main = false;

// If we can reliably create leaks (i.e. make leaked object
// really unreachable from any global data).
static bool can_create_leaks_reliably = false;
  207. // We use a simple allocation wrapper
  208. // to make sure we wipe out the newly allocated objects
  209. // in case they still happened to contain some pointer data
  210. // accidentally left by the memory allocator.
  211. struct Initialized { };
  212. static Initialized initialized;
  213. void* operator new(size_t size, const Initialized&) {
  214. // Below we use "p = new(initialized) Foo[1];" and "delete[] p;"
  215. // instead of "p = new(initialized) Foo;"
  216. // when we need to delete an allocated object.
  217. void* p = malloc(size);
  218. memset(p, 0, size);
  219. return p;
  220. }
  221. void* operator new[](size_t size, const Initialized&) {
  222. char* p = new char[size];
  223. memset(p, 0, size);
  224. return p;
  225. }
static void DoWipeStack(int n);  // defined below
// Scribble zeros over ~20 stack frames' worth of data to destroy any
// stale heap pointers earlier calls may have left on the stack.
static void WipeStack() { DoWipeStack(20); }
// Give background threads time to run, sanity-check the heap through
// MallocExtension, then wipe the stack of any heap pointers that the
// verification itself may have left behind.
static void Pause() {
  poll(NULL, 0, 77);  // time for thread activity in HeapBusyThreadBody

  // Indirectly test malloc_extension.*:
  CHECK(MallocExtension::instance()->VerifyAllMemory());
  int blocks;
  size_t total;
  int histogram[kMallocHistogramSize];
  if (MallocExtension::instance()
        ->MallocMemoryStats(&blocks, &total, histogram)  &&  total != 0) {
    VLOG(3) << "Malloc stats: " << blocks << " blocks of "
            << total << " bytes";
    for (int i = 0; i < kMallocHistogramSize; ++i) {
      if (histogram[i]) {
        VLOG(3) << "  Malloc histogram at " << i << " : " << histogram[i];
      }
    }
  }
  WipeStack();  // e.g. MallocExtension::VerifyAllMemory
                // can leave pointers to heap objects on stack
}
// Make gcc think a pointer is "used" (prevents the compiler from
// optimizing away the variable that holds it).
template <class T>
static void Use(T** foo) {
  VLOG(2) << "Dummy-using " << static_cast<void*>(*foo) << " at " << foo;
}
// Arbitrary value, but not such that xor'ing with it is likely
// to map one valid pointer to another valid pointer:
static const uintptr_t kHideMask =
  static_cast<uintptr_t>(0xF03A5F7BF03A5F7BLL);

// Helpers to hide a pointer from live data traversal.
// We just xor the pointer so that (with high probability)
// it's not a valid address of a heap object anymore.
// Both Hide and UnHide must be executed within RunHidden() below
// to prevent leaving stale data on active stack that can be a pointer
// to a heap object that is not actually reachable via live variables.
// (UnHide might leave heap pointer value for an object
// that will be deallocated but later another object
// can be allocated at the same heap address.)
template <class T>
static void Hide(T** ptr) {
  // we cast values, not dereferenced pointers, so no aliasing issues:
  *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
  VLOG(2) << "hid: " << static_cast<void*>(*ptr);
}
// Inverse of Hide(): xor with kHideMask again to recover the original
// pointer value.  Must run inside RunHidden() (see comment above Hide).
template <class T>
static void UnHide(T** ptr) {
  VLOG(2) << "unhiding: " << static_cast<void*>(*ptr);
  // we cast values, not dereferenced pointers, so no aliasing issues:
  *ptr = reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(*ptr) ^ kHideMask);
}
// Log a hidden pointer together with the xor mask, so a human reading
// the test output can reconstruct the real heap address.
static void LogHidden(const char* message, const void* ptr) {
  LOGF << message << " : "
       << ptr << " ^ " << reinterpret_cast<void*>(kHideMask) << endl;
}
// volatile to fool the compiler against inlining the calls to these
void (*volatile run_hidden_ptr)(Closure* c, int n);
void (*volatile wipe_stack_ptr)(int n);

// Recurse n frames down (through the volatile pointer, so real frames
// exist), run the closure at the bottom, then wipe each frame's stack
// data on the way back up.
static void DoRunHidden(Closure* c, int n) {
  if (n) {
    VLOG(10) << "Level " << n << " at " << &n;
    (*run_hidden_ptr)(c, n-1);
    (*wipe_stack_ptr)(n);
    sleep(0);  // undo -foptimize-sibling-calls
  } else {
    c->Run();
  }
}
// Recursively zero a local array in each of n stack frames, destroying
// stale heap pointers left there.  The recursion goes through the
// volatile wipe_stack_ptr so the compiler cannot collapse the frames.
/*static*/ void DoWipeStack(int n) {
  VLOG(10) << "Wipe level " << n << " at " << &n;
  if (n) {
    const int sz = 30;
    volatile int arr[sz] ATTRIBUTE_UNUSED;
    for (int i = 0; i < sz; ++i) arr[i] = 0;
    (*wipe_stack_ptr)(n-1);
    sleep(0);  // undo -foptimize-sibling-calls
  }
}
// This executes closure c several stack frames down from the current one
// and then makes an effort to also wipe out the stack data that was used by
// the closure.
// This way we prevent leak checker from finding any temporary pointers
// of the closure execution on the stack and deciding that
// these pointers (and the pointed objects) are still live.
static void RunHidden(Closure* c) {
  DoRunHidden(c, 15);
  DoWipeStack(20);
}
// Allocate 'size' zeroed bytes and store the *hidden* (xor-masked)
// address into *ptr; only hidden copies of the address touch the stack.
static void DoAllocHidden(size_t size, void** ptr) {
  void* p = new(initialized) char[size];
  Hide(&p);
  Use(&p);  // use only hidden versions
  VLOG(2) << "Allocated hidden " << p << " at " << &p;
  *ptr = p;  // assign the hidden versions
}

// Allocate deep inside RunHidden and return the hidden pointer: the
// object is then invisible to the leak checker's liveness flood.
static void* AllocHidden(size_t size) {
  void* r;
  RunHidden(NewCallback(DoAllocHidden, size, &r));
  return r;
}
// Un-hide *ptr and delete the underlying allocation; runs deep in the
// stack so the momentarily-unhidden address leaves no live trace.
static void DoDeAllocHidden(void** ptr) {
  Use(ptr);  // use only hidden versions
  void* p = *ptr;
  VLOG(2) << "Deallocating hidden " << p;
  UnHide(&p);
  delete [] reinterpret_cast<char*>(p);
}

// Free a hidden allocation made by AllocHidden and clear *ptr.
static void DeAllocHidden(void** ptr) {
  RunHidden(NewCallback(DoDeAllocHidden, ptr));
  *ptr = NULL;
  Use(ptr);
}
// In optimized builds, pre-allocate (and deliberately never free) a
// batch of objects of the given size, so that an object we are about to
// "leak" cannot be placed at a recycled address that some stale pointer
// still references (which would make it look live).
void PreventHeapReclaiming(size_t size) {
#ifdef NDEBUG
  if (true) {
    static void** no_reclaim_list = NULL;
    CHECK(size >= sizeof(void*));
    // We can't use malloc_reclaim_memory flag in opt mode as debugallocation.cc
    // is not used.  Instead we allocate a bunch of heap objects that are
    // of the same size as what we are going to leak to ensure that the object
    // we are about to leak is not at the same address as some old allocated
    // and freed object that might still have pointers leading to it.
    for (int i = 0; i < 100; ++i) {
      void** p = reinterpret_cast<void**>(new(initialized) char[size]);
      p[0] = no_reclaim_list;  // thread onto the intentionally-kept list
      no_reclaim_list = p;
    }
  }
#endif
}
  357. static bool RunSilent(HeapLeakChecker* check,
  358. bool (HeapLeakChecker::* func)()) {
  359. // By default, don't print the 'we detected a leak' message in the
  360. // cases we're expecting a leak (we still print when --v is >= 1).
  361. // This way, the logging output is less confusing: we only print
  362. // "we detected a leak", and how to diagnose it, for *unexpected* leaks.
  363. int32 old_FLAGS_verbose = FLAGS_verbose;
  364. if (!VLOG_IS_ON(1)) // not on a verbose setting
  365. FLAGS_verbose = FATAL; // only log fatal errors
  366. const bool retval = (check->*func)();
  367. FLAGS_verbose = old_FLAGS_verbose;
  368. return retval;
  369. }
  370. #define RUN_SILENT(check, func) RunSilent(&(check), &HeapLeakChecker::func)
// Which profile comparison a test expects the checker to perform.
enum CheckType { SAME_HEAP, NO_LEAKS };

// Assert that 'check' reports exactly the given leak totals.  When
// leaks cannot be created reliably on this platform, only warn.
static void VerifyLeaks(HeapLeakChecker* check, CheckType type,
                        int leaked_bytes, int leaked_objects) {
  WipeStack();  // to help with can_create_leaks_reliably
  const bool no_leaks =
    type == NO_LEAKS ? RUN_SILENT(*check, BriefNoLeaks)
                     : RUN_SILENT(*check, BriefSameHeap);
  if (can_create_leaks_reliably) {
    // these might still fail occasionally, but it should be very rare
    CHECK_EQ(no_leaks, false);
    CHECK_EQ(check->BytesLeaked(), leaked_bytes);
    CHECK_EQ(check->ObjectsLeaked(), leaked_objects);
  } else {
    WARN_IF(no_leaks != false,
            "Expected leaks not found: "
            "Some liveness flood must be too optimistic");
  }
}
  389. // not deallocates
  390. static void TestHeapLeakCheckerDeathSimple() {
  391. HeapLeakChecker check("death_simple");
  392. void* foo = AllocHidden(100 * sizeof(int));
  393. Use(&foo);
  394. void* bar = AllocHidden(300);
  395. Use(&bar);
  396. LogHidden("Leaking", foo);
  397. LogHidden("Leaking", bar);
  398. Pause();
  399. VerifyLeaks(&check, NO_LEAKS, 300 + 100 * sizeof(int), 2);
  400. DeAllocHidden(&foo);
  401. DeAllocHidden(&bar);
  402. }
// Build two heap arrays that point at each other (a reference cycle),
// hide both head pointers, and hand the hidden values back: together
// they form a leaked loop of two objects.
static void MakeDeathLoop(void** arr1, void** arr2) {
  PreventHeapReclaiming(2 * sizeof(void*));
  void** a1 = new(initialized) void*[2];
  void** a2 = new(initialized) void*[2];
  a1[1] = reinterpret_cast<void*>(a2);  // cycle edge a1 -> a2
  a2[1] = reinterpret_cast<void*>(a1);  // cycle edge a2 -> a1
  Hide(&a1);
  Hide(&a2);
  Use(&a1);
  Use(&a2);
  VLOG(2) << "Made hidden loop at " << &a1 << " to " << arr1;
  *arr1 = a1;  // hidden values only
  *arr2 = a2;
}
// Leak two objects linked into a cycle; both must still be reported
// even though they reference each other.
static void TestHeapLeakCheckerDeathLoop() {
  HeapLeakChecker check("death_loop");
  void* arr1;
  void* arr2;
  RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
  Use(&arr1);
  Use(&arr2);
  LogHidden("Leaking", arr1);
  LogHidden("Leaking", arr2);
  Pause();
  VerifyLeaks(&check, NO_LEAKS, 4 * sizeof(void*), 2);
  DeAllocHidden(&arr1);
  DeAllocHidden(&arr2);
}
// Deallocates more than it allocates while the checker is active, yet
// still leaks one new object: the net one-object leak of
// 100*sizeof(int) bytes must be detected.
static void TestHeapLeakCheckerDeathInverse() {
  void* bar = AllocHidden(250 * sizeof(int));  // allocated before the check
  Use(&bar);
  LogHidden("Pre leaking", bar);
  Pause();
  HeapLeakChecker check("death_inverse");
  void* foo = AllocHidden(100 * sizeof(int));  // leaked during the check
  Use(&foo);
  LogHidden("Leaking", foo);
  DeAllocHidden(&bar);  // freed during the check
  Pause();
  VerifyLeaks(&check, SAME_HEAP,
              100 * static_cast<int64>(sizeof(int)),
              1);
  DeAllocHidden(&foo);
}
  449. // deallocates more than allocates
  450. static void TestHeapLeakCheckerDeathNoLeaks() {
  451. void* foo = AllocHidden(100 * sizeof(int));
  452. Use(&foo);
  453. void* bar = AllocHidden(250 * sizeof(int));
  454. Use(&bar);
  455. HeapLeakChecker check("death_noleaks");
  456. DeAllocHidden(&bar);
  457. CHECK_EQ(check.BriefNoLeaks(), true);
  458. DeAllocHidden(&foo);
  459. }
// Have fewer objects at the end: free two pre-existing objects while
// leaking one larger object of the same total size; the single leak
// must still be found despite the matching byte total.
static void TestHeapLeakCheckerDeathCountLess() {
  void* bar1 = AllocHidden(50 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(50 * sizeof(int));
  Use(&bar2);
  LogHidden("Pre leaking", bar1);
  LogHidden("Pre leaking", bar2);
  Pause();
  HeapLeakChecker check("death_count_less");
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  LogHidden("Leaking", foo);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  Pause();
  VerifyLeaks(&check, SAME_HEAP,
              100 * sizeof(int),
              1);
  DeAllocHidden(&foo);
}
// Have more objects at the end: free one pre-existing object while
// leaking two smaller ones of the same total size; both leaks must be
// found despite the matching byte total.
static void TestHeapLeakCheckerDeathCountMore() {
  void* foo = AllocHidden(100 * sizeof(int));
  Use(&foo);
  LogHidden("Pre leaking", foo);
  Pause();
  HeapLeakChecker check("death_count_more");
  void* bar1 = AllocHidden(50 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(50 * sizeof(int));
  Use(&bar2);
  LogHidden("Leaking", bar1);
  LogHidden("Leaking", bar2);
  DeAllocHidden(&foo);
  Pause();
  VerifyLeaks(&check, SAME_HEAP,
              100 * sizeof(int),
              2);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
}
// Verify HiddenPointer: get() recovers the original pointer, while the
// raw bytes stored inside the object differ from the pointer value.
static void TestHiddenPointer() {
  int i;
  void* foo = &i;
  HiddenPointer<void> p(foo);
  CHECK_EQ(foo, p.get());

  // Confirm pointer doesn't appear to contain a byte sequence
  // that == the pointer.  We don't really need to test that
  // the xor trick itself works, as without it nothing in this
  // test suite would work.  See the Hide/Unhide/*Hidden* set
  // of helper methods.
  void **pvoid = reinterpret_cast<void**>(&p);
  CHECK_NE(foo, *pvoid);
}
  515. // simple tests that deallocate what they allocated
  516. static void TestHeapLeakChecker() {
  517. { HeapLeakChecker check("trivial");
  518. int foo = 5;
  519. int* p = &foo;
  520. Use(&p);
  521. Pause();
  522. CHECK(check.BriefSameHeap());
  523. }
  524. Pause();
  525. { HeapLeakChecker check("simple");
  526. void* foo = AllocHidden(100 * sizeof(int));
  527. Use(&foo);
  528. void* bar = AllocHidden(200 * sizeof(int));
  529. Use(&bar);
  530. DeAllocHidden(&foo);
  531. DeAllocHidden(&bar);
  532. Pause();
  533. CHECK(check.BriefSameHeap());
  534. }
  535. }
// No false positives: repeat the balanced allocate/free pattern under
// fresh checker names; the second scope uses the strict SameHeap().
static void TestHeapLeakCheckerNoFalsePositives() {
  { HeapLeakChecker check("trivial_p");
    int foo = 5;
    int* p = &foo;
    Use(&p);
    Pause();
    CHECK(check.BriefSameHeap());
  }
  Pause();
  { HeapLeakChecker check("simple_p");
    void* foo = AllocHidden(100 * sizeof(int));
    Use(&foo);
    void* bar = AllocHidden(200 * sizeof(int));
    Use(&bar);
    DeAllocHidden(&foo);
    DeAllocHidden(&bar);
    Pause();
    CHECK(check.SameHeap());
  }
}
// test that we detect leaks when we have same total # of bytes and
// objects, but different individual object sizes
// (the 240+160 freed objects are replaced by 280+120 leaked ones).
static void TestLeakButTotalsMatch() {
  void* bar1 = AllocHidden(240 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(160 * sizeof(int));
  Use(&bar2);
  LogHidden("Pre leaking", bar1);
  LogHidden("Pre leaking", bar2);
  Pause();
  HeapLeakChecker check("trick");
  void* foo1 = AllocHidden(280 * sizeof(int));
  Use(&foo1);
  void* foo2 = AllocHidden(120 * sizeof(int));
  Use(&foo2);
  LogHidden("Leaking", foo1);
  LogHidden("Leaking", foo2);
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  Pause();
  // foo1 and foo2 leaked
  VerifyLeaks(&check, NO_LEAKS, (280+120)*sizeof(int), 2);
  DeAllocHidden(&foo1);
  DeAllocHidden(&foo2);
}
// No false negatives from pprof: same matching-totals trick as above,
// but verified via the full SameHeap check; the failure is only
// enforced when the binary is believed to be unstripped.
static void TestHeapLeakCheckerDeathTrick() {
  void* bar1 = AllocHidden(240 * sizeof(int));
  Use(&bar1);
  void* bar2 = AllocHidden(160 * sizeof(int));
  Use(&bar2);
  HeapLeakChecker check("death_trick");
  DeAllocHidden(&bar1);
  DeAllocHidden(&bar2);
  void* foo1 = AllocHidden(280 * sizeof(int));
  Use(&foo1);
  void* foo2 = AllocHidden(120 * sizeof(int));
  Use(&foo2);
  // TODO(maxim): use the above if we make pprof work in automated test runs
  if (!FLAGS_maybe_stripped) {
    CHECK_EQ(RUN_SILENT(check, SameHeap), false);
      // pprof checking should catch the leak
  } else {
    WARN_IF(RUN_SILENT(check, SameHeap) != false,
            "death_trick leak is not caught; "
            "we must be using a stripped binary");
  }
  DeAllocHidden(&foo1);
  DeAllocHidden(&foo2);
}
// simple leak (transitively reached from a disabled scope)
static void TransLeaks() {
  AllocHidden(1 * sizeof(char));
}

// range-based disabling using Disabler: every leak made while the
// Disabler is alive (direct, hidden, or transitive) must be ignored.
static void ScopedDisabledLeaks() {
  HeapLeakChecker::Disabler disabler;
  AllocHidden(3 * sizeof(int));
  TransLeaks();
  (void)malloc(10);  // Direct leak
}
// have different disabled leaks; signature matches pthread entry points
static void* RunDisabledLeaks(void* a) {
  ScopedDisabledLeaks();
  return a;
}

// have different disabled leaks inside of a thread
// (no-op when --no_threads is set)
static void ThreadDisabledLeaks() {
  if (FLAGS_no_threads) return;
  pthread_t tid;
  pthread_attr_t attr;
  CHECK_EQ(pthread_attr_init(&attr), 0);
  CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, NULL), 0);
  void* res;
  CHECK_EQ(pthread_join(tid, &res), 0);  // wait so leaks happen before the check
}
// different disabled leaks (some in threads): none of them may be
// reported, so the heap must look unchanged to the checker.
static void TestHeapLeakCheckerDisabling() {
  HeapLeakChecker check("disabling");
  RunDisabledLeaks(NULL);
  RunDisabledLeaks(NULL);
  ThreadDisabledLeaks();
  RunDisabledLeaks(NULL);
  ThreadDisabledLeaks();
  ThreadDisabledLeaks();
  Pause();
  CHECK(check.SameHeap());
}
typedef set<int> IntSet;
static int some_ints[] = { 1, 2, 3, 21, 22, 23, 24, 25 };

// Churn an STL set on the heap and free everything again.
static void DoTestSTLAlloc() {
  IntSet* x = new(initialized) IntSet[1];
  *x = IntSet(some_ints, some_ints + 6);
  for (int i = 0; i < 1000; i++) {
    x->insert(i*3);
  }
  delete [] x;
}
// Check that normal STL usage does not result in a leak report.
// (In particular we test that there's no complex STL's own allocator
// running on top of our allocator with hooks to heap profiler
// that can result in false leak report in this case.)
static void TestSTLAlloc() {
  HeapLeakChecker check("stl");
  RunHidden(NewCallback(DoTestSTLAlloc));
  CHECK_EQ(check.BriefSameHeap(), true);
}
// Build a populated heap set and hand back a *hidden* pointer to it.
static void DoTestSTLAllocInverse(IntSet** setx) {
  IntSet* x = new(initialized) IntSet[1];
  *x = IntSet(some_ints, some_ints + 3);
  for (int i = 0; i < 100; i++) {
    x->insert(i*2);
  }
  Hide(&x);
  *setx = x;  // hidden value only
}

// Un-hide and free the set created by DoTestSTLAllocInverse.
static void FreeTestSTLAllocInverse(IntSet** setx) {
  IntSet* x = *setx;
  UnHide(&x);
  delete [] x;
}
// Check that normal leaked STL usage *does* result in a leak report.
// (In particular we test that there's no complex STL's own allocator
// running on top of our allocator with hooks to heap profiler
// that can result in false absence of leak report in this case.)
static void TestSTLAllocInverse() {
  HeapLeakChecker check("death_inverse_stl");
  IntSet* x;
  RunHidden(NewCallback(DoTestSTLAllocInverse, &x));
  LogHidden("Leaking", x);
  if (can_create_leaks_reliably) {
    WipeStack();  // to help with can_create_leaks_reliably
    // these might still fail occasionally, but it should be very rare
    CHECK_EQ(RUN_SILENT(check, BriefNoLeaks), false);
    CHECK_GE(check.BytesLeaked(), 100 * sizeof(int));
    CHECK_GE(check.ObjectsLeaked(), 100);
    // assumes set<>s are represented by some kind of binary tree
    // or something else allocating >=1 heap object per set object
  } else {
    WARN_IF(RUN_SILENT(check, BriefNoLeaks) != false,
            "Expected leaks not found: "
            "Some liveness flood must be too optimistic");
  }
  RunHidden(NewCallback(FreeTestSTLAllocInverse, &x));
}
// Exercise one STL allocator type directly: allocate a batch of buffers,
// verify each is known to the heap profiler via Ignore/UnIgnoreObject,
// then free them all and check the heap is unchanged.
template<class Alloc>
static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
  HeapLeakChecker check((string("direct_stl-") + name).c_str());
  static const int kSize = 1000;
  typename Alloc::pointer ptrs[kSize];
  for (int i = 0; i < kSize; ++i) {
    // Varying sizes (i*3+1) to touch multiple size classes.
    typename Alloc::pointer p = allocator.allocate(i*3+1);
    HeapLeakChecker::IgnoreObject(p);
    // This will crash if p is not known to heap profiler:
    // (i.e. STL's "allocator" does not have a direct hook to heap profiler)
    HeapLeakChecker::UnIgnoreObject(p);
    ptrs[i] = p;
  }
  for (int i = 0; i < kSize; ++i) {
    allocator.deallocate(ptrs[i], i*3+1);
    ptrs[i] = NULL;
  }
  CHECK(check.BriefSameHeap());  // just in case
}
// Cached group-database entry for TestLibCAllocate (filled lazily).
static struct group* grp = NULL;
// Number of pthread TLS keys created to force libc thread-specific allocations.
static const int kKeys = 50;
static pthread_key_t key[kKeys];
  724. static void KeyFree(void* ptr) {
  725. delete [] reinterpret_cast<char*>(ptr);
  726. }
// Set once KeyInit has created all TLS keys (checked by TestLibCAllocate).
static bool key_init_has_run = false;
// Create the kKeys pthread TLS keys, each with KeyFree as its destructor.
static void KeyInit() {
  for (int i = 0; i < kKeys; ++i) {
    CHECK_EQ(pthread_key_create(&key[i], KeyFree), 0);
    VLOG(2) << "pthread key " << i << " : " << key[i];
  }
  key_init_has_run = true;  // needed for a sanity-check
}
  735. // force various C library static and thread-specific allocations
  736. static void TestLibCAllocate() {
  737. CHECK(key_init_has_run);
  738. for (int i = 0; i < kKeys; ++i) {
  739. void* p = pthread_getspecific(key[i]);
  740. if (NULL == p) {
  741. if (i == 0) {
  742. // Test-logging inside threads which (potentially) creates and uses
  743. // thread-local data inside standard C++ library:
  744. VLOG(0) << "Adding pthread-specifics for thread " << pthread_self()
  745. << " pid " << getpid();
  746. }
  747. p = new(initialized) char[77 + i];
  748. VLOG(2) << "pthread specific " << i << " : " << p;
  749. pthread_setspecific(key[i], p);
  750. }
  751. }
  752. strerror(errno);
  753. const time_t now = time(NULL);
  754. ctime(&now);
  755. #ifdef HAVE_EXECINFO_H
  756. void *stack[1];
  757. backtrace(stack, 1);
  758. #endif
  759. #ifdef HAVE_GRP_H
  760. gid_t gid = getgid();
  761. getgrgid(gid);
  762. if (grp == NULL) grp = getgrent(); // a race condition here is okay
  763. getgrnam(grp->gr_name);
  764. #endif
  765. #ifdef HAVE_PWD_H
  766. getpwuid(geteuid());
  767. #endif
  768. }
// Continuous random heap memory activity to try to disrupt heap checking.
// Each thread endlessly allocates/frees ints and set<int>s, while keeping
// the only pointer to one heap object in a CPU register when possible.
static void* HeapBusyThreadBody(void* a) {
  const int thread_num = reinterpret_cast<intptr_t>(a);
  VLOG(0) << "A new HeapBusyThread " << thread_num;
  TestLibCAllocate();
  int user = 0;
  // Try to hide ptr from heap checker in a CPU register:
  // Here we are just making a best effort to put the only pointer
  // to a heap object into a thread register to test
  // the thread-register finding machinery in the heap checker.
#if defined(__i386__) && defined(__GNUC__)
  register int** ptr asm("esi");
#elif defined(__x86_64__) && defined(__GNUC__)
  register int** ptr asm("r15");
#else
  register int** ptr;
#endif
  ptr = NULL;
  typedef set<int> Set;
  Set s1;
  while (1) {
    // TestLibCAllocate() calls libc functions that don't work so well
    // after main() has exited. So we just don't do the test then.
    if (!g_have_exited_main)
      TestLibCAllocate();
    if (ptr == NULL) {
      ptr = new(initialized) int*[1];
      *ptr = new(initialized) int[1];
    }
    set<int>* s2 = new(initialized) set<int>[1];
    s1.insert(random());
    s2->insert(*s1.begin());
    user += *s2->begin();
    **ptr += user;
    // Occasionally reset s1, sometimes via explicit destroy + placement-new
    // to also churn the set's internal allocations.
    if (random() % 51 == 0) {
      s1.clear();
      if (random() % 2 == 0) {
        s1.~Set();
        new(&s1) Set;
      }
    }
    VLOG(3) << pthread_self() << " (" << getpid() << "): in wait: "
            << ptr << ", " << *ptr << "; " << s1.size();
    VLOG(2) << pthread_self() << " (" << getpid() << "): in wait, ptr = "
            << reinterpret_cast<void*>(
                   reinterpret_cast<uintptr_t>(ptr) ^ kHideMask)
            << "^" << reinterpret_cast<void*>(kHideMask);
    if (FLAGS_test_register_leak && thread_num % 5 == 0) {
      // Hide the register "ptr" value with an xor mask.
      // If one provides --test_register_leak flag, the test should
      // (with very high probability) crash on some leak check
      // with a leak report (of some x * sizeof(int) + y * sizeof(int*) bytes)
      // pointing at the two lines above in this function
      // with "new(initialized) int" in them as the allocators
      // of the leaked objects.
      // CAVEAT: We can't really prevent a compiler to save some
      // temporary values of "ptr" on the stack and thus let us find
      // the heap objects not via the register.
      // Hence it's normal if for certain compilers or optimization modes
      // --test_register_leak does not cause a leak crash of the above form
      // (this happens e.g. for gcc 4.0.1 in opt mode).
      ptr = reinterpret_cast<int **>(
          reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
      // busy loop to get the thread interrupted at:
      for (int i = 1; i < 10000000; ++i) user += (1 + user * user * 5) / i;
      ptr = reinterpret_cast<int **>(
          reinterpret_cast<uintptr_t>(ptr) ^ kHideMask);
    } else {
      poll(NULL, 0, random() % 100);
    }
    VLOG(2) << pthread_self() << ": continuing";
    if (random() % 3 == 0) {
      delete [] *ptr;
      delete [] ptr;
      ptr = NULL;
    }
    delete [] s2;
  }
  return a;
}
// Spawn the interfering HeapBusyThreadBody threads (unless disabled by
// flags). The threads run forever; their pthread_t-s are deliberately
// not joined.
static void RunHeapBusyThreads() {
  KeyInit();
  if (!FLAGS_interfering_threads || FLAGS_no_threads) return;
  const int n = 17;  // make many threads
  pthread_t tid;
  pthread_attr_t attr;
  CHECK_EQ(pthread_attr_init(&attr), 0);
  // make them and let them run
  for (int i = 0; i < n; ++i) {
    VLOG(0) << "Creating extra thread " << i + 1;
    CHECK(pthread_create(&tid, &attr, HeapBusyThreadBody,
                         reinterpret_cast<void*>(i)) == 0);
  }
  // Give the new threads a chance to start allocating.
  Pause();
  Pause();
}
// ========================================================================= //
// This code section is to test that objects that are reachable from global
// variables are not reported as leaks
// as well as that (Un)IgnoreObject work for such objects fine.
// An object-making function:
// returns a "weird" pointer to a new object for which
// it's worth checking that the object is reachable via that pointer.
typedef void* (*ObjMakerFunc)();
static list<ObjMakerFunc> obj_makers;  // list of registered object makers
// Helper macro to register an object making function
// 'name' is an identifier of this object maker,
// 'body' is its function body that must declare
// pointer 'p' to the next object to return.
// Usage example:
// REGISTER_OBJ_MAKER(trivial, int* p = new(initialized) int;)
#define REGISTER_OBJ_MAKER(name, body) \
  void* ObjMaker_##name##_() { \
    VLOG(1) << "Obj making " << #name; \
    body; \
    return p; \
  } \
  static ObjMakerRegistrar maker_reg_##name##__(&ObjMaker_##name##_);
  887. // helper class for REGISTER_OBJ_MAKER
  888. struct ObjMakerRegistrar {
  889. ObjMakerRegistrar(ObjMakerFunc obj_maker) { obj_makers.push_back(obj_maker); }
  890. };
// List of the objects/pointers made with all the obj_makers
// to test reachability via global data pointers during leak checks.
static list<void*>* live_objects = new list<void*>;
// pointer so that it does not get destructed on exit
// Exerciser for one ObjMakerFunc.
// Makes three objects: one to test IgnoreObject alone, one for the
// Ignore/UnIgnore/Ignore cycle, one kept reachable via live_objects.
static void TestPointerReach(ObjMakerFunc obj_maker) {
  HeapLeakChecker::IgnoreObject(obj_maker());  // test IgnoreObject
  void* obj = obj_maker();
  HeapLeakChecker::IgnoreObject(obj);
  HeapLeakChecker::UnIgnoreObject(obj);  // test UnIgnoreObject
  HeapLeakChecker::IgnoreObject(obj);  // not to need deletion for obj
  live_objects->push_back(obj_maker());  // test reachability at leak check
}
  904. // Test all ObjMakerFunc registred via REGISTER_OBJ_MAKER.
  905. static void TestObjMakers() {
  906. for (list<ObjMakerFunc>::const_iterator i = obj_makers.begin();
  907. i != obj_makers.end(); ++i) {
  908. TestPointerReach(*i);
  909. TestPointerReach(*i); // a couple more times would not hurt
  910. TestPointerReach(*i);
  911. }
  912. }
  913. // A dummy class to mimic allocation behavior of string-s.
  914. template<class T>
  915. struct Array {
  916. Array() {
  917. size = 3 + random() % 30;
  918. ptr = new(initialized) T[size];
  919. }
  920. ~Array() { delete [] ptr; }
  921. Array(const Array& x) {
  922. size = x.size;
  923. ptr = new(initialized) T[size];
  924. for (size_t i = 0; i < size; ++i) {
  925. ptr[i] = x.ptr[i];
  926. }
  927. }
  928. void operator=(const Array& x) {
  929. delete [] ptr;
  930. size = x.size;
  931. ptr = new(initialized) T[size];
  932. for (size_t i = 0; i < size; ++i) {
  933. ptr[i] = x.ptr[i];
  934. }
  935. }
  936. void append(const Array& x) {
  937. T* p = new(initialized) T[size + x.size];
  938. for (size_t i = 0; i < size; ++i) {
  939. p[i] = ptr[i];
  940. }
  941. for (size_t i = 0; i < x.size; ++i) {
  942. p[size+i] = x.ptr[i];
  943. }
  944. size += x.size;
  945. delete [] ptr;
  946. ptr = p;
  947. }
  948. private:
  949. size_t size;
  950. T* ptr;
  951. };
// to test pointers to objects, built-in arrays, string, etc:
REGISTER_OBJ_MAKER(plain, int* p = new(initialized) int;)
REGISTER_OBJ_MAKER(int_array_1, int* p = new(initialized) int[1];)
REGISTER_OBJ_MAKER(int_array, int* p = new(initialized) int[10];)
REGISTER_OBJ_MAKER(string, Array<char>* p = new(initialized) Array<char>();)
REGISTER_OBJ_MAKER(string_array,
                   Array<char>* p = new(initialized) Array<char>[5];)
REGISTER_OBJ_MAKER(char_array, char* p = new(initialized) char[5];)
// NOTE(review): this one uses plain new (no 'initialized' tag) —
// presumably fine since Array's constructor initializes; confirm intent.
REGISTER_OBJ_MAKER(appended_string,
                   Array<char>* p = new Array<char>();
                   p->append(Array<char>());
                  )
REGISTER_OBJ_MAKER(plain_ptr, int** p = new(initialized) int*;)
REGISTER_OBJ_MAKER(linking_ptr,
                   int** p = new(initialized) int*;
                   *p = new(initialized) int;
                  )
// small objects:
REGISTER_OBJ_MAKER(0_sized, void* p = malloc(0);)  // 0-sized object (important)
REGISTER_OBJ_MAKER(1_sized, void* p = malloc(1);)
REGISTER_OBJ_MAKER(2_sized, void* p = malloc(2);)
REGISTER_OBJ_MAKER(3_sized, void* p = malloc(3);)
REGISTER_OBJ_MAKER(4_sized, void* p = malloc(4);)
// Global set<int>s (mutable and const) that must be treated as live roots.
static int set_data[] = { 1, 2, 3, 4, 5, 6, 7, 21, 22, 23, 24, 25, 26, 27 };
static set<int> live_leak_set(set_data, set_data+7);
static const set<int> live_leak_const_set(set_data, set_data+14);
REGISTER_OBJ_MAKER(set,
                   set<int>* p = new(initialized) set<int>(set_data, set_data + 13);
                  )
// Fixture with a mutable pointer member so that even a const global
// instance can point at heap data (assigned in TestHeapLeakCheckerLiveness).
class ClassA {
 public:
  explicit ClassA(int a) : ptr(NULL) { }
  mutable char* ptr;
};
static const ClassA live_leak_mutable(1);
// Templated analogue of ClassA: mutable value + mutable pointer members,
// instantiated with Array<char> below.
template<class C>
class TClass {
 public:
  explicit TClass(int a) : ptr(NULL) { }
  mutable C val;
  mutable C* ptr;
};
static const TClass<Array<char> > live_leak_templ_mutable(1);
// Fixture class hierarchy for testing heap reachability through pointers
// to base subobjects (including non-first bases, which sit at a nonzero
// offset inside the most-derived object).
class ClassB {
 public:
  ClassB() { }
  char b[7];
  virtual void f() { }
  virtual ~ClassB() { }
};
class ClassB2 {
 public:
  ClassB2() { }
  char b2[11];
  virtual void f2() { }
  virtual ~ClassB2() { }
};
class ClassD1 : public ClassB {
  char d1[15];
  virtual void f() { }
};
class ClassD2 : public ClassB2 {
  char d2[19];
  virtual void f2() { }
};
// Multiple inheritance: ClassB2/ClassD2 subobjects are at a nonzero offset.
class ClassD : public ClassD1, public ClassD2 {
  char d[3];
  virtual void f() { }
  virtual void f2() { }
};
// to test pointers to objects of base subclasses:
REGISTER_OBJ_MAKER(B, ClassB* p = new(initialized) ClassB;)
REGISTER_OBJ_MAKER(D1, ClassD1* p = new(initialized) ClassD1;)
REGISTER_OBJ_MAKER(D2, ClassD2* p = new(initialized) ClassD2;)
REGISTER_OBJ_MAKER(D, ClassD* p = new(initialized) ClassD;)
REGISTER_OBJ_MAKER(D1_as_B, ClassB* p = new(initialized) ClassD1;)
REGISTER_OBJ_MAKER(D2_as_B2, ClassB2* p = new(initialized) ClassD2;)
REGISTER_OBJ_MAKER(D_as_B, ClassB* p = new(initialized) ClassD;)
REGISTER_OBJ_MAKER(D_as_D1, ClassD1* p = new(initialized) ClassD;)
// inside-object pointers (base subobject at a nonzero offset):
REGISTER_OBJ_MAKER(D_as_B2, ClassB2* p = new(initialized) ClassD;)
REGISTER_OBJ_MAKER(D_as_D2, ClassD2* p = new(initialized) ClassD;)
// Abstract interface fixtures for testing reachability under
// interface-only multiple inheritance.
class InterfaceA {
 public:
  virtual void A() = 0;
  virtual ~InterfaceA() { }
 protected:
  InterfaceA() { }
};
class InterfaceB {
 public:
  virtual void B() = 0;
  virtual ~InterfaceB() { }
 protected:
  InterfaceB() { }
};
class InterfaceC : public InterfaceA {
 public:
  virtual void C() = 0;
  virtual ~InterfaceC() { }
 protected:
  InterfaceC() { }
};
// Concrete classes mixing a data-carrying base with pure interfaces.
class ClassMltD1 : public ClassB, public InterfaceB, public InterfaceC {
 public:
  char d1[11];
  virtual void f() { }
  virtual void A() { }
  virtual void B() { }
  virtual void C() { }
};
class ClassMltD2 : public InterfaceA, public InterfaceB, public ClassB {
 public:
  char d2[15];
  virtual void f() { }
  virtual void A() { }
  virtual void B() { }
};
// to specifically test heap reachability under
// interface-only multiple inheritance (some use inside-object pointers):
REGISTER_OBJ_MAKER(MltD1, ClassMltD1* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_B, ClassB* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IA, InterfaceA* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IB, InterfaceB* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD1_as_IC, InterfaceC* p = new(initialized) ClassMltD1;)
REGISTER_OBJ_MAKER(MltD2, ClassMltD2* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_B, ClassB* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_IA, InterfaceA* p = new(initialized) ClassMltD2;)
REGISTER_OBJ_MAKER(MltD2_as_IB, InterfaceB* p = new(initialized) ClassMltD2;)
// to mimic UnicodeString defined in third_party/icu,
// which store a platform-independent-sized refcount in the first
// few bytes and keeps a pointer pointing behind the refcount.
REGISTER_OBJ_MAKER(unicode_string,
                   char* p = new char[sizeof(uint32) * 10];
                   p += sizeof(uint32);
                  )
// similar, but for platform-dependent-sized refcount
REGISTER_OBJ_MAKER(ref_counted,
                   char* p = new char[sizeof(int) * 20];
                   p += sizeof(int);
                  )
// Fixture whose Inner members sit at several distinct offsets inside the
// enclosing object, to test pointers into the middle of heap objects.
struct Nesting {
  struct Inner {
    Nesting* parent;
    Inner(Nesting* p) : parent(p) {}
  };
  Inner i0;
  char n1[5];
  Inner i1;
  char n2[11];
  Inner i2;
  char n3[27];
  Inner i3;
  Nesting() : i0(this), i1(this), i2(this), i3(this) {}
};
// to test inside-object pointers pointing at objects nested into heap objects:
REGISTER_OBJ_MAKER(nesting_i0, Nesting::Inner* p = &((new Nesting())->i0);)
REGISTER_OBJ_MAKER(nesting_i1, Nesting::Inner* p = &((new Nesting())->i1);)
REGISTER_OBJ_MAKER(nesting_i2, Nesting::Inner* p = &((new Nesting())->i2);)
REGISTER_OBJ_MAKER(nesting_i3, Nesting::Inner* p = &((new Nesting())->i3);)
// Function pointer that is never set; calling it through the non-NULL
// check below makes the compiler assume the live_leak_* globals escape.
void (* volatile init_forcer)(...);
// allocate many objects reachable from global data
// (so the global leak check must NOT report them).
static void TestHeapLeakCheckerLiveness() {
  live_leak_mutable.ptr = new(initialized) char[77];
  live_leak_templ_mutable.ptr = new(initialized) Array<char>();
  live_leak_templ_mutable.val = Array<char>();
  // smart compiler may see that live_leak_mutable is not used
  // anywhere so .ptr assignment is not used.
  //
  // We force compiler to assume that it is used by having function
  // variable (set to 0 which hopefully won't be known to compiler)
  // which gets address of those objects. So compiler has to assume
  // that .ptr is used.
  if (init_forcer) {
    init_forcer(&live_leak_mutable, &live_leak_templ_mutable);
  }
  TestObjMakers();
}
// ========================================================================= //
// Get address (PC value) following the mmap call into addr_after_mmap_call
static void* Mmapper(uintptr_t* addr_after_mmap_call) {
  void* r = mmap(NULL, 100, PROT_READ|PROT_WRITE,
                 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  // Get current PC value into addr_after_mmap_call
  void* stack[1];
  CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
  *addr_after_mmap_call = reinterpret_cast<uintptr_t>(stack[0]);
  sleep(0);  // undo -foptimize-sibling-calls
  return r;
}
// On PPC64 the stacktrace returned by GetStackTrace contains the function
// address from .text segment while function pointers point to ODP entries.
// The following code decodes the ODP to get the actual symbol address.
#if defined(__linux) && defined(__PPC64__) && (_CALL_ELF != 2)
static inline uintptr_t GetFunctionAddress (void* (*func)(uintptr_t*))
{
  // ELFv1 function descriptor layout: {symbol, TOC, environment}.
  struct odp_entry_t {
    unsigned long int symbol;
    unsigned long int toc;
    unsigned long int env;
  } *odp_entry = reinterpret_cast<odp_entry_t*>(func);
  return static_cast<uintptr_t>(odp_entry->symbol);
}
#else
// Everywhere else a function pointer already is the code address.
static inline uintptr_t GetFunctionAddress (void* (*func)(uintptr_t*))
{
  return reinterpret_cast<uintptr_t>(func);
}
#endif
// to trick compiler into preventing inlining of Mmapper
static void* (*mmapper_addr)(uintptr_t* addr) = &Mmapper;
// TODO(maxim): copy/move this to memory_region_map_unittest
// TODO(maxim): expand this test to include mmap64, mremap and sbrk calls.
// Verify that MemoryRegionMap recorded a caller PC inside Mmapper for the
// region Mmapper mmap-ed; dies otherwise (stack collection must be broken).
static void VerifyMemoryRegionMapStackGet() {
  uintptr_t caller_addr_limit;
  void* addr = (*mmapper_addr)(&caller_addr_limit);
  uintptr_t caller = 0;
  { MemoryRegionMap::LockHolder l;
    // Find the one region whose start matches the mmap result and grab
    // the caller PC that MemoryRegionMap recorded for it.
    for (MemoryRegionMap::RegionIterator
             i = MemoryRegionMap::BeginRegionLocked();
         i != MemoryRegionMap::EndRegionLocked(); ++i) {
      if (i->start_addr == reinterpret_cast<uintptr_t>(addr)) {
        CHECK_EQ(caller, 0);  // must match exactly one region
        caller = i->caller();
      }
    }
  }
  // caller must point into Mmapper function:
  if (!(GetFunctionAddress(mmapper_addr) <= caller &&
        caller < caller_addr_limit)) {
    LOGF << std::hex << "0x" << caller
         << " does not seem to point into code of function Mmapper at "
         << "0x" << reinterpret_cast<uintptr_t>(mmapper_addr)
         << "! Stack frame collection must be off in MemoryRegionMap!";
    LOG(FATAL, "\n");
  }
  munmap(addr, 100);
}
// malloc 100 bytes and report the PC just after the malloc call via
// *addr_after_malloc_call; caller frees the returned buffer.
static void* Mallocer(uintptr_t* addr_after_malloc_call) {
  void* r = malloc(100);
  sleep(0);  // undo -foptimize-sibling-calls
  // Get current PC value into addr_after_malloc_call
  void* stack[1];
  CHECK_EQ(GetStackTrace(stack, 1, 0), 1);
  *addr_after_malloc_call = reinterpret_cast<uintptr_t>(stack[0]);
  return r;
}
// to trick compiler into preventing inlining of Mallocer
static void* (* volatile mallocer_addr)(uintptr_t* addr) = &Mallocer;
// non-static for friendship with HeapProfiler
// TODO(maxim): expand this test to include
// realloc, calloc, memalign, valloc, pvalloc, new, and new[].
// Verify that the heap profiler recorded an allocation caller PC inside
// Mallocer for the block Mallocer malloc-ed; dies otherwise.
extern void VerifyHeapProfileTableStackGet() {
  uintptr_t caller_addr_limit;
  void* addr = (*mallocer_addr)(&caller_addr_limit);
  uintptr_t caller =
      reinterpret_cast<uintptr_t>(HeapLeakChecker::GetAllocCaller(addr));
  // caller must point into Mallocer function:
  if (!(GetFunctionAddress(mallocer_addr) <= caller &&
        caller < caller_addr_limit)) {
    LOGF << std::hex << "0x" << caller
         << " does not seem to point into code of function Mallocer at "
         << "0x" << reinterpret_cast<uintptr_t>(mallocer_addr)
         << "! Stack frame collection must be off in heap profiler!";
    LOG(FATAL, "\n");
  }
  free(addr);
}
// ========================================================================= //
// Allocate an int[10] and hand back a hidden pointer to it via *arr,
// so the whole-program leak check should report it as leaked.
static void MakeALeak(void** arr) {
  PreventHeapReclaiming(10 * sizeof(int));
  void* a = new(initialized) int[10];
  // Hide before publishing so the checker can't reach it through *arr.
  Hide(&a);
  *arr = a;
}
  1227. // Helper to do 'return 0;' inside main(): insted we do 'return Pass();'
  1228. static int Pass() {
  1229. fprintf(stdout, "PASS\n");
  1230. g_have_exited_main = true;
  1231. return 0;
  1232. }
// Test driver: verifies stack-PC collection, optionally produces a
// deliberate whole-program leak (per flags), otherwise runs the full
// suite of leak-checker tests under an overall "all" checker.
int main(int argc, char** argv) {
  run_hidden_ptr = DoRunHidden;
  wipe_stack_ptr = DoWipeStack;
  if (!HeapLeakChecker::IsActive()) {
    CHECK_EQ(FLAGS_heap_check, "");
    LOG(WARNING, "HeapLeakChecker got turned off; we won't test much...");
  } else {
    VerifyMemoryRegionMapStackGet();
    VerifyHeapProfileTableStackGet();
  }
  KeyInit();
  // glibc 2.4, on x86_64 at least, has a lock-ordering bug, which
  // means deadlock is possible when one thread calls dl_open at the
  // same time another thread is calling dl_iterate_phdr. libunwind
  // calls dl_iterate_phdr, and TestLibCAllocate calls dl_open (or the
  // various syscalls in it do), at least the first time it's run.
  // To avoid the deadlock, we run TestLibCAllocate once before getting
  // multi-threaded.
  // TODO(csilvers): once libc is fixed, or libunwind can work around it,
  //                 get rid of this early call. We *want* our test to
  //                 find potential problems like this one!
  TestLibCAllocate();
  if (FLAGS_interfering_threads) {
    RunHeapBusyThreads();  // add interference early
  }
  TestLibCAllocate();
  LOGF << "In main(): heap_check=" << FLAGS_heap_check << endl;
  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good
  // --test_leak: leak one hidden array and rely on the whole-program check.
  if (FLAGS_test_leak) {
    void* arr;
    RunHidden(NewCallback(MakeALeak, &arr));
    Use(&arr);
    LogHidden("Leaking", arr);
    if (FLAGS_test_cancel_global_check) {
      HeapLeakChecker::CancelGlobalCheck();
    } else {
      // Verify we can call NoGlobalLeaks repeatedly without deadlocking
      HeapLeakChecker::NoGlobalLeaks();
      HeapLeakChecker::NoGlobalLeaks();
    }
    return Pass();
    // whole-program leak-check should (with very high probability)
    // catch the leak of arr (10 * sizeof(int) bytes)
    // (when !FLAGS_test_cancel_global_check)
  }
  // --test_loop_leak: leak two hidden arrays forming a pointer loop.
  if (FLAGS_test_loop_leak) {
    void* arr1;
    void* arr2;
    RunHidden(NewCallback(MakeDeathLoop, &arr1, &arr2));
    Use(&arr1);
    Use(&arr2);
    LogHidden("Loop leaking", arr1);
    LogHidden("Loop leaking", arr2);
    if (FLAGS_test_cancel_global_check) {
      HeapLeakChecker::CancelGlobalCheck();
    } else {
      // Verify we can call NoGlobalLeaks repeatedly without deadlocking
      HeapLeakChecker::NoGlobalLeaks();
      HeapLeakChecker::NoGlobalLeaks();
    }
    return Pass();
    // whole-program leak-check should (with very high probability)
    // catch the leak of arr1 and arr2 (4 * sizeof(void*) bytes)
    // (when !FLAGS_test_cancel_global_check)
  }
  // --test_register_leak: let the busy threads hide pointers in registers
  // and repeatedly leak-check; the .sh harness expects a crash here.
  if (FLAGS_test_register_leak) {
    // make us fail only where the .sh test expects:
    Pause();
    for (int i = 0; i < 100; ++i) {  // give it some time to crash
      CHECK(HeapLeakChecker::NoGlobalLeaks());
      Pause();
    }
    return Pass();
  }
  TestHeapLeakCheckerLiveness();
  HeapLeakChecker heap_check("all");
  TestHiddenPointer();
  TestHeapLeakChecker();
  Pause();
  TestLeakButTotalsMatch();
  Pause();
  TestHeapLeakCheckerDeathSimple();
  Pause();
  TestHeapLeakCheckerDeathLoop();
  Pause();
  TestHeapLeakCheckerDeathInverse();
  Pause();
  TestHeapLeakCheckerDeathNoLeaks();
  Pause();
  TestHeapLeakCheckerDeathCountLess();
  Pause();
  TestHeapLeakCheckerDeathCountMore();
  Pause();
  TestHeapLeakCheckerDeathTrick();
  Pause();
  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good
  TestHeapLeakCheckerNoFalsePositives();
  Pause();
  TestHeapLeakCheckerDisabling();
  Pause();
  TestSTLAlloc();
  Pause();
  TestSTLAllocInverse();
  Pause();
  // Test that various STL allocators work.  Some of these are redundant, but
  // we don't know how STL might change in the future.  For example,
  // http://wiki/Main/StringNeStdString.
#define DTSL(a) { DirectTestSTLAlloc(a, #a); \
                  Pause(); }
  DTSL(std::allocator<char>());
  DTSL(std::allocator<int>());
  DTSL(std::string().get_allocator());
  DTSL(string().get_allocator());
  DTSL(vector<int>().get_allocator());
  DTSL(vector<double>().get_allocator());
  DTSL(vector<vector<int> >().get_allocator());
  DTSL(vector<string>().get_allocator());
  DTSL((map<string, string>().get_allocator()));
  DTSL((map<string, int>().get_allocator()));
  DTSL(set<char>().get_allocator());
#undef DTSL
  TestLibCAllocate();
  Pause();
  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good
  Pause();
  if (!FLAGS_maybe_stripped) {
    CHECK(heap_check.SameHeap());
  } else {
    WARN_IF(heap_check.SameHeap() != true,
            "overall leaks are caught; we must be using a stripped binary");
  }
  CHECK(HeapLeakChecker::NoGlobalLeaks());  // so far, so good
  return Pass();
}