author     csilvers <csilvers@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2008-04-22 01:47:16 +0000
committer  csilvers <csilvers@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2008-04-22 01:47:16 +0000
commit     7ec719093b1c9fda979ba0d07eed288e2a7c3c9b (patch)
tree       fb5452ecdba036d1f7689d29b28f57af881e44b9
parent     97fdd4a4f97dd15e8803ed51ac153903c2cdffc2 (diff)
download   gperftools-7ec719093b1c9fda979ba0d07eed288e2a7c3c9b.tar.gz

Mon Apr 21 15:20:52 2008  Google Inc. <opensource@google.com>

  * google-perftools: version 0.97 release
  * Refactor GetHeapProfile to avoid using malloc (maxim)
  * Fix heap-checker and heap-profiler hook interactions (maxim)
  * Fix a data race in MemoryRegionMap::Lock (jyasskin)
  * Improve thread-safety of leak checker (maxim)
  * Fix mmap profile to no longer deadlock (maxim)
  * Fix rpm to have devel package depend on non-devel (csilvers)
  * PORTING: Fix clock-speed detection for Mac OS X (csilvers)

git-svn-id: http://gperftools.googlecode.com/svn/trunk@50 6b5cf1ce-ec42-a296-1ba9-69fdba395a50
-rw-r--r--   ChangeLog                                    11
-rwxr-xr-x   configure                                    20
-rw-r--r--   configure.ac                                  2
-rw-r--r--   doc/heapprofile.html                         16
-rw-r--r--   packages/deb/changelog                        6
-rw-r--r--   packages/rpm/rpm.spec                         8
-rw-r--r--   src/addressmap-inl.h                         16
-rw-r--r--   src/base/cycleclock.h                         2
-rw-r--r--   src/base/elfcore.h                            9
-rw-r--r--   src/base/low_level_alloc.cc                   8
-rw-r--r--   src/base/spinlock.h                          32
-rw-r--r--   src/base/sysinfo.cc                          19
-rw-r--r--   src/base/sysinfo.h                            5
-rw-r--r--   src/google/heap-checker.h                   125
-rw-r--r--   src/google/heap-profiler.h                    2
-rw-r--r--   src/google/profiler.h                         2
-rw-r--r--   src/heap-checker.cc                         981
-rw-r--r--   src/heap-profile-table.cc                    26
-rw-r--r--   src/heap-profile-table.h                     29
-rw-r--r--   src/heap-profiler.cc                        235
-rw-r--r--   src/internal_logging.cc                      11
-rw-r--r--   src/internal_logging.h                       12
-rw-r--r--   src/memory_region_map.cc                    313
-rw-r--r--   src/memory_region_map.h                     196
-rwxr-xr-x   src/pprof                                    11
-rw-r--r--   src/stacktrace_libunwind-inl.h               30
-rw-r--r--   src/stacktrace_x86-inl.h                     19
-rw-r--r--   src/tcmalloc.cc                              30
-rwxr-xr-x   src/tests/heap-checker-death_unittest.sh      8
-rw-r--r--   src/tests/heap-checker_unittest.cc           79
-rw-r--r--   src/tests/heap-profiler_unittest.cc           2
-rw-r--r--   src/tests/low_level_alloc_unittest.cc         4
-rwxr-xr-x   src/tests/maybe_threads_unittest.sh           4
-rw-r--r--   src/windows/port.cc                           3
34 files changed, 1516 insertions, 760 deletions
diff --git a/ChangeLog b/ChangeLog
index 933edc9..37bafc8 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,14 @@
+Mon Apr 21 15:20:52 2008 Google Inc. <opensource@google.com>
+
+ * google-perftools: version 0.97 release
+ * Refactor GetHeapProfile to avoid using malloc (maxim)
+ * Fix heap-checker and heap-profiler hook interactions (maxim)
+ * Fix a data race in MemoryRegionMap::Lock (jyasskin)
+ * Improve thread-safety of leak checker (maxim)
+ * Fix mmap profile to no longer deadlock (maxim)
+ * Fix rpm to have devel package depend on non-devel (csilvers)
+ * PORTING: Fix clock-speed detection for Mac OS X (csilvers)
+
Tue Mar 18 14:30:44 2008 Google Inc. <opensource@google.com>
* google-perftools: version 0.96 release
diff --git a/configure b/configure
index fe83e85..ca956aa 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.59 for google-perftools 0.96.
+# Generated by GNU Autoconf 2.59 for google-perftools 0.97.
#
# Report bugs to <opensource@google.com>.
#
@@ -423,8 +423,8 @@ SHELL=${CONFIG_SHELL-/bin/sh}
# Identity of this package.
PACKAGE_NAME='google-perftools'
PACKAGE_TARNAME='google-perftools'
-PACKAGE_VERSION='0.96'
-PACKAGE_STRING='google-perftools 0.96'
+PACKAGE_VERSION='0.97'
+PACKAGE_STRING='google-perftools 0.97'
PACKAGE_BUGREPORT='opensource@google.com'
ac_unique_file="README"
@@ -954,7 +954,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures google-perftools 0.96 to adapt to many kinds of systems.
+\`configure' configures google-perftools 0.97 to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1021,7 +1021,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of google-perftools 0.96:";;
+ short | recursive ) echo "Configuration of google-perftools 0.97:";;
esac
cat <<\_ACEOF
@@ -1162,7 +1162,7 @@ fi
test -n "$ac_init_help" && exit 0
if $ac_init_version; then
cat <<\_ACEOF
-google-perftools configure 0.96
+google-perftools configure 0.97
generated by GNU Autoconf 2.59
Copyright (C) 2003 Free Software Foundation, Inc.
@@ -1176,7 +1176,7 @@ cat >&5 <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by google-perftools $as_me 0.96, which was
+It was created by google-perftools $as_me 0.97, which was
generated by GNU Autoconf 2.59. Invocation command line was
$ $0 $@
@@ -1904,7 +1904,7 @@ fi
# Define the identity of the package.
PACKAGE='google-perftools'
- VERSION='0.96'
+ VERSION='0.97'
cat >>confdefs.h <<_ACEOF
@@ -24092,7 +24092,7 @@ _ASBOX
} >&5
cat >&5 <<_CSEOF
-This file was extended by google-perftools $as_me 0.96, which was
+This file was extended by google-perftools $as_me 0.97, which was
generated by GNU Autoconf 2.59. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -24155,7 +24155,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF
ac_cs_version="\\
-google-perftools config.status 0.96
+google-perftools config.status 0.97
configured by $0, generated by GNU Autoconf 2.59,
with options \\"`echo "$ac_configure_args" | sed 's/[\\""\`\$]/\\\\&/g'`\\"
diff --git a/configure.ac b/configure.ac
index 2249f62..e8abd24 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@
# make sure we're interpreted by some minimal autoconf
AC_PREREQ(2.57)
-AC_INIT(google-perftools, 0.96, opensource@google.com)
+AC_INIT(google-perftools, 0.97, opensource@google.com)
# The argument here is just something that should be in the current directory
# (for sanity checking)
AC_CONFIG_SRCDIR(README)
diff --git a/doc/heapprofile.html b/doc/heapprofile.html
index c3b9aa2..6d544e0 100644
--- a/doc/heapprofile.html
+++ b/doc/heapprofile.html
@@ -106,17 +106,29 @@ commandline flags.</p>
<td><code>HEAP_PROFILE_MMAP</code></td>
<td>default: false</td>
<td>
- Profile <code>mmap</code> calls in addition to
+ Profile <code>mmap</code>, <code>mremap</code>, and <code>sbrk</code>
+ calls in addition to
<code>malloc</code>, <code>calloc</code>, <code>realloc</code>,
and <code>new</code>.
</td>
</tr>
<tr valign=top>
+ <td><code>HEAP_PROFILE_MMAP_ONLY</code></td>
+ <td>default: false</td>
+ <td>
+ Only profile <code>mmap</code>, <code>mremap</code>, and <code>sbrk</code>
+ calls; do not profile
+ <code>malloc</code>, <code>calloc</code>, <code>realloc</code>,
+ or <code>new</code>.
+ </td>
+</tr>
+
+<tr valign=top>
<td><code>HEAP_PROFILE_MMAP_LOG</code></td>
<td>default: false</td>
<td>
- Log <code>mmap</code>/<code>munmap</code>calls.
+ Log <code>mmap</code>/<code>munmap</code> calls.
</td>
</tr>
diff --git a/packages/deb/changelog b/packages/deb/changelog
index ae3a422..95e7117 100644
--- a/packages/deb/changelog
+++ b/packages/deb/changelog
@@ -1,3 +1,9 @@
+google-perftools (0.97-1) unstable; urgency=low
+
+ * New upstream release.
+
+ -- Google Inc. <opensource@google.com> Mon, 21 Apr 2008 15:20:52 -0700
+
google-perftools (0.96-1) unstable; urgency=low
* New upstream release.
diff --git a/packages/rpm/rpm.spec b/packages/rpm/rpm.spec
index 2c16b7c..2b292bf 100644
--- a/packages/rpm/rpm.spec
+++ b/packages/rpm/rpm.spec
@@ -1,18 +1,17 @@
-%define ver %VERSION
%define RELEASE 1
%define rel %{?CUSTOM_RELEASE} %{!?CUSTOM_RELEASE:%RELEASE}
%define prefix /usr
Name: %NAME
Summary: Performance tools for C++
-Version: %ver
+Version: %VERSION
Release: %rel
Group: Development/Libraries
-URL: http://goog-perftools.sourceforge.net
+URL: http://code.google.com/p/google-perftools/
License: BSD
Vendor: Google
Packager: Google <opensource@google.com>
-Source: http://goog-perftools.sourceforge.net/%{NAME}-%{PACKAGE_VERSION}.tar.gz
+Source: http://%{NAME}.googlecode.com/files/%{NAME}-%{VERSION}.tar.gz
Distribution: Redhat 7 and above.
Buildroot: %{_tmppath}/%{name}-root
Prefix: %prefix
@@ -25,6 +24,7 @@ malloc() and cpu and heap profiling utilities.
%package devel
Summary: Performance tools for C++
Group: Development/Libraries
+Requires: %{NAME} = %{VERSION}
%description devel
The %name-devel package contains static and debug libraries and header
diff --git a/src/addressmap-inl.h b/src/addressmap-inl.h
index e1ce1bf..1a1ab3b 100644
--- a/src/addressmap-inl.h
+++ b/src/addressmap-inl.h
@@ -77,8 +77,8 @@
// b. An array access in the cluster structure
// c. A traversal over the linked-list for a block
-#ifndef _ADDRESSMAP_H
-#define _ADDRESSMAP_H
+#ifndef BASE_ADDRESSMAP_INL_H__
+#define BASE_ADDRESSMAP_INL_H__
#include "config.h"
#include <stddef.h>
@@ -102,10 +102,10 @@
template <class Value>
class AddressMap {
public:
- typedef void* (*Allocator)(size_t);
- typedef void (*DeAllocator)(void*);
+ typedef void* (*Allocator)(size_t size);
+ typedef void (*DeAllocator)(void* ptr);
typedef const void* Key;
-
+
// Create an AddressMap that uses the specified allocator/deallocator.
// The allocator/deallocator should behave like malloc/free.
// For instance, the allocator does not need to return initialized memory.
@@ -230,7 +230,7 @@ class AddressMap {
}
return NULL;
}
-
+
// Return the block ID for an address within its cluster
static int BlockID(Number address) {
return (address >> kBlockBits) & (kClusterBlocks - 1);
@@ -278,7 +278,7 @@ AddressMap<Value>::AddressMap(Allocator alloc, DeAllocator dealloc)
template <class Value>
AddressMap<Value>::~AddressMap() {
// De-allocate all of the objects we allocated
- for (Object* obj = allocated_; obj != NULL; ) {
+ for (Object* obj = allocated_; obj != NULL; /**/) {
Object* next = obj->next;
(*dealloc_)(obj);
obj = next;
@@ -418,4 +418,4 @@ inline void AddressMap<Value>::Iterate(void (*callback)(Key, Value*, Type),
}
}
-#endif /* _ADDRESSMAP_H */
+#endif // BASE_ADDRESSMAP_INL_H__
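
The hunk above only tightens the include guard and the Allocator/DeAllocator typedefs; the constructor contract it documents (allocator/deallocator behaving like malloc/free) is unchanged. A minimal usage sketch under that contract; the Insert() call is an assumed part of the interface not shown in this hunk, included purely for illustration:

    // Sketch only: construct an AddressMap with a malloc/free pair, per the
    // constructor contract documented in addressmap-inl.h.
    #include <cstdlib>
    #include "addressmap-inl.h"

    static void* PageAlloc(size_t size) { return malloc(size); }
    static void  PageFree(void* ptr)    { free(ptr); }

    void AddressMapSketch() {
      AddressMap<int> map(PageAlloc, PageFree);  // allocator behaves like malloc/free
      static int slot = 0;
      map.Insert(&slot, 42);   // assumed: record a value keyed by this address
    }                          // destructor returns all internal storage via PageFree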
diff --git a/src/base/cycleclock.h b/src/base/cycleclock.h
index a5fecb1..9357cf6 100644
--- a/src/base/cycleclock.h
+++ b/src/base/cycleclock.h
@@ -48,7 +48,7 @@
// with modifications by m3b. cf
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
struct CycleClock {
- // This should return the number of cycles since power-on
+ // This should return the number of cycles since power-on. Thread-safe.
static inline int64 Now() {
#if defined(__MACH__) && defined(__APPLE__)
// this goes at the top because we need ALL Macs, regardless
diff --git a/src/base/elfcore.h b/src/base/elfcore.h
index 9f7002a..a4e5f4e 100644
--- a/src/base/elfcore.h
+++ b/src/base/elfcore.h
@@ -28,7 +28,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* ---
- * Author: Markus Gutschke
+ * Author: Markus Gutschke, Carl Crous
*/
#ifndef _ELFCORE_H
@@ -372,9 +372,10 @@ extern "C" {
*/
int InternalGetCoreDump(void *frame, int num_threads, pid_t *thread_pids,
va_list ap
- /* const char *PATH,
- const struct CoredumperCompressor *compressors,
- const struct CoredumperCompressor **selected_comp */);
+ /* const struct CoreDumpParameters *params,
+ const char *file_name,
+ const char *PATH
+ */);
#endif
diff --git a/src/base/low_level_alloc.cc b/src/base/low_level_alloc.cc
index 95ecf3b..d7f6c74 100644
--- a/src/base/low_level_alloc.cc
+++ b/src/base/low_level_alloc.cc
@@ -386,7 +386,7 @@ void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
// find the minimum levels that a block of this size must have
int i = LLA_SkiplistLevels(req_rnd, arena->min_size, false) - 1;
if (i < arena->freelist.levels) { // potential blocks exist
- AllocList *before = &arena->freelist; // predecessor of s
+ AllocList *before = &arena->freelist; // predecessor of s
while ((s = Next(i, before, arena)) != 0 && s->header.size < req_rnd) {
before = s;
}
@@ -397,7 +397,9 @@ void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
// we unlock before mmap() both because mmap() may call a callback hook,
// and because it may be slow.
arena->mu.Unlock();
- size_t new_pages_size = RoundUp(req_rnd, arena->pagesize);
+ // mmap generous 64K chunks to decrease
+ // the chances/impact of fragmentation:
+ size_t new_pages_size = RoundUp(req_rnd, arena->pagesize * 16);
void *new_pages = mmap(0, new_pages_size,
PROT_WRITE|PROT_READ, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
RAW_CHECK(new_pages != MAP_FAILED, "mmap error");
@@ -412,7 +414,7 @@ void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {
AllocList *prev[kMaxLevel];
LLA_SkiplistDelete(&arena->freelist, s, prev); // remove from free list
// s points to the first free region that's big enough
- if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
+ if (req_rnd + arena->min_size <= s->header.size) { // big enough to split
AllocList *n = reinterpret_cast<AllocList *>
(req_rnd + reinterpret_cast<char *>(s));
n->header.size = s->header.size - req_rnd;
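
The allocation hunk above changes DoAllocWithArena from page-granularity mmaps to rounding each request up to arena->pagesize * 16 (64 KiB with a typical 4 KiB page), trading a little address space for less fragmentation. A small worked example of that rounding; the helper below is a sketch assuming a power-of-two alignment, not the library's RoundUp:

    #include <cstddef>

    // Round x up to a multiple of align (align must be a power of two here).
    static size_t RoundUpSketch(size_t x, size_t align) {
      return (x + align - 1) & ~(align - 1);
    }

    // With 4 KiB pages: RoundUpSketch(300, 4096 * 16) == 65536, so even a
    // 300-byte arena request is backed by one 64 KiB mmap and the unused
    // tail goes back onto the arena's free list.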
diff --git a/src/base/spinlock.h b/src/base/spinlock.h
index 2fd5e87..466492f 100644
--- a/src/base/spinlock.h
+++ b/src/base/spinlock.h
@@ -48,7 +48,16 @@
#include "base/atomicops.h"
#include "base/dynamic_annotations.h"
-class SpinLock {
+// One day, we may use __attribute__ stuff on gcc to annotate these functions
+#define LOCKABLE
+#define SCOPED_LOCKABLE
+#define EXCLUSIVE_LOCK_FUNCTION(...)
+#define EXCLUSIVE_TRYLOCK_FUNCTION(...)
+#define UNLOCK_FUNCTION(...)
+
+
+
+class LOCKABLE SpinLock {
public:
SpinLock() : lockword_(0) { }
@@ -65,14 +74,19 @@ class SpinLock {
// Does nothing; lockword_ is already initialized
}
- inline void Lock() {
+ // Acquire this SpinLock.
+ inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
if (Acquire_CompareAndSwap(&lockword_, 0, 1) != 0) {
SlowLock();
}
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
- inline bool TryLock() {
+ // Acquire this SpinLock and return true if the acquisition can be
+ // done without blocking, else return false. If this SpinLock is
+ // free at the time of the call, TryLock will return true with high
+ // probability.
+ inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
bool res = (Acquire_CompareAndSwap(&lockword_, 0, 1) == 0);
if (res) {
ANNOTATE_RWLOCK_ACQUIRED(this, 1);
@@ -80,7 +94,8 @@ class SpinLock {
return res;
}
- inline void Unlock() {
+ // Release this SpinLock, which must be held by the calling thread.
+ inline void Unlock() UNLOCK_FUNCTION() {
// This is defined in mutex.cc.
extern void SubmitSpinLockProfileData(const void *, int64);
@@ -129,12 +144,15 @@ class SpinLock {
// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
-class SpinLockHolder {
+class SCOPED_LOCKABLE SpinLockHolder {
private:
SpinLock* lock_;
public:
- inline explicit SpinLockHolder(SpinLock* l) : lock_(l) { l->Lock(); }
- inline ~SpinLockHolder() { lock_->Unlock(); }
+ inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
+ : lock_(l) {
+ l->Lock();
+ }
+ inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }
};
// Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock);
#define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name)
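
The spinlock.h hunk above mostly adds documentation and currently no-op lock annotations; the intended usage is unchanged. A brief sketch of the two styles it describes, using only members shown in this diff (the guarded counter is illustrative, and LINKER_INITIALIZED is the constructor constant used for the static locks elsewhere in this change):

    #include "base/spinlock.h"

    static SpinLock counter_lock(SpinLock::LINKER_INITIALIZED);
    static int counter = 0;

    void Increment() {
      SpinLockHolder holder(&counter_lock);  // acquires in the constructor
      ++counter;                             // protected region
    }                                        // releases in the destructor

    bool TryIncrement() {
      if (!counter_lock.TryLock()) return false;  // non-blocking attempt
      ++counter;
      counter_lock.Unlock();
      return true;
    }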
diff --git a/src/base/sysinfo.cc b/src/base/sysinfo.cc
index ad2cc35..823292e 100644
--- a/src/base/sysinfo.cc
+++ b/src/base/sysinfo.cc
@@ -308,14 +308,27 @@ static void InitializeSystemInfo() {
#elif defined __FreeBSD__
// For this sysctl to work, the machine must be configured without
- // SMP, APIC, or APM support.
+ // SMP, APIC, or APM support. hz should be 64-bit in freebsd 7.0
+ // and later. Before that, it's a 32-bit quantity (and gives the
+ // wrong answer on machines faster than 2^32 Hz). See
+ // http://lists.freebsd.org/pipermail/freebsd-i386/2004-November/001846.html
+ // But also compare FreeBSD 7.0:
+ // http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG70#L223
+ // 231 error = sysctl_handle_quad(oidp, &freq, 0, req);
+ // To FreeBSD 6.3 (it's the same in 6-STABLE):
+ // http://fxr.watson.org/fxr/source/i386/i386/tsc.c?v=RELENG6#L131
+ // 139 error = sysctl_handle_int(oidp, &freq, sizeof(freq), req);
+#if __FreeBSD__ >= 7
+ uint64_t hz = 0;
+#else
unsigned int hz = 0;
+#endif
size_t sz = sizeof(hz);
const char *sysctl_path = "machdep.tsc_freq";
if ( sysctlbyname(sysctl_path, &hz, &sz, NULL, 0) != 0 ) {
+ fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n",
+ sysctl_path, strerror(errno));
cpuinfo_cycles_per_second = EstimateCyclesPerSecond(1000);
- fprintf(stderr, "Unable to determine clock rate from sysctl: %s\n",
- sysctl_path);
} else {
cpuinfo_cycles_per_second = hz;
}
diff --git a/src/base/sysinfo.h b/src/base/sysinfo.h
index e22736c..529a3da 100644
--- a/src/base/sysinfo.h
+++ b/src/base/sysinfo.h
@@ -30,6 +30,9 @@
// ---
// Author: Mike Burrows
+// All functions here are thread-hostile due to file caching unless
+// commented otherwise.
+
#ifndef _SYSINFO_H_
#define _SYSINFO_H_
@@ -65,7 +68,7 @@ extern bool GetUniquePathFromEnv(const char* env_name, char* path);
extern int NumCPUs();
-// processor cycles per second of each processor
+// processor cycles per second of each processor. Thread-safe.
extern double CyclesPerSecond(void);
diff --git a/src/google/heap-checker.h b/src/google/heap-checker.h
index 3ed6587..d3a3c22 100644
--- a/src/google/heap-checker.h
+++ b/src/google/heap-checker.h
@@ -48,7 +48,6 @@
// a memory leak at program-exit, it will print instructions on how
// to track down the leak.
-
#ifndef BASE_HEAP_CHECKER_H__
#define BASE_HEAP_CHECKER_H__
@@ -66,8 +65,13 @@
#endif
+// The class is thread-safe with respect to all the provided static methods,
+// as well as HeapLeakChecker objects: they can be accessed by multiple threads.
class PERFTOOLS_DLL_DECL HeapLeakChecker {
- public: // Static functions for working with (whole-program) leak checking.
+ public:
+
+ // ----------------------------------------------------------------------- //
+ // Static functions for working with (whole-program) leak checking.
// If heap leak checking is currently active in some mode
// e.g. if leak checking was started (and is still active now)
@@ -80,8 +84,10 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// if FLAGS_heap_check gets set to "" by some code before/during InitGoogle().
static bool IsActive();
- // Return pointer to the whole-program checker if (still) active
+ // Return pointer to the whole-program checker if it has been created
// and NULL otherwise.
+ // Once GlobalChecker() returns non-NULL that object will not disappear and
+ // will be returned by all later GlobalChecker calls.
// This is mainly to access BytesLeaked() and ObjectsLeaked() (see below)
// for the whole-program checker after one calls NoGlobalLeaks()
// or similar and gets false.
@@ -101,7 +107,8 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// has been called at least once on the whole-program checker.
static void CancelGlobalCheck();
- public: // Non-static functions for starting and doing leak checking.
+ // ----------------------------------------------------------------------- //
+ // Non-static functions for starting and doing leak checking.
// Start checking and name the leak check performed.
// The name is used in naming dumped profiles
@@ -110,6 +117,10 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// in particular not contain path expressions.
explicit HeapLeakChecker(const char *name);
+ // Destructor (verifies that some *NoLeaks or *SameHeap method
+ // has been called at least once).
+ ~HeapLeakChecker();
+
// Return true iff the heap does not have more objects allocated
// w.r.t. its state at the time of our construction.
// This does full pprof heap change checking and reporting.
@@ -157,20 +168,8 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
ssize_t BytesLeaked() const;
ssize_t ObjectsLeaked() const;
- // Destructor (verifies that some *NoLeaks or *SameHeap method
- // has been called at least once).
- ~HeapLeakChecker();
-
- private: // data
-
- char* name_; // our remembered name
- size_t start_inuse_bytes_; // bytes in use at our construction
- size_t start_inuse_allocs_; // allocations in use at our construction
- bool has_checked_; // if we have done the leak check, so these are ready:
- ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker
- ssize_t inuse_allocs_increase_; // allocations-in-use increase for this checker
-
- public: // Static helpers to make us ignore certain leaks.
+ // ----------------------------------------------------------------------- //
+ // Static helpers to make us ignore certain leaks.
// NOTE: All calls to DisableChecks* affect all later heap profile generation
// that happens in our construction and inside of *NoLeaks().
@@ -264,7 +263,7 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// If 'ptr' does not point to an active allocated object
// at the time of this call, it is ignored;
// but if it does, the object must not get deleted from the heap later on;
- // it must also be not already ignored at the time of this call.
+ // it must also be not already ignored at the time of this call.
static void IgnoreObject(const void* ptr);
// Undo what an earlier IgnoreObject() call promised and asked to do.
@@ -272,47 +271,62 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// allocated object which was previously registered with IgnoreObject().
static void UnIgnoreObject(const void* ptr);
- public: // Initializations; to be called from main() only.
+ // ----------------------------------------------------------------------- //
+ // Initialization; to be called from main() only.
// Full starting of recommended whole-program checking.
static void InternalInitStart();
- public: // Internal types defined in .cc
+ // ----------------------------------------------------------------------- //
+ // Internal types defined in .cc
- struct Allocator;
+ class Allocator;
struct RangeValue;
- private: // Various helpers
+ private:
+
+ // ----------------------------------------------------------------------- //
+ // Various helpers
// Type for DumpProfileLocked
enum ProfileType { START_PROFILE, END_PROFILE };
+
// Helper for dumping start/end heap leak checking profiles
// and getting the byte/object counts.
void DumpProfileLocked(ProfileType profile_type, const void* self_stack_top,
size_t* alloc_bytes, size_t* alloc_objects);
// Helper for constructors
void Create(const char *name);
+
// Types for DoNoLeaks and its helpers.
enum CheckType { SAME_HEAP, NO_LEAKS };
enum CheckFullness { USE_PPROF, USE_COUNTS };
enum ReportMode { PPROF_REPORT, NO_REPORT };
+
// Helpers for *NoLeaks and *SameHeap
bool DoNoLeaks(CheckType check_type,
CheckFullness fullness,
ReportMode report_mode);
- bool DoNoLeaksOnce(CheckType check_type,
- CheckFullness fullness,
- ReportMode report_mode);
+ struct LeakCheckResult; // defined in .cc
+ LeakCheckResult DoLeakCheckLocked();
+ bool DoNoLeaksOnceLocked(CheckType check_type,
+ CheckFullness fullness,
+ ReportMode report_mode);
+
// Helper for DisableChecksAt
static void DisableChecksAtLocked(const void* address);
+
// Helper for DisableChecksIn
static void DisableChecksInLocked(const char* pattern);
+
// Helper for DisableChecksToHereFrom
static void DisableChecksFromToLocked(const void* start_address,
const void* end_address,
int max_depth);
+
// Helper for DoNoLeaks to ignore all objects reachable from all live data
static void IgnoreAllLiveObjectsLocked(const void* self_stack_top);
+
// Callback we pass to ListAllProcessThreads (see thread_lister.h)
// that is invoked when all threads of our process are found and stopped.
// The call back does the things needed to ignore live data reachable from
@@ -324,18 +338,20 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// Here we only use num_threads and thread_pids, that ListAllProcessThreads
// fills for us with the number and pids of all the threads of our process
// it found and attached to.
- static int IgnoreLiveThreads(void* parameter,
- int num_threads,
- pid_t* thread_pids,
- va_list ap);
- // Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreads
- // that we prefer to execute from IgnoreLiveThreads
+ static int IgnoreLiveThreadsLocked(void* parameter,
+ int num_threads,
+ pid_t* thread_pids,
+ va_list ap);
+
+ // Helper for IgnoreAllLiveObjectsLocked and IgnoreLiveThreadsLocked
+ // that we prefer to execute from IgnoreLiveThreadsLocked
// while all threads are stopped.
// This helper does live object discovery and ignoring
// for all objects that are reachable from everything
// not related to thread stacks and registers.
static void IgnoreNonThreadLiveObjectsLocked();
- // Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreads
+
+ // Helper for IgnoreNonThreadLiveObjectsLocked and IgnoreLiveThreadsLocked
// to discover and ignore all heap objects
// reachable from currently considered live objects
// (live_objects static global variable in out .cc file).
@@ -343,25 +359,31 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// in a debug message to describe what kind of live object sources
// are being used.
static void IgnoreLiveObjectsLocked(const char* name, const char* name2);
+
// Runs REGISTER_HEAPCHECK_CLEANUP cleanups and potentially
// calls DoMainHeapCheck
static void RunHeapCleanups();
- // Do the overall whole-program heap leak check
- static void DoMainHeapCheck();
+
+ // Do the overall whole-program heap leak check if needed;
+ // returns true when did the leak check.
+ static bool DoMainHeapCheck();
// Type of task for UseProcMapsLocked
enum ProcMapsTask {
RECORD_GLOBAL_DATA,
DISABLE_LIBRARY_ALLOCS
};
+
// Success/Error Return codes for UseProcMapsLocked.
enum ProcMapsResult {
PROC_MAPS_USED,
CANT_OPEN_PROC_MAPS,
NO_SHARED_LIBS_IN_PROC_MAPS
};
+
// Read /proc/self/maps, parse it, and do the 'proc_maps_task' for each line.
static ProcMapsResult UseProcMapsLocked(ProcMapsTask proc_maps_task);
+
// A ProcMapsTask to disable allocations from 'library'
// that is mapped to [start_address..end_address)
// (only if library is a certain system library).
@@ -377,7 +399,15 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
// and we move "*ptr" to point to the very start of the heap object.
static inline bool HaveOnHeapLocked(const void** ptr, size_t* object_size);
- private:
+ // Helper to shutdown heap leak checker when it's not needed
+ // or can't function properly.
+ static void TurnItselfOffLocked();
+
+ // Internally-used c-tor to start whole-executable checking.
+ HeapLeakChecker();
+
+ // ----------------------------------------------------------------------- //
+ // Friends and externally accessed helpers.
// Helper for VerifyHeapProfileTableStackGet in the unittest
// to get the recorded allocation caller for ptr,
@@ -386,20 +416,27 @@ class PERFTOOLS_DLL_DECL HeapLeakChecker {
friend void VerifyHeapProfileTableStackGet();
// This gets to execute before constructors for all global objects
- static void BeforeConstructors();
+ static void BeforeConstructorsLocked();
friend void HeapLeakChecker_BeforeConstructors();
+
// This gets to execute after destructors for all global objects
friend void HeapLeakChecker_AfterDestructors();
- // Helper to shutdown heap leak checker when it's not needed
- // or can't function properly.
- static void TurnItselfOff();
- private:
+ // ----------------------------------------------------------------------- //
+ // Member data.
- // Start whole-executable checking.
- HeapLeakChecker();
+ class SpinLock* lock_; // to make HeapLeakChecker objects thread-safe
+ const char* name_; // our remembered name (we own it)
+ // NULL means this leak checker is a noop
+ size_t start_inuse_bytes_; // bytes in use at our construction
+ size_t start_inuse_allocs_; // allocations in use at our construction
+ bool has_checked_; // if we have done the leak check, so these are ready:
+ ssize_t inuse_bytes_increase_; // bytes-in-use increase for this checker
+ ssize_t inuse_allocs_increase_; // allocations-in-use increase
+ // for this checker
+
+ // ----------------------------------------------------------------------- //
- private:
// Disallow "evil" constructors.
HeapLeakChecker(const HeapLeakChecker&);
void operator=(const HeapLeakChecker&);
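
The heap-checker.h hunk above reorganizes the class and documents its thread-safety, but the external usage pattern it describes is unchanged: construct a named checker, run the code under test, and call one of the *NoLeaks/*SameHeap methods before the checker is destroyed (the destructor verifies one was called). A hedged sketch of that pattern; RunCodeUnderTest is a placeholder and NoLeaks() is the assumed spelling of the full-check method:

    #include <cstdio>
    #include "google/heap-checker.h"

    extern void RunCodeUnderTest();  // placeholder for the caller's own code

    void CheckForLeaks() {
      HeapLeakChecker checker("my_leak_check");
      RunCodeUnderTest();                        // the code that should not leak
      if (!checker.NoLeaks()) {                  // assumed *NoLeaks method name
        // BytesLeaked()/ObjectsLeaked() are valid once a check has been done.
        fprintf(stderr, "leaked %zd bytes in %zd objects\n",
                checker.BytesLeaked(), checker.ObjectsLeaked());
      }
    }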
diff --git a/src/google/heap-profiler.h b/src/google/heap-profiler.h
index 2c0824f..6b22e4f 100644
--- a/src/google/heap-profiler.h
+++ b/src/google/heap-profiler.h
@@ -81,4 +81,4 @@ extern "C" PERFTOOLS_DLL_DECL void HeapProfilerDump(const char *reason);
// free()-ed as soon as the caller does not need it anymore.
extern "C" PERFTOOLS_DLL_DECL char* GetHeapProfile();
-#endif /* BASE_HEAP_PROFILER_H__ */
+#endif // BASE_HEAP_PROFILER_H__
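
The heap-profiler.h hunk only touches the include-guard comment, but the surrounding text restates the GetHeapProfile() contract: the returned buffer is allocated for the caller and should be free()-d as soon as it is no longer needed. A minimal sketch of honoring that contract (the output path handling is illustrative):

    #include <cstdio>
    #include <cstdlib>
    #include "google/heap-profiler.h"

    void WriteHeapProfileTo(const char* path) {
      char* profile = GetHeapProfile();  // heap-profile text; the caller owns it
      if (profile != NULL) {
        FILE* f = fopen(path, "w");
        if (f != NULL) {
          fputs(profile, f);
          fclose(f);
        }
        free(profile);                   // required once the text is no longer needed
      }
    }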
diff --git a/src/google/profiler.h b/src/google/profiler.h
index 24ef2f3..8104861 100644
--- a/src/google/profiler.h
+++ b/src/google/profiler.h
@@ -52,6 +52,8 @@
// Use pprof to view the resulting profile output.
// % pprof <path_to_executable> <profile_file_name>
// % pprof --gv <path_to_executable> <profile_file_name>
+//
+// These functions are thread-safe.
#ifndef BASE_PROFILER_H__
#define BASE_PROFILER_H__
diff --git a/src/heap-checker.cc b/src/heap-checker.cc
index 0fea028..312e5aa 100644
--- a/src/heap-checker.cc
+++ b/src/heap-checker.cc
@@ -197,27 +197,35 @@ DEFINE_string(heap_check_dump_directory,
// Copy of FLAGS_heap_profile_pprof.
// Need this since DoNoLeaks can happen
// after FLAGS_heap_profile_pprof is destroyed.
-static string* flags_heap_profile_pprof = &FLAGS_heap_profile_pprof;
+static const string* flags_heap_profile_pprof = &FLAGS_heap_profile_pprof;
//----------------------------------------------------------------------
// HeapLeakChecker global data
//----------------------------------------------------------------------
-// Global lock for (most of) the global data of this module.
-// We could use pthread's lock here, but spinlock is faster.
+// Global lock for all the global data of this module.
static SpinLock heap_checker_lock(SpinLock::LINKER_INITIALIZED);
//----------------------------------------------------------------------
-// Heap profile prefix for leak checking profiles
-static string* profile_prefix = NULL;
+// Heap profile prefix for leak checking profiles.
+// Gets assigned once when leak checking is turned on, then never modified.
+static const string* profile_name_prefix = NULL;
-// Whole-program heap leak checker
+// Whole-program heap leak checker.
+// Gets assigned once when leak checking is turned on,
+// then main_heap_checker is never deleted.
static HeapLeakChecker* main_heap_checker = NULL;
// Whether we will use main_heap_checker to do a check at program exit
+// automatically. In any case user can ask for more checks on main_heap_checker
+// via GlobalChecker().
static bool do_main_heap_check = false;
// The heap profile we use to collect info about the heap.
+// This is created in HeapLeakChecker::BeforeConstructorsLocked
+// together with setting heap_checker_on (below) to true
+// and registering our new/delete malloc hooks;
+// similarly all are unset in HeapLeakChecker::TurnItselfOffLocked.
static HeapProfileTable* heap_profile = NULL;
// If we are doing (or going to do) any kind of heap-checking.
@@ -228,6 +236,10 @@ static pid_t heap_checker_pid = 0;
// If we did heap profiling during global constructors execution
static bool constructor_heap_profiling = false;
+// RAW_VLOG level we dump key INFO messages at. If you want to turn
+// off these messages, set the environment variable PERFTOOLS_VERBOSE=-1.
+static const int heap_checker_info_level = 0;
+
//----------------------------------------------------------------------
// Alignment at which all pointers in memory are supposed to be located;
@@ -252,23 +264,28 @@ static const size_t kPointerDestAlignmentMask = kPointerDestAlignment - 1;
//----------------------------------------------------------------------
// Wrapper of LowLevelAlloc for STL_Allocator and direct use.
-// We always access Allocate/Free in this class under held heap_checker_lock,
-// this allows us to protect the period when threads are stopped
+// We always access this class under held heap_checker_lock,
+// this allows us to in particular protect the period when threads are stopped
// at random spots with ListAllProcessThreads by heap_checker_lock,
// w/o worrying about the lock in LowLevelAlloc::Arena.
// We rely on the fact that we use an own arena with an own lock here.
class HeapLeakChecker::Allocator {
public:
static void Init() {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(arena_ == NULL, "");
arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
}
static void Shutdown() {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (!LowLevelAlloc::DeleteArena(arena_) || alloc_count_ != 0) {
RAW_LOG(FATAL, "Internal heap checker leak of %d objects", alloc_count_);
}
}
- static int alloc_count() { return alloc_count_; }
+ static int alloc_count() {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
+ return alloc_count_;
+ }
static void* Allocate(size_t n) {
RAW_DCHECK(arena_ && heap_checker_lock.IsHeld(), "");
void* p = LowLevelAlloc::AllocWithArena(n, arena_);
@@ -303,13 +320,13 @@ int HeapLeakChecker::Allocator::alloc_count_ = 0;
// Cases of live object placement we distinguish
enum ObjectPlacement {
- MUST_BE_ON_HEAP, // Must point to a live object of the matching size in the
- // heap_profile map of the heap when we get to it
- IGNORED_ON_HEAP, // Is a live (ignored) object on heap
- MAYBE_LIVE, // Is simply a piece of writable memory from /proc/self/maps
- IN_GLOBAL_DATA, // Is part of global data region of the executable
- THREAD_DATA, // Part of a thread stack (and a thread descriptor with TLS)
- THREAD_REGISTERS, // Values in registers of some thread
+ MUST_BE_ON_HEAP, // Must point to a live object of the matching size in the
+ // heap_profile map of the heap when we get to it
+ IGNORED_ON_HEAP, // Is a live (ignored) object on heap
+ MAYBE_LIVE, // Is a piece of writable memory from /proc/self/maps
+ IN_GLOBAL_DATA, // Is part of global data region of the executable
+ THREAD_DATA, // Part of a thread stack and a thread descriptor with TLS
+ THREAD_REGISTERS, // Values in registers of some thread
};
// Information about an allocated object
@@ -414,18 +431,29 @@ static uintptr_t max_heap_address = 0;
//----------------------------------------------------------------------
+// Simple casting helpers for uintptr_t and void*:
+template<typename T>
+inline static const void* AsPtr(T addr) {
+ return reinterpret_cast<void*>(addr);
+}
+inline static uintptr_t AsInt(const void* ptr) {
+ return reinterpret_cast<uintptr_t>(ptr);
+}
+
+//----------------------------------------------------------------------
+
// Our hooks for MallocHook
static void NewHook(const void* ptr, size_t size) {
if (ptr != NULL) {
RAW_VLOG(7, "Recording Alloc: %p of %"PRIuS, ptr, size);
- heap_checker_lock.Lock();
- if (size > max_heap_object_size) max_heap_object_size = size;
- uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
- if (addr < min_heap_address) min_heap_address = addr;
- addr += size;
- if (addr > max_heap_address) max_heap_address = addr;
- heap_profile->RecordAlloc(ptr, size, 0);
- heap_checker_lock.Unlock();
+ { SpinLockHolder l(&heap_checker_lock);
+ if (size > max_heap_object_size) max_heap_object_size = size;
+ uintptr_t addr = AsInt(ptr);
+ if (addr < min_heap_address) min_heap_address = addr;
+ addr += size;
+ if (addr > max_heap_address) max_heap_address = addr;
+ heap_profile->RecordAlloc(ptr, size, 0);
+ }
RAW_VLOG(8, "Alloc Recorded: %p of %"PRIuS"", ptr, size);
}
}
@@ -433,9 +461,9 @@ static void NewHook(const void* ptr, size_t size) {
static void DeleteHook(const void* ptr) {
if (ptr != NULL) {
RAW_VLOG(7, "Recording Free %p", ptr);
- heap_checker_lock.Lock();
- heap_profile->RecordFree(ptr);
- heap_checker_lock.Unlock();
+ { SpinLockHolder l(&heap_checker_lock);
+ heap_profile->RecordFree(ptr);
+ }
RAW_VLOG(8, "Free Recorded: %p", ptr);
}
}
@@ -469,9 +497,11 @@ static StackDirection GetStackDirection(const int* ptr) {
static StackDirection stack_direction = UNKNOWN_DIRECTION;
// This routine is called for every thread stack we know about to register it.
-static void RegisterStack(const void* top_ptr) {
+static void RegisterStackLocked(const void* top_ptr) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
+ RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
RAW_VLOG(1, "Thread stack at %p", top_ptr);
- uintptr_t top = reinterpret_cast<uintptr_t>(top_ptr);
+ uintptr_t top = AsInt(top_ptr);
stack_tops->insert(top); // add for later use
// make sure stack_direction is initialized
@@ -481,7 +511,7 @@ static void RegisterStack(const void* top_ptr) {
// Find memory region with this stack
MemoryRegionMap::Region region;
- if (MemoryRegionMap::FindStackRegion(top, &region)) {
+ if (MemoryRegionMap::FindAndMarkStackRegion(top, &region)) {
// Make the proper portion of the stack live:
if (stack_direction == GROWS_TOWARDS_LOW_ADDRESSES) {
RAW_VLOG(2, "Live stack at %p of %"PRIuS" bytes",
@@ -490,8 +520,9 @@ static void RegisterStack(const void* top_ptr) {
THREAD_DATA));
} else { // GROWS_TOWARDS_HIGH_ADDRESSES
RAW_VLOG(2, "Live stack at %p of %"PRIuS" bytes",
- (void*)region.start_addr, top - region.start_addr);
- live_objects->push_back(AllocObject((void*)region.start_addr,
+ AsPtr(region.start_addr),
+ top - region.start_addr);
+ live_objects->push_back(AllocObject(AsPtr(region.start_addr),
top - region.start_addr,
THREAD_DATA));
}
@@ -501,11 +532,11 @@ static void RegisterStack(const void* top_ptr) {
lib != library_live_objects->end(); ++lib) {
for (LiveObjectsStack::iterator span = lib->second.begin();
span != lib->second.end(); ++span) {
- uintptr_t start = reinterpret_cast<uintptr_t>(span->ptr);
+ uintptr_t start = AsInt(span->ptr);
uintptr_t end = start + span->size;
if (start <= top && top < end) {
RAW_VLOG(2, "Stack at %p is inside /proc/self/maps chunk %p..%p",
- top_ptr, (void*)start, (void*)end);
+ top_ptr, AsPtr(start), AsPtr(end));
// Shrink start..end region by chopping away the memory regions in
// MemoryRegionMap that land in it to undo merging of regions
// in /proc/self/maps, so that we correctly identify what portion
@@ -513,6 +544,7 @@ static void RegisterStack(const void* top_ptr) {
uintptr_t stack_start = start;
uintptr_t stack_end = end;
// can optimize-away this loop, but it does not run often
+ RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
for (MemoryRegionMap::RegionIterator r =
MemoryRegionMap::BeginRegionLocked();
r != MemoryRegionMap::EndRegionLocked(); ++r) {
@@ -525,7 +557,7 @@ static void RegisterStack(const void* top_ptr) {
}
if (stack_start != start || stack_end != end) {
RAW_VLOG(2, "Stack at %p is actually inside memory chunk %p..%p",
- top_ptr, (void*)stack_start, (void*)stack_end);
+ top_ptr, AsPtr(stack_start), AsPtr(stack_end));
}
// Make the proper portion of the stack live:
if (stack_direction == GROWS_TOWARDS_LOW_ADDRESSES) {
@@ -535,18 +567,18 @@ static void RegisterStack(const void* top_ptr) {
AllocObject(top_ptr, stack_end - top, THREAD_DATA));
} else { // GROWS_TOWARDS_HIGH_ADDRESSES
RAW_VLOG(2, "Live stack at %p of %"PRIuS" bytes",
- (void*)stack_start, top - stack_start);
+ AsPtr(stack_start), top - stack_start);
live_objects->push_back(
- AllocObject((void*)stack_start, top - stack_start, THREAD_DATA));
+ AllocObject(AsPtr(stack_start), top - stack_start, THREAD_DATA));
}
lib->second.erase(span); // kill the rest of the region
// Put the non-stack part(s) of the region back:
if (stack_start != start) {
- lib->second.push_back(AllocObject((void*)start, stack_start - start,
+ lib->second.push_back(AllocObject(AsPtr(start), stack_start - start,
MAYBE_LIVE));
}
if (stack_end != end) {
- lib->second.push_back(AllocObject((void*)stack_end, end - stack_end,
+ lib->second.push_back(AllocObject(AsPtr(stack_end), end - stack_end,
MAYBE_LIVE));
}
return;
@@ -560,12 +592,13 @@ static void RegisterStack(const void* top_ptr) {
// Iterator for heap allocation map data to make objects allocated from
// disabled regions of code to be live.
-static void MakeDisabledLiveCallback(const void* ptr,
- const HeapProfileTable::AllocInfo& info) {
+static void MakeDisabledLiveCallbackLocked(
+ const void* ptr, const HeapProfileTable::AllocInfo& info) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
bool stack_disable = false;
bool range_disable = false;
for (int depth = 0; depth < info.stack_depth; depth++) {
- uintptr_t addr = reinterpret_cast<uintptr_t>(info.call_stack[depth]);
+ uintptr_t addr = AsInt(info.call_stack[depth]);
if (disabled_addresses &&
disabled_addresses->find(addr) != disabled_addresses->end()) {
stack_disable = true; // found; dropping
@@ -585,7 +618,7 @@ static void MakeDisabledLiveCallback(const void* ptr,
}
}
if (stack_disable || range_disable) {
- uintptr_t start_address = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t start_address = AsInt(ptr);
uintptr_t end_address = start_address + info.object_size;
StackTopSet::const_iterator iter
= stack_tops->lower_bound(start_address);
@@ -596,14 +629,14 @@ static void MakeDisabledLiveCallback(const void* ptr,
// if they are used to hold thread call stacks
// (i.e. when we find a stack inside).
// The reason is that we'll treat as live the currently used
- // stack portions anyway (see RegisterStack),
+ // stack portions anyway (see RegisterStackLocked),
// and the rest of the region where the stack lives can well
// contain outdated stack variables which are not live anymore,
// hence should not be treated as such.
RAW_VLOG(2, "Not %s-disabling %"PRIuS" bytes at %p"
": have stack inside: %p",
(stack_disable ? "stack" : "range"),
- info.object_size, ptr, (void*)*iter);
+ info.object_size, ptr, AsPtr(*iter));
return;
}
}
@@ -628,13 +661,14 @@ static void RecordGlobalDataLocked(uintptr_t start_address,
uintptr_t end_address,
const char* permissions,
const char* filename) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
// Ignore non-writeable regions.
if (strchr(permissions, 'w') == NULL) return;
if (filename == NULL || *filename == '\0') filename = "UNNAMED";
RAW_VLOG(2, "Looking into %s: 0x%" PRIxPTR "..0x%" PRIxPTR,
filename, start_address, end_address);
(*library_live_objects)[filename].
- push_back(AllocObject(reinterpret_cast<void*>(start_address),
+ push_back(AllocObject(AsPtr(start_address),
end_address - start_address,
MAYBE_LIVE));
}
@@ -647,6 +681,7 @@ static bool IsLibraryNamed(const char* library, const char* library_base) {
return p != NULL && (p[sz] == '.' || p[sz] == '-');
}
+// static
void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
uintptr_t start_address,
uintptr_t end_address) {
@@ -690,9 +725,7 @@ void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
}
if (depth) {
RAW_VLOG(1, "Disabling allocations from %s at depth %d:", library, depth);
- DisableChecksFromToLocked(reinterpret_cast<void*>(start_address),
- reinterpret_cast<void*>(end_address),
- depth);
+ DisableChecksFromToLocked(AsPtr(start_address), AsPtr(end_address), depth);
if (IsLibraryNamed(library, "/libpthread") ||
IsLibraryNamed(library, "/libdl") ||
IsLibraryNamed(library, "/ld")) {
@@ -700,7 +733,7 @@ void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
library);
if (global_region_caller_ranges == NULL) {
global_region_caller_ranges =
- new (Allocator::Allocate(sizeof(GlobalRegionCallerRangeMap)))
+ new(Allocator::Allocate(sizeof(GlobalRegionCallerRangeMap)))
GlobalRegionCallerRangeMap;
}
global_region_caller_ranges
@@ -709,6 +742,7 @@ void HeapLeakChecker::DisableLibraryAllocsLocked(const char* library,
}
}
+// static
HeapLeakChecker::ProcMapsResult HeapLeakChecker::UseProcMapsLocked(
ProcMapsTask proc_maps_task) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
@@ -806,10 +840,11 @@ static enum {
// due to reliance on locale functions (these are called through RAW_LOG
// and in other ways).
//
-int HeapLeakChecker::IgnoreLiveThreads(void* parameter,
- int num_threads,
- pid_t* thread_pids,
- va_list /*ap*/) {
+/*static*/ int HeapLeakChecker::IgnoreLiveThreadsLocked(void* parameter,
+ int num_threads,
+ pid_t* thread_pids,
+ va_list /*ap*/) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
thread_listing_status = CALLBACK_STARTED;
RAW_VLOG(2, "Found %d threads (from pid %d)", num_threads, getpid());
@@ -835,11 +870,13 @@ int HeapLeakChecker::IgnoreLiveThreads(void* parameter,
// when all but this thread are suspended.
if (sys_ptrace(PTRACE_GETREGS, thread_pids[i], NULL, &thread_regs) == 0) {
// Need to use SP to get all the data from the very last stack frame:
- RegisterStack((void*) thread_regs.SP);
+ COMPILE_ASSERT(sizeof(thread_regs.SP) == sizeof(void*),
+ SP_register_does_not_look_like_a_pointer);
+ RegisterStackLocked(reinterpret_cast<void*>(thread_regs.SP));
// Make registers live (just in case PTRACE_ATTACH resulted in some
// register pointers still being in the registers and not on the stack):
- for (void** p = (void**)&thread_regs;
- p < (void**)(&thread_regs + 1); ++p) {
+ for (void** p = reinterpret_cast<void**>(&thread_regs);
+ p < reinterpret_cast<void**>(&thread_regs + 1); ++p) {
RAW_VLOG(3, "Thread register %p", *p);
thread_registers.push_back(*p);
}
@@ -874,20 +911,24 @@ int HeapLeakChecker::IgnoreLiveThreads(void* parameter,
// (protected by our lock; IgnoreAllLiveObjectsLocked sets it)
static const void* self_thread_stack_top;
+// static
void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
+ RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
RAW_VLOG(2, "Handling self thread with pid %d", self_thread_pid);
// Register our own stack:
// Important that all stack ranges (including the one here)
- // are known before we start looking at them in MakeDisabledLiveCallback:
- RegisterStack(self_thread_stack_top);
+ // are known before we start looking at them
+ // in MakeDisabledLiveCallbackLocked:
+ RegisterStackLocked(self_thread_stack_top);
IgnoreLiveObjectsLocked("stack data", "");
// Make objects we were told to ignore live:
if (ignored_objects) {
for (IgnoredObjectsMap::const_iterator object = ignored_objects->begin();
object != ignored_objects->end(); ++object) {
- const void* ptr = reinterpret_cast<const void*>(object->first);
+ const void* ptr = AsPtr(object->first);
RAW_VLOG(2, "Ignored live object at %p of %"PRIuS" bytes",
ptr, object->second);
live_objects->
@@ -910,7 +951,7 @@ void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
// is allocated from libpthreads and we have range-disabled that
// library code with UseProcMapsLocked(DISABLE_LIBRARY_ALLOCS);
// so now we declare all thread-specific data reachable from there as live.
- heap_profile->IterateAllocs(MakeDisabledLiveCallback);
+ heap_profile->IterateAllocs(MakeDisabledLiveCallbackLocked);
IgnoreLiveObjectsLocked("disabled code", "");
// Actually make global data live:
@@ -922,7 +963,8 @@ void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
// Process library_live_objects in l->second
// filtering them by MemoryRegionMap:
// It's safe to iterate over MemoryRegionMap
- // w/o locks here as we are inside MemoryRegionMap::Lock().
+ // w/o locks here as we are inside MemoryRegionMap::Lock():
+ RAW_DCHECK(MemoryRegionMap::LockIsHeld(), "");
// The only change to MemoryRegionMap possible in this loop
// is region addition as a result of allocating more memory
// for live_objects. This won't invalidate the RegionIterator
@@ -941,14 +983,14 @@ void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
// any allocator on top of mmap.
bool subtract = true;
if (!region->is_stack && global_region_caller_ranges) {
- if (region->caller == static_cast<uintptr_t>(NULL)) {
+ if (region->caller() == static_cast<uintptr_t>(NULL)) {
have_null_region_callers = true;
} else {
GlobalRegionCallerRangeMap::const_iterator iter
- = global_region_caller_ranges->upper_bound(region->caller);
+ = global_region_caller_ranges->upper_bound(region->caller());
if (iter != global_region_caller_ranges->end()) {
- RAW_DCHECK(iter->first > region->caller, "");
- if (iter->second < region->caller) { // in special region
+ RAW_DCHECK(iter->first > region->caller(), "");
+ if (iter->second < region->caller()) { // in special region
subtract = false;
}
}
@@ -959,7 +1001,7 @@ void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
for (LiveObjectsStack::const_iterator i = l->second.begin();
i != l->second.end(); ++i) {
// subtract *region from *i
- uintptr_t start = reinterpret_cast<uintptr_t>(i->ptr);
+ uintptr_t start = AsInt(i->ptr);
uintptr_t end = start + i->size;
if (region->start_addr <= start && end <= region->end_addr) {
// full deletion due to subsumption
@@ -968,12 +1010,12 @@ void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
live_objects->push_back(AllocObject(i->ptr,
region->start_addr - start,
IN_GLOBAL_DATA));
- live_objects->push_back(AllocObject((void*)region->end_addr,
+ live_objects->push_back(AllocObject(AsPtr(region->end_addr),
end - region->end_addr,
IN_GLOBAL_DATA));
} else if (region->end_addr > start &&
region->start_addr <= start) { // cut from start
- live_objects->push_back(AllocObject((void*)region->end_addr,
+ live_objects->push_back(AllocObject(AsPtr(region->end_addr),
end - region->end_addr,
IN_GLOBAL_DATA));
} else if (region->start_addr > start &&
@@ -1019,8 +1061,8 @@ void HeapLeakChecker::IgnoreNonThreadLiveObjectsLocked() {
static int IsOneThread(void* parameter, int num_threads,
pid_t* thread_pids, va_list ap) {
if (num_threads != 1) {
- RAW_LOG(WARNING, "Have threads: Won't CPU-profile the bulk of "
- "leak checking work happening in IgnoreLiveThreads!");
+ RAW_LOG(WARNING, "Have threads: Won't CPU-profile the bulk of leak "
+ "checking work happening in IgnoreLiveThreadsLocked!");
}
ResumeAllProcessThreads(num_threads, thread_pids);
return num_threads;
@@ -1030,11 +1072,13 @@ static int IsOneThread(void* parameter, int num_threads,
// Making it global helps with compiler warnings.
static va_list dummy_ap;
+// static
void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_CHECK(live_objects == NULL, "");
- live_objects = new (Allocator::Allocate(sizeof(LiveObjectsStack)))
+ live_objects = new(Allocator::Allocate(sizeof(LiveObjectsStack)))
LiveObjectsStack;
- stack_tops = new (Allocator::Allocate(sizeof(StackTopSet))) StackTopSet;
+ stack_tops = new(Allocator::Allocate(sizeof(StackTopSet))) StackTopSet;
// reset the counts
live_objects_total = 0;
live_bytes_total = 0;
@@ -1047,12 +1091,11 @@ void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
max_heap_object_size = (
FLAGS_heap_check_max_pointer_offset != -1
? min(size_t(FLAGS_heap_check_max_pointer_offset), max_heap_object_size)
- : max_heap_object_size
- );
+ : max_heap_object_size);
// Record global data as live:
if (FLAGS_heap_check_ignore_global_live) {
library_live_objects =
- new (Allocator::Allocate(sizeof(LibraryLiveObjectsStacks)))
+ new(Allocator::Allocate(sizeof(LibraryLiveObjectsStacks)))
LibraryLiveObjectsStacks;
}
// Ignore all thread stacks:
@@ -1061,7 +1104,7 @@ void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
self_thread_pid = getpid();
self_thread_stack_top = self_stack_top;
if (FLAGS_heap_check_ignore_thread_live) {
- // In case we are doing CPU profiling we'd like to do all the work
+ // In case we are doing CPU profiling we'd like to do all the work
// in the main thread, not in the special thread created by
// ListAllProcessThreads, so that CPU profiler can collect all its samples.
// The machinery of ListAllProcessThreads conflicts with the CPU profiler
@@ -1075,13 +1118,13 @@ void HeapLeakChecker::IgnoreAllLiveObjectsLocked(const void* self_stack_top) {
// When the normal path of ListAllProcessThreads below is taken,
// we fully suspend the threads right here before any liveness checking
// and keep them suspended for the whole time of liveness checking
- // inside of the IgnoreLiveThreads callback.
+ // inside of the IgnoreLiveThreadsLocked callback.
// (The threads can't (de)allocate due to lock on the delete hook but
// if not suspended they could still mess with the pointer
// graph while we walk it).
int r = want_and_can_run_in_main_thread
- ? IgnoreLiveThreads(NULL, 1, &self_thread_pid, dummy_ap)
- : ListAllProcessThreads(NULL, IgnoreLiveThreads);
+ ? IgnoreLiveThreadsLocked(NULL, 1, &self_thread_pid, dummy_ap)
+ : ListAllProcessThreads(NULL, IgnoreLiveThreadsLocked);
need_to_ignore_non_thread_objects = r < 0;
if (r < 0) {
RAW_LOG(WARNING, "Thread finding failed with %d errno=%d", r, errno);
@@ -1141,7 +1184,7 @@ static const size_t kUnicodeStringAlignmentMask = kUnicodeStringOffset - 1;
// When checking if a byte sequence points to a heap object we use
// HeapProfileTable::FindInsideAlloc to handle both pointers to
// the start and inside of heap-allocated objects.
-// The "inside" case needs to be checked to support
+// The "inside" case needs to be checked to support
// at least the following relatively common cases:
// - C++ arrays allocated with new FooClass[size] for classes
// with destructors have their size recorded in a sizeof(int) field
@@ -1161,8 +1204,10 @@ static const size_t kUnicodeStringAlignmentMask = kUnicodeStringOffset - 1;
// By finding these additional objects here
// we slightly increase the chance to mistake random memory bytes
// for a pointer and miss a leak in a particular run of a binary.
-void HeapLeakChecker::IgnoreLiveObjectsLocked(const char* name,
- const char* name2) {
+//
+/*static*/ void HeapLeakChecker::IgnoreLiveObjectsLocked(const char* name,
+ const char* name2) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
int64 live_object_count = 0;
int64 live_byte_count = 0;
while (!live_objects->empty()) {
@@ -1180,8 +1225,7 @@ void HeapLeakChecker::IgnoreLiveObjectsLocked(const char* name,
const char* const whole_object = object;
size_t const whole_size = size;
// Try interpretting any byte sequence in object,size as a heap pointer:
- const size_t remainder =
- reinterpret_cast<uintptr_t>(object) % pointer_source_alignment;
+ const size_t remainder = AsInt(object) % pointer_source_alignment;
if (remainder) {
object += pointer_source_alignment - remainder;
if (size >= pointer_source_alignment - remainder) {
@@ -1257,8 +1301,11 @@ void HeapLeakChecker::IgnoreLiveObjectsLocked(const char* name,
// HeapLeakChecker leak check disabling components
//----------------------------------------------------------------------
+// static
void HeapLeakChecker::DisableChecksUp(int stack_frames) {
- if (!heap_checker_on) return;
+ { SpinLockHolder l(&heap_checker_lock);
+ if (!heap_checker_on) return;
+ }
RAW_CHECK(stack_frames >= 1, "");
void* stack[1];
if (GetStackTrace(stack, 1, stack_frames + 1) != 1) {
@@ -1267,15 +1314,18 @@ void HeapLeakChecker::DisableChecksUp(int stack_frames) {
DisableChecksAt(stack[0]);
}
+// static
void HeapLeakChecker::DisableChecksAt(const void* address) {
+ SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return;
- heap_checker_lock.Lock();
DisableChecksAtLocked(address);
- heap_checker_lock.Unlock();
}
+// static
bool HeapLeakChecker::HaveDisabledChecksUp(int stack_frames) {
- if (!heap_checker_on) return false;
+ { SpinLockHolder l(&heap_checker_lock);
+ if (!heap_checker_on) return false;
+ }
RAW_CHECK(stack_frames >= 1, "");
void* stack[1];
if (GetStackTrace(stack, 1, stack_frames + 1) != 1) {
@@ -1284,26 +1334,28 @@ bool HeapLeakChecker::HaveDisabledChecksUp(int stack_frames) {
return HaveDisabledChecksAt(stack[0]);
}
+// static
bool HeapLeakChecker::HaveDisabledChecksAt(const void* address) {
+ SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return false;
- heap_checker_lock.Lock();
bool result = disabled_addresses != NULL &&
- disabled_addresses->
- find(reinterpret_cast<uintptr_t>(address)) !=
+ disabled_addresses->find(AsInt(address)) !=
disabled_addresses->end();
- heap_checker_lock.Unlock();
return result;
}
+// static
void HeapLeakChecker::DisableChecksIn(const char* pattern) {
+ SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return;
- heap_checker_lock.Lock();
DisableChecksInLocked(pattern);
- heap_checker_lock.Unlock();
}
+// static
void* HeapLeakChecker::GetDisableChecksStart() {
- if (!heap_checker_on) return NULL;
+ { SpinLockHolder l(&heap_checker_lock);
+ if (!heap_checker_on) return NULL;
+ }
void* start_address = NULL;
if (GetStackTrace(&start_address, 1, 1) != 1) {
RAW_LOG(FATAL, "Can't get stack trace");
@@ -1311,24 +1363,27 @@ void* HeapLeakChecker::GetDisableChecksStart() {
return start_address;
}
+// static
void HeapLeakChecker::DisableChecksToHereFrom(const void* start_address) {
- if (!heap_checker_on) return;
+ { SpinLockHolder l(&heap_checker_lock);
+ if (!heap_checker_on) return;
+ }
void* end_address_ptr = NULL;
if (GetStackTrace(&end_address_ptr, 1, 1) != 1) {
RAW_LOG(FATAL, "Can't get stack trace");
}
const void* end_address = end_address_ptr;
if (start_address > end_address) swap(start_address, end_address);
- heap_checker_lock.Lock();
+ SpinLockHolder l(&heap_checker_lock);
DisableChecksFromToLocked(start_address, end_address, 10000);
// practically no stack depth limit:
// our heap_profile keeps much shorter stack traces
- heap_checker_lock.Unlock();
}
+// static
void HeapLeakChecker::IgnoreObject(const void* ptr) {
+ SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return;
- heap_checker_lock.Lock();
size_t object_size;
if (!HaveOnHeapLocked(&ptr, &object_size)) {
RAW_LOG(ERROR, "No live heap object at %p to ignore", ptr);
@@ -1336,28 +1391,26 @@ void HeapLeakChecker::IgnoreObject(const void* ptr) {
RAW_VLOG(1, "Going to ignore live object at %p of %"PRIuS" bytes",
ptr, object_size);
if (ignored_objects == NULL) {
- ignored_objects = new (Allocator::Allocate(sizeof(IgnoredObjectsMap)))
+ ignored_objects = new(Allocator::Allocate(sizeof(IgnoredObjectsMap)))
IgnoredObjectsMap;
}
- if (!ignored_objects->insert(make_pair(reinterpret_cast<uintptr_t>(ptr),
- object_size)).second) {
+ if (!ignored_objects->insert(make_pair(AsInt(ptr), object_size)).second) {
RAW_LOG(FATAL, "Object at %p is already being ignored", ptr);
}
}
- heap_checker_lock.Unlock();
}
+// static
void HeapLeakChecker::UnIgnoreObject(const void* ptr) {
+ SpinLockHolder l(&heap_checker_lock);
if (!heap_checker_on) return;
- heap_checker_lock.Lock();
size_t object_size;
if (!HaveOnHeapLocked(&ptr, &object_size)) {
RAW_LOG(FATAL, "No live heap object at %p to un-ignore", ptr);
} else {
bool found = false;
if (ignored_objects) {
- IgnoredObjectsMap::iterator object =
- ignored_objects->find(reinterpret_cast<uintptr_t>(ptr));
+ IgnoredObjectsMap::iterator object = ignored_objects->find(AsInt(ptr));
if (object != ignored_objects->end() && object_size == object->second) {
ignored_objects->erase(object);
found = true;
@@ -1367,7 +1420,6 @@ void HeapLeakChecker::UnIgnoreObject(const void* ptr) {
}
if (!found) RAW_LOG(FATAL, "Object at %p has not been ignored", ptr);
}
- heap_checker_lock.Unlock();
}
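
The hunks above replace paired Lock()/Unlock() calls with SpinLockHolder, so every early return releases heap_checker_lock. For reference, a minimal sketch of the RAII idiom being relied on (illustrative only; the real holder lives in src/base/spinlock.h and differs in detail):

    #include "base/spinlock.h"

    // Hypothetical scoped holder, equivalent in spirit to SpinLockHolder.
    class ScopedSpinLock {
     public:
      explicit ScopedSpinLock(SpinLock* l) : lock_(l) { lock_->Lock(); }
      ~ScopedSpinLock() { lock_->Unlock(); }   // runs on every exit path
     private:
      SpinLock* const lock_;
      ScopedSpinLock(const ScopedSpinLock&);   // not copyable
      void operator=(const ScopedSpinLock&);
    };

    void Example(SpinLock* lock, bool enabled) {
      ScopedSpinLock l(lock);   // acquired here
      if (!enabled) return;     // an early return still releases the lock
      // ... work done under the lock ...
    }                           // released here
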
//----------------------------------------------------------------------
@@ -1378,6 +1430,8 @@ void HeapLeakChecker::DumpProfileLocked(ProfileType profile_type,
const void* self_stack_top,
size_t* alloc_bytes,
size_t* alloc_objects) {
+ RAW_DCHECK(lock_->IsHeld(), "");
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_VLOG(1, "%s check \"%s\"%s",
(profile_type == START_PROFILE ? "Starting"
: "At an end point for"),
@@ -1390,15 +1444,17 @@ void HeapLeakChecker::DumpProfileLocked(ProfileType profile_type,
MemoryRegionMap::CheckMallocHooks();
if (MallocHook::GetNewHook() != NewHook ||
MallocHook::GetDeleteHook() != DeleteHook) {
- RAW_LOG(FATAL, "new/delete malloc hooks got changed");
+ RAW_LOG(FATAL, "Had our new/delete MallocHook-s replaced. "
+ "Are you using another MallocHook client? "
+ "Use --heap_check=\"\" to avoid this conflict.");
}
// Make the heap profile, other threads are locked out.
const int alloc_count = Allocator::alloc_count();
IgnoreAllLiveObjectsLocked(self_stack_top);
- const int len = profile_prefix->size() + strlen(name_) + 10 + 2;
+ const int len = profile_name_prefix->size() + strlen(name_) + 10 + 2;
char* file_name = reinterpret_cast<char*>(Allocator::Allocate(len));
snprintf(file_name, len, "%s.%s%s%s",
- profile_prefix->c_str(), name_,
+ profile_name_prefix->c_str(), name_,
profile_type == START_PROFILE ? "-beg" : "-end",
HeapProfileTable::kFileExt);
HeapProfileTable::Stats stats;
@@ -1416,49 +1472,49 @@ void HeapLeakChecker::DumpProfileLocked(ProfileType profile_type,
}
void HeapLeakChecker::Create(const char *name) {
- name_ = NULL;
+ SpinLockHolder l(lock_);
+ name_ = NULL; // checker is inactive
has_checked_ = false;
char* n = new char[strlen(name) + 1]; // do this before we lock
IgnoreObject(n); // otherwise it might be treated as live due to our stack
- alignment_checker_lock.Lock();
- heap_checker_lock.Lock();
- // Heap activity in other threads is paused for this whole function.
- MemoryRegionMap::Lock();
- if (heap_checker_on) {
- RAW_DCHECK(strchr(name, '/') == NULL, "must be a simple name");
- name_ = n;
- memcpy(name_, name, strlen(name) + 1);
- // Use our stack ptr to make stack data live:
- int a_local_var;
- DumpProfileLocked(START_PROFILE, &a_local_var,
- &start_inuse_bytes_, &start_inuse_allocs_);
- RAW_VLOG(1, "Start check \"%s\" profile: %"PRIuS" bytes "
- "in %"PRIuS" objects",
- name_, start_inuse_bytes_, start_inuse_allocs_);
- } else {
- RAW_LOG(WARNING, "Heap checker is not active, "
- "hence checker \"%s\" will do nothing!", name);
- RAW_LOG(WARNING, "To activate set the HEAPCHECK environment variable.\n");
+ { // Heap activity in other threads is paused for this whole scope.
+ SpinLockHolder al(&alignment_checker_lock);
+ SpinLockHolder hl(&heap_checker_lock);
+ MemoryRegionMap::LockHolder ml;
+ if (heap_checker_on && profile_name_prefix != NULL) {
+ RAW_DCHECK(strchr(name, '/') == NULL, "must be a simple name");
+ memcpy(n, name, strlen(name) + 1);
+ name_ = n; // checker is active
+ // Use our stack ptr to make stack data live:
+ int a_local_var;
+ DumpProfileLocked(START_PROFILE, &a_local_var,
+ &start_inuse_bytes_, &start_inuse_allocs_);
+ RAW_VLOG(1, "Start check \"%s\" profile: %"PRIuS" bytes "
+ "in %"PRIuS" objects",
+ name_, start_inuse_bytes_, start_inuse_allocs_);
+ } else {
+ RAW_LOG(WARNING, "Heap checker is not active, "
+ "hence checker \"%s\" will do nothing!", name);
+ RAW_LOG(WARNING, "To activate set the HEAPCHECK environment variable.\n");
+ }
}
- MemoryRegionMap::Unlock();
- heap_checker_lock.Unlock();
- alignment_checker_lock.Unlock();
if (name_ == NULL) {
UnIgnoreObject(n);
delete[] n; // must be done after we unlock
}
}
-HeapLeakChecker::HeapLeakChecker(const char *name) {
+HeapLeakChecker::HeapLeakChecker(const char *name) : lock_(new SpinLock) {
RAW_DCHECK(strcmp(name, "_main_") != 0, "_main_ is reserved");
Create(name);
}
-HeapLeakChecker::HeapLeakChecker() {
+HeapLeakChecker::HeapLeakChecker() : lock_(new SpinLock) {
Create("_main_");
}
ssize_t HeapLeakChecker::BytesLeaked() const {
+ SpinLockHolder l(lock_);
if (!has_checked_) {
RAW_LOG(FATAL, "*NoLeaks|SameHeap must execute before this call");
}
@@ -1466,6 +1522,7 @@ ssize_t HeapLeakChecker::BytesLeaked() const {
}
ssize_t HeapLeakChecker::ObjectsLeaked() const {
+ SpinLockHolder l(lock_);
if (!has_checked_) {
RAW_LOG(FATAL, "*NoLeaks|SameHeap must execute before this call");
}
@@ -1488,6 +1545,7 @@ static void MakeCommand(const char* basename,
bool check_type_is_no_leaks,
bool use_initial_profile,
const string& prefix,
+ const string& pprof_path,
string* beg_profile,
string* end_profile,
string* command) {
@@ -1497,7 +1555,7 @@ static void MakeCommand(const char* basename,
ignore_re += disabled_regexp->c_str();
ignore_re += "$'";
}
- *command += *flags_heap_profile_pprof;
+ *command += pprof_path;
if (use_initial_profile) {
// compare against initial profile only if need to
*beg_profile = prefix + "." + basename +
@@ -1510,7 +1568,8 @@ static void MakeCommand(const char* basename,
invocation_path() +
" \"" + *end_profile + "\"" + ignore_re + " --inuse_objects";
if (!FLAGS_heap_check_identify_leaks) {
- *command += " --lines"; // important to catch leaks when !see_leaks
+ *command += " --lines"; // important to catch leaks when we do not see them
+ // via allocated byte/object count differences
} else {
*command += " --addresses"; // stronger than --lines and prints
// unresolvable object addresses
@@ -1577,15 +1636,16 @@ static void RawLogLines(LogSeverity severity, const string& str) {
bool HeapLeakChecker::DoNoLeaks(CheckType check_type,
CheckFullness fullness,
ReportMode report_mode) {
+ SpinLockHolder l(lock_);
// The locking also helps us keep the messages
// for the two checks close together.
- alignment_checker_lock.Lock();
+ SpinLockHolder al(&alignment_checker_lock);
bool result;
if (FLAGS_heap_check_test_pointer_alignment) {
pointer_source_alignment = 1;
- bool result_wo_align = DoNoLeaksOnce(check_type, fullness, NO_REPORT);
+ bool result_wo_align = DoNoLeaksOnceLocked(check_type, fullness, NO_REPORT);
pointer_source_alignment = kPointerSourceAlignment;
- result = DoNoLeaksOnce(check_type, fullness, report_mode);
+ result = DoNoLeaksOnceLocked(check_type, fullness, report_mode);
if (!result) {
if (result_wo_align) {
RAW_LOG(WARNING, "Found no leaks without pointer alignment: "
@@ -1599,7 +1659,7 @@ bool HeapLeakChecker::DoNoLeaks(CheckType check_type,
}
}
} else {
- result = DoNoLeaksOnce(check_type, fullness, report_mode);
+ result = DoNoLeaksOnceLocked(check_type, fullness, report_mode);
if (!result) {
if (!FLAGS_heap_check_identify_leaks) {
RAW_LOG(INFO, "setenv HEAP_CHECK_IDENTIFY_LEAKS=1 and rerun to identify "
@@ -1619,170 +1679,212 @@ bool HeapLeakChecker::DoNoLeaks(CheckType check_type,
"Do you use pointers with larger than that offsets "
"pointing in the middle of heap-allocated objects?");
}
- alignment_checker_lock.Unlock();
return result;
}
-bool HeapLeakChecker::DoNoLeaksOnce(CheckType check_type,
- CheckFullness fullness,
- ReportMode report_mode) {
- // Heap activity in other threads is paused for this function
- // until we got all profile difference info.
- heap_checker_lock.Lock();
- MemoryRegionMap::Lock();
- if (heap_checker_on) {
- if (name_ == NULL) {
- RAW_LOG(FATAL, "Heap profiling must be not turned on "
- "after construction of a HeapLeakChecker");
+// What DoLeakCheckLocked below returns.
+struct HeapLeakChecker::LeakCheckResult {
+ bool did_nothing;
+ const string* profile_prefix;
+ const string* pprof_path;
+ int64 live_objects;
+ int64 live_bytes;
+ bool use_initial_profile;
+};
+
+// Part of DoNoLeaksOnceLocked below:
+// does the things requiring heap_checker_lock and MemoryRegionMap::Lock
+// and returns some info to be accessed w/o these locks.
+HeapLeakChecker::LeakCheckResult HeapLeakChecker::DoLeakCheckLocked() {
+ RAW_DCHECK(lock_->IsHeld(), "");
+ RAW_DCHECK(alignment_checker_lock.IsHeld(), "");
+ LeakCheckResult result;
+ // Heap activity in other threads is paused during this function
+ // (i.e. until we got all profile difference info).
+ SpinLockHolder l(&heap_checker_lock);
+ if (heap_checker_on == false) {
+ if (name_ != NULL) { // had leak checking enabled when created the checker
+ RAW_LOG(FATAL, "Heap leak checker must stay enabled during "
+ "the lifetime of a HeapLeakChecker object");
}
- // Use our stack ptr to make stack data live:
- int a_local_var;
- size_t end_inuse_bytes;
- size_t end_inuse_allocs;
- DumpProfileLocked(END_PROFILE, &a_local_var,
- &end_inuse_bytes, &end_inuse_allocs);
- // DumpProfileLocked via IgnoreAllLiveObjectsLocked sets these:
- const int64 live_objects = live_objects_total;
- const int64 live_bytes = live_bytes_total;
- const bool use_initial_profile =
- !(FLAGS_heap_check_before_constructors && this == main_heap_checker);
- if (!use_initial_profile) { // compare against empty initial profile
- start_inuse_bytes_ = 0;
- start_inuse_allocs_ = 0;
+ result.did_nothing = true;
+ return result;
+ }
+ result.did_nothing = false;
+ if (name_ == NULL) {
+ RAW_LOG(FATAL, "Heap profiling must be not turned on "
+ "after construction of a HeapLeakChecker");
+ }
+ MemoryRegionMap::LockHolder ml;
+ // Use our stack ptr to make stack data live:
+ int a_local_var;
+ size_t end_inuse_bytes;
+ size_t end_inuse_allocs;
+ DumpProfileLocked(END_PROFILE, &a_local_var,
+ &end_inuse_bytes, &end_inuse_allocs);
+ // DumpProfileLocked via IgnoreAllLiveObjectsLocked sets these:
+ result.live_objects = live_objects_total;
+ result.live_bytes = live_bytes_total;
+ result.use_initial_profile =
+ !(FLAGS_heap_check_before_constructors && this == main_heap_checker);
+ if (!result.use_initial_profile) { // compare against empty initial profile
+ start_inuse_bytes_ = 0;
+ start_inuse_allocs_ = 0;
+ }
+ RAW_VLOG(1, "End check \"%s\" profile: %"PRIuS" bytes in %"PRIuS" objects",
+ name_, end_inuse_bytes, end_inuse_allocs);
+ inuse_bytes_increase_ = static_cast<ssize_t>(end_inuse_bytes -
+ start_inuse_bytes_);
+ inuse_allocs_increase_ = static_cast<ssize_t>(end_inuse_allocs -
+ start_inuse_allocs_);
+ has_checked_ = true;
+ // These two will not disappear or change after the heap_checker_lock is
+ // released getting out of this scope
+ // (they are set once before control can get here):
+ result.profile_prefix = profile_name_prefix;
+ result.pprof_path = flags_heap_profile_pprof;
+ return result;
+}
+
+// Expects lock_ and alignment_checker_lock held, and acquires
+// heap_checker_lock and MemoryRegionMap::Lock via DoLeakCheckLocked.
+// Used only by DoNoLeaks above.
+bool HeapLeakChecker::DoNoLeaksOnceLocked(CheckType check_type,
+ CheckFullness fullness,
+ ReportMode report_mode) {
+ RAW_DCHECK(lock_->IsHeld(), "");
+ RAW_DCHECK(alignment_checker_lock.IsHeld(), "");
+
+ const LeakCheckResult result = DoLeakCheckLocked();
+ if (result.did_nothing) return true;
+
+ // Now we see from the two allocated byte/object differences
+ // and the dumped heap profiles if there are really leaks
+ // and report the result:
+
+ bool see_leaks =
+ check_type == SAME_HEAP
+ ? (inuse_bytes_increase_ != 0 || inuse_allocs_increase_ != 0)
+ : (inuse_bytes_increase_ > 0 || inuse_allocs_increase_ > 0);
+ bool reported_no_leaks = false;
+ if (see_leaks || fullness == USE_PPROF) {
+ const bool pprof_can_ignore = disabled_regexp != NULL;
+ string beg_profile;
+ string end_profile;
+ string base_command;
+ MakeCommand(name_, check_type == NO_LEAKS,
+ result.use_initial_profile,
+ *result.profile_prefix, *result.pprof_path,
+ &beg_profile, &end_profile, &base_command);
+ // Make the two command lines out of the base command, with
+ // appropriate mode options
+ string command = base_command + " --text";
+ string gv_command;
+ gv_command = base_command;
+ gv_command +=
+ " --edgefraction=1e-10 --nodefraction=1e-10 --heapcheck --gv";
+
+ if (see_leaks) {
+ RAW_LOG(ERROR, "Heap memory leaks of %"PRIdS" bytes and/or "
+ "%"PRIdS" allocations detected by check \"%s\".",
+ inuse_bytes_increase_, inuse_allocs_increase_, name_);
+ RAW_LOG(ERROR, "TO INVESTIGATE leaks RUN e.g. THIS shell command:\n"
+ "\n%s\n", gv_command.c_str());
}
- RAW_VLOG(1, "End check \"%s\" profile: %"PRIuS" bytes in %"PRIuS" objects",
- name_, end_inuse_bytes, end_inuse_allocs);
- inuse_bytes_increase_ = static_cast<ssize_t>(end_inuse_bytes -
- start_inuse_bytes_);
- inuse_allocs_increase_ = static_cast<ssize_t>(end_inuse_allocs -
- start_inuse_allocs_);
- has_checked_ = true;
- MemoryRegionMap::Unlock();
- heap_checker_lock.Unlock();
- bool see_leaks =
- check_type == SAME_HEAP
- ? (inuse_bytes_increase_ != 0 || inuse_allocs_increase_ != 0)
- : (inuse_bytes_increase_ > 0 || inuse_allocs_increase_ > 0);
- if (see_leaks || fullness == USE_PPROF) {
- const bool pprof_can_ignore = disabled_regexp != NULL;
- string beg_profile;
- string end_profile;
- string base_command;
- MakeCommand(name_, check_type == NO_LEAKS,
- use_initial_profile, *profile_prefix,
- &beg_profile, &end_profile, &base_command);
- // Make the two command lines out of the base command, with
- // appropriate mode options
- string command = base_command + " --text";
- string gv_command;
- gv_command = base_command;
- gv_command +=
- " --edgefraction=1e-10 --nodefraction=1e-10 --heapcheck --gv";
-
- if (see_leaks) {
- RAW_LOG(ERROR, "Heap memory leaks of %"PRIdS" bytes and/or "
- "%"PRIdS" allocations detected by check \"%s\".",
- inuse_bytes_increase_, inuse_allocs_increase_, name_);
- RAW_LOG(ERROR, "TO INVESTIGATE leaks RUN e.g. THIS shell command:\n"
- "\n%s\n", gv_command.c_str());
- }
- string output;
- bool checked_leaks = true;
- if ((see_leaks && report_mode == PPROF_REPORT) ||
- fullness == USE_PPROF) {
- if (access(flags_heap_profile_pprof->c_str(), X_OK|R_OK) != 0) {
- RAW_LOG(WARNING, "Skipping pprof check: could not run it at %s",
- flags_heap_profile_pprof->c_str());
- checked_leaks = false;
- } else {
- // We don't care about pprof's stderr as long as it
- // succeeds with empty report:
- checked_leaks = GetStatusOutput((command + " 2>/dev/null").c_str(),
- &output) == 0;
- }
- if (see_leaks && pprof_can_ignore && output.empty() && checked_leaks) {
- RAW_LOG(WARNING, "These must be leaks that we disabled"
- " (pprof succeeded)! This check WILL FAIL"
- " if the binary is strip'ped!");
- see_leaks = false;
- }
- // do not fail the check just due to us being a stripped binary
- if (!see_leaks && strstr(output.c_str(), "nm: ") != NULL &&
- strstr(output.c_str(), ": no symbols") != NULL) output.clear();
- }
- // Make sure the profiles we created are still there.
- // They can get deleted e.g. if the program forks/executes itself
- // and FLAGS_cleanup_old_heap_profiles was kept as true.
- if (access(end_profile.c_str(), R_OK) != 0 ||
- (!beg_profile.empty() && access(beg_profile.c_str(), R_OK) != 0)) {
- RAW_LOG(FATAL, "One of the heap profiles is gone: %s %s",
- beg_profile.c_str(), end_profile.c_str());
+ string output;
+ bool checked_leaks = true;
+ if ((see_leaks && report_mode == PPROF_REPORT) ||
+ fullness == USE_PPROF) {
+ if (access(result.pprof_path->c_str(), X_OK|R_OK) != 0) {
+ RAW_LOG(WARNING, "Skipping pprof check: could not run it at %s",
+ result.pprof_path->c_str());
+ checked_leaks = false;
+ } else {
+ // We don't care about pprof's stderr as long as it
+ // succeeds with empty report:
+ checked_leaks = GetStatusOutput((command + " 2>/dev/null").c_str(),
+ &output) == 0;
}
- if (!(see_leaks || checked_leaks)) {
- // Crash if something went wrong with executing pprof
- // and we rely on pprof to do its work:
- RAW_LOG(FATAL, "The pprof command failed: %s", command.c_str());
+ if (see_leaks && pprof_can_ignore && output.empty() && checked_leaks) {
+ RAW_LOG(WARNING, "These must be leaks that we disabled"
+ " (pprof succeeded)! This check WILL FAIL"
+ " if the binary is strip'ped!");
+ see_leaks = false;
+ reported_no_leaks = true;
}
- if (see_leaks && use_initial_profile) {
+ // do not fail the check just due to us being a stripped binary
+ if (!see_leaks && strstr(output.c_str(), "nm: ") != NULL &&
+ strstr(output.c_str(), ": no symbols") != NULL) output.clear();
+ }
+ // Make sure the profiles we created are still there.
+ // They can get deleted e.g. if the program forks/executes itself
+ // and FLAGS_cleanup_old_heap_profiles was kept as true.
+ if (access(end_profile.c_str(), R_OK) != 0 ||
+ (!beg_profile.empty() && access(beg_profile.c_str(), R_OK) != 0)) {
+ RAW_LOG(FATAL, "One of the heap profiles is gone: %s %s",
+ beg_profile.c_str(), end_profile.c_str());
+ }
+ if (!(see_leaks || checked_leaks)) {
+ // Crash if something went wrong with executing pprof
+ // and we rely on pprof to do its work:
+ RAW_LOG(FATAL, "The pprof command failed: %s", command.c_str());
+ }
+ if (see_leaks && result.use_initial_profile) {
+ RAW_LOG(WARNING, "CAVEAT: Some of the reported leaks might have "
+ "occurred before check \"%s\" was started!", name_);
+ }
+ if (!see_leaks && !output.empty()) {
+ RAW_LOG(WARNING, "Tricky heap memory leaks of "
+ "no bytes and no allocations "
+ "detected by check \"%s\".", name_);
+ RAW_LOG(WARNING, "TO INVESTIGATE leaks RUN e.g. THIS shell command:\n"
+ "\n%s\n", gv_command.c_str());
+ if (result.use_initial_profile) {
RAW_LOG(WARNING, "CAVEAT: Some of the reported leaks might have "
"occurred before check \"%s\" was started!", name_);
}
- bool tricky_leaks = !output.empty();
- if (!see_leaks && tricky_leaks) {
- RAW_LOG(WARNING, "Tricky heap memory leaks of"
- " no bytes and no allocations "
- "detected by check \"%s\".", name_);
- RAW_LOG(WARNING, "TO INVESTIGATE leaks RUN e.g. THIS shell command:\n"
- "\n%s\n", gv_command.c_str());
- if (use_initial_profile) {
- RAW_LOG(WARNING, "CAVEAT: Some of the reported leaks might have "
- "occurred before check \"%s\" was started!", name_);
- }
- see_leaks = true;
- }
- if (see_leaks && report_mode == PPROF_REPORT) {
- if (checked_leaks) {
- RAW_LOG(ERROR, "Below is (less informative) textual version "
- "of this pprof command's output:");
- RawLogLines(ERROR, output);
- } else {
- RAW_LOG(ERROR, "The pprof command has failed");
- }
- }
- } else {
- RAW_LOG(INFO, "No leaks found for check \"%s\" "
- "(but no 100%% guarantee that there aren't any): "
- "found %"PRId64" reachable heap objects of %"PRId64" bytes",
- name_, live_objects, live_bytes);
+ see_leaks = true;
}
- return !see_leaks;
- } else {
- if (name_ != NULL) {
- RAW_LOG(FATAL, "Profiling must stay enabled during leak checking");
+ if (see_leaks && report_mode == PPROF_REPORT) {
+ if (checked_leaks) {
+ RAW_LOG(ERROR, "Below is (less informative) textual version "
+ "of this pprof command's output:");
+ RawLogLines(ERROR, output);
+ } else {
+ RAW_LOG(ERROR, "The pprof command has failed");
+ }
}
- MemoryRegionMap::Unlock();
- heap_checker_lock.Unlock();
- return true;
}
+ if (!see_leaks && !reported_no_leaks) {
+ RAW_VLOG(heap_checker_info_level,
+ "No leaks found for check \"%s\" "
+ "(but no 100%% guarantee that there aren't any): "
+ "found %"PRId64" reachable heap objects of %"PRId64" bytes",
+ name_, result.live_objects, result.live_bytes);
+ }
+ return !see_leaks;
}
HeapLeakChecker::~HeapLeakChecker() {
if (name_ != NULL) { // had leak checking enabled when created the checker
if (!has_checked_) {
RAW_LOG(FATAL, "Some *NoLeaks|SameHeap method"
- " must be called on any created checker");
+ " must be called on any created HeapLeakChecker");
}
UnIgnoreObject(name_);
delete[] name_;
name_ = NULL;
}
+ delete lock_;
}
//----------------------------------------------------------------------
// HeapLeakChecker overall heap check components
//----------------------------------------------------------------------
+// static
bool HeapLeakChecker::IsActive() {
+ SpinLockHolder l(&heap_checker_lock);
return heap_checker_on;
}
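
The LeakCheckResult split above keeps the lock scopes small: everything that needs heap_checker_lock and MemoryRegionMap::Lock is copied into a plain struct, and the slow work (running pprof, parsing its output) happens afterwards with no checker locks held. A reduced sketch of that shape, with hypothetical names standing in for the real fields:

    struct Snapshot {            // plain data copied out while locked
      bool  active;
      int64 live_objects;
      int64 live_bytes;
    };

    static Snapshot TakeSnapshotLocked() {
      SpinLockHolder l(&heap_checker_lock);   // short critical section
      Snapshot s;
      s.active       = heap_checker_on;
      s.live_objects = live_objects_total;
      s.live_bytes   = live_bytes_total;
      return s;
    }

    static bool CheckWithoutLocks() {
      const Snapshot s = TakeSnapshotLocked();
      if (!s.active) return true;
      // Expensive reporting (e.g. spawning pprof) would run here with no
      // heap-checker locks held, so other threads can allocate freely.
      return s.live_objects == 0 && s.live_bytes == 0;
    }
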
@@ -1810,14 +1912,14 @@ void HeapCleaner::RunHeapCleanups() {
// Program exit heap cleanup registered with atexit().
// Will not get executed when we crash on a signal.
-void HeapLeakChecker::RunHeapCleanups() {
- if (heap_checker_pid == getpid()) { // can get here (via forks?)
- // with other pids
- HeapCleaner::RunHeapCleanups();
- if (!FLAGS_heap_check_after_destructors && do_main_heap_check) {
- DoMainHeapCheck();
- }
+//
+/*static*/ void HeapLeakChecker::RunHeapCleanups() {
+ { SpinLockHolder l(&heap_checker_lock);
+ // can get here (via forks?) with other pids
+ if (heap_checker_pid != getpid()) return;
}
+ HeapCleaner::RunHeapCleanups();
+ if (!FLAGS_heap_check_after_destructors) DoMainHeapCheck();
}
// defined below
@@ -1827,14 +1929,24 @@ static bool internal_init_start_has_run = false;
// Called exactly once, before main() (but hopefully just before).
// This picks a good unique name for the dumped leak checking heap profiles.
-void HeapLeakChecker::InternalInitStart() {
- RAW_CHECK(!internal_init_start_has_run, "Only one call is expected");
- internal_init_start_has_run = true;
-
- if (FLAGS_heap_check.empty()) {
- // turns out we do not need checking in the end; can stop profiling
- TurnItselfOff();
- return;
+//
+// Because we crash when InternalInitStart is called more than once,
+// it's fine that we hold heap_checker_lock only around pieces of
+// this function: this is still enough for thread-safety w.r.t. other functions
+// of this module.
+// We can't hold heap_checker_lock throughout because it would deadlock
+// on a memory allocation since our new/delete hooks can be on.
+//
+/*static*/ void HeapLeakChecker::InternalInitStart() {
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_CHECK(!internal_init_start_has_run, "Only one call is expected");
+ internal_init_start_has_run = true;
+
+ if (FLAGS_heap_check.empty()) {
+ // turns out we do not need checking in the end; can stop profiling
+ TurnItselfOffLocked();
+ return;
+ }
}
// Changing this to false can be useful when debugging heap-checker itself:
@@ -1842,29 +1954,36 @@ void HeapLeakChecker::InternalInitStart() {
// See if heap checker should turn itself off because we are
// running under gdb (to avoid conflicts over ptrace-ing rights):
char name_buf[15+15];
- snprintf(name_buf, sizeof(name_buf), "/proc/%d/cmdline", int(getppid()));
+ snprintf(name_buf, sizeof(name_buf),
+ "/proc/%d/cmdline", static_cast<int>(getppid()));
char cmdline[1024*8];
int size = GetCommandLineFrom(name_buf, cmdline, sizeof(cmdline)-1);
cmdline[size] = '\0';
// look for "gdb" in the executable's name:
const char* last = strrchr(cmdline, '/');
- if (last) last += 1;
- else last = cmdline;
+ if (last) last += 1;
+ else last = cmdline;
if (strncmp(last, "gdb", 3) == 0) {
RAW_LOG(WARNING, "We seem to be running under gdb; will turn itself off");
- TurnItselfOff();
+ SpinLockHolder l(&heap_checker_lock);
+ TurnItselfOffLocked();
return;
}
}
- if (!constructor_heap_profiling) {
- RAW_LOG(FATAL, "Can not start so late. You have to enable heap checking "
- "with HEAPCHECK=<mode>.");
+ { SpinLockHolder l(&heap_checker_lock);
+ if (!constructor_heap_profiling) {
+ RAW_LOG(FATAL, "Can not start so late. You have to enable heap checking "
+ "with HEAPCHECK=<mode>.");
+ }
}
// make an indestructible copy for heap leak checking
// happening after global variable destruction
- flags_heap_profile_pprof = new string(FLAGS_heap_profile_pprof);
+ string* pprof_path = new string(FLAGS_heap_profile_pprof);
+ { SpinLockHolder l(&heap_checker_lock);
+ flags_heap_profile_pprof = pprof_path;
+ }
// Set all flags
if (FLAGS_heap_check == "minimal") {
@@ -1909,31 +2028,39 @@ void HeapLeakChecker::InternalInitStart() {
RAW_LOG(FATAL, "Unsupported heap_check flag: %s",
FLAGS_heap_check.c_str());
}
- RAW_DCHECK(heap_checker_pid == getpid(), "");
- heap_checker_on = true;
- RAW_DCHECK(heap_profile, "");
- heap_checker_lock.Lock();
- ProcMapsResult pm_result = UseProcMapsLocked(DISABLE_LIBRARY_ALLOCS);
- // might neeed to do this more than once
- // if one later dynamically loads libraries that we want disabled
- heap_checker_lock.Unlock();
- if (pm_result != PROC_MAPS_USED) { // can't function
- TurnItselfOff();
- return;
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_DCHECK(heap_checker_pid == getpid(), "");
+ heap_checker_on = true;
+ RAW_DCHECK(heap_profile, "");
+ ProcMapsResult pm_result = UseProcMapsLocked(DISABLE_LIBRARY_ALLOCS);
+    // might need to do this more than once
+ // if one later dynamically loads libraries that we want disabled
+ if (pm_result != PROC_MAPS_USED) { // can't function
+ TurnItselfOffLocked();
+ return;
+ }
}
// make a good place and name for heap profile leak dumps
- profile_prefix = new string(FLAGS_heap_check_dump_directory);
- *profile_prefix += "/";
- *profile_prefix += invocation_name();
+ string* profile_prefix =
+ new string(FLAGS_heap_check_dump_directory + "/" + invocation_name());
HeapProfileTable::CleanupOldProfiles(profile_prefix->c_str());
// Finalize prefix for dumping leak checking profiles.
+ const int32 our_pid = getpid(); // safest to call getpid() outside lock
+ { SpinLockHolder l(&heap_checker_lock);
+ // main_thread_pid might still be 0 if this function is being called before
+ // global constructors. In that case, our pid *is* the main pid.
+ if (main_thread_pid == 0)
+ main_thread_pid = our_pid;
+ }
char pid_buf[15];
- if (main_thread_pid == 0) // possible if we're called before constructors
- main_thread_pid = getpid();
snprintf(pid_buf, sizeof(pid_buf), ".%d", main_thread_pid);
*profile_prefix += pid_buf;
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_DCHECK(profile_name_prefix == NULL, "");
+ profile_name_prefix = profile_prefix;
+ }
// Make sure new/delete hooks are installed properly
// and heap profiler is indeed able to keep track
@@ -1941,30 +2068,43 @@ void HeapLeakChecker::InternalInitStart() {
// We test this to make sure we are indeed checking for leaks.
char* test_str = new char[5];
size_t size;
- RAW_CHECK(heap_profile->FindAlloc(test_str, &size),
- "our own new/delete not linked?");
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_CHECK(heap_profile->FindAlloc(test_str, &size),
+ "our own new/delete not linked?");
+ }
delete[] test_str;
- RAW_CHECK(!heap_profile->FindAlloc(test_str, &size),
- "our own new/delete not linked?");
+ { SpinLockHolder l(&heap_checker_lock);
+ // This check can fail when it should not if another thread allocates
+ // into this same spot right this moment,
+ // which is unlikely since this code runs in InitGoogle.
+ RAW_CHECK(!heap_profile->FindAlloc(test_str, &size),
+ "our own new/delete not linked?");
+ }
// If we crash in the above code, it probably means that
// "nm <this_binary> | grep new" will show that tcmalloc's new/delete
// implementation did not get linked-in into this binary
// (i.e. nm will list __builtin_new and __builtin_vec_new as undefined).
// If this happens, it is a BUILD bug to be fixed.
- RAW_LOG(WARNING, "Heap leak checker is active -- Performance may suffer");
+ RAW_VLOG(heap_checker_info_level,
+ "WARNING: Heap leak checker is active "
+ "-- Performance may suffer");
if (FLAGS_heap_check != "local") {
// Schedule registered heap cleanup
atexit(RunHeapCleanups);
+ HeapLeakChecker* main_hc = new HeapLeakChecker();
+ SpinLockHolder l(&heap_checker_lock);
RAW_DCHECK(main_heap_checker == NULL,
"Repeated creation of main_heap_checker");
- main_heap_checker = new HeapLeakChecker();
+ main_heap_checker = main_hc;
do_main_heap_check = true;
}
- RAW_CHECK(heap_checker_on && constructor_heap_profiling,
- "Leak checking is expected to be fully turned on now");
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_CHECK(heap_checker_on && constructor_heap_profiling,
+ "Leak checking is expected to be fully turned on now");
+ }
}
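
The test_str block above is a smoke test that tcmalloc's operator new/delete really report into heap_profile: allocate, check the profile sees the block, free, check it is gone. A standalone sketch of the same idea (the real code additionally holds heap_checker_lock around each FindAlloc call):

    // Assumes a table exposing bool FindAlloc(const void* ptr, size_t* size),
    // as HeapProfileTable does.
    static void VerifyAllocHooksAreLive(HeapProfileTable* profile) {
      char* probe = new char[5];    // goes through the hooked operator new
      size_t size;
      if (!profile->FindAlloc(probe, &size)) {
        RAW_LOG(FATAL, "our own new/delete not linked? (allocation not seen)");
      }
      delete[] probe;               // goes through the hooked operator delete
      if (profile->FindAlloc(probe, &size)) {
        RAW_LOG(FATAL, "our own new/delete not linked? (free not seen)");
      }
    }
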
// We want this to run early as well, but not so early as
@@ -1972,11 +2112,16 @@ void HeapLeakChecker::InternalInitStart() {
// happened, for instance). Initializer-registration does the trick.
REGISTER_MODULE_INITIALIZER(init_start, HeapLeakChecker::InternalInitStart());
-void HeapLeakChecker::DoMainHeapCheck() {
- RAW_DCHECK(heap_checker_pid == getpid() && do_main_heap_check, "");
+// static
+bool HeapLeakChecker::DoMainHeapCheck() {
if (FLAGS_heap_check_delay_seconds > 0) {
sleep(FLAGS_heap_check_delay_seconds);
}
+ { SpinLockHolder l(&heap_checker_lock);
+ if (!do_main_heap_check) return false;
+ RAW_DCHECK(heap_checker_pid == getpid(), "");
+ do_main_heap_check = false; // will do it now; no need to do it more
+ }
if (!NoGlobalLeaks()) {
if (FLAGS_heap_check_identify_leaks) {
RAW_LOG(FATAL, "Whole-program memory leaks found.");
@@ -1985,34 +2130,39 @@ void HeapLeakChecker::DoMainHeapCheck() {
"because of whole-program memory leaks");
_exit(1); // we don't want to call atexit() routines!
}
- do_main_heap_check = false; // just did it
+ return true;
}
+// static
HeapLeakChecker* HeapLeakChecker::GlobalChecker() {
+ SpinLockHolder l(&heap_checker_lock);
return main_heap_checker;
}
+// static
bool HeapLeakChecker::NoGlobalLeaks() {
- bool result = true;
- HeapLeakChecker* main_hc = main_heap_checker;
+ // we never delete or change main_heap_checker once it's set:
+ HeapLeakChecker* main_hc = GlobalChecker();
if (main_hc) {
CheckType check_type = FLAGS_heap_check_strict_check ? SAME_HEAP : NO_LEAKS;
- if (FLAGS_heap_check_before_constructors) check_type = SAME_HEAP;
+ if (FLAGS_heap_check_before_constructors) check_type = SAME_HEAP;
// NO_LEAKS here just would make it slower in this case
// (we don't use the starting profile anyway)
CheckFullness fullness = check_type == NO_LEAKS ? USE_PPROF : USE_COUNTS;
// use pprof if it can help ignore false leaks
ReportMode report_mode = FLAGS_heap_check_report ? PPROF_REPORT : NO_REPORT;
RAW_VLOG(1, "Checking for whole-program memory leaks");
- result = main_hc->DoNoLeaks(check_type, fullness, report_mode);
+ return main_hc->DoNoLeaks(check_type, fullness, report_mode);
}
- return result;
+ return true;
}
+// static
void HeapLeakChecker::CancelGlobalCheck() {
+ SpinLockHolder l(&heap_checker_lock);
if (do_main_heap_check) {
- RAW_VLOG(0, "Canceling the automatic at-exit "
- "whole-program memory leak check");
+ RAW_VLOG(heap_checker_info_level,
+ "Canceling the automatic at-exit whole-program memory leak check");
do_main_heap_check = false;
}
}
@@ -2023,6 +2173,9 @@ void HeapLeakChecker::CancelGlobalCheck() {
static bool in_initial_malloc_hook = false;
+// Cancel our InitialMallocHook_* if present.
+static void CancelInitialMallocHooks(); // defined below
+
#ifdef HAVE___ATTRIBUTE__ // we need __attribute__((weak)) for this to work
#define INSTALLED_INITIAL_MALLOC_HOOKS
@@ -2030,12 +2183,16 @@ void HeapLeakChecker_BeforeConstructors(); // below
// Helper for InitialMallocHook_* below
static inline void InitHeapLeakCheckerFromMallocHook() {
- RAW_CHECK(!in_initial_malloc_hook,
- "Something did not reset initial MallocHook-s");
- in_initial_malloc_hook = true;
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_CHECK(!in_initial_malloc_hook,
+ "Something did not reset initial MallocHook-s");
+ in_initial_malloc_hook = true;
+ }
// Initialize heap checker on the very first allocation/mmap/sbrk call:
HeapLeakChecker_BeforeConstructors();
- in_initial_malloc_hook = false;
+ { SpinLockHolder l(&heap_checker_lock);
+ in_initial_malloc_hook = false;
+ }
}
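
InitHeapLeakCheckerFromMallocHook above flips in_initial_malloc_hook under heap_checker_lock but deliberately drops the lock before calling HeapLeakChecker_BeforeConstructors, which may itself allocate and re-enter the hooks. A boiled-down sketch of that guard pattern (DoHeavyInitialization is a made-up stand-in):

    static bool init_in_progress = false;   // plays the role of in_initial_malloc_hook

    static void InitOnceFromHook() {
      {
        SpinLockHolder l(&heap_checker_lock);
        RAW_CHECK(!init_in_progress, "re-entered initialization from a malloc hook");
        init_in_progress = true;
      }                           // lock dropped before the real work
      DoHeavyInitialization();    // may allocate and trigger the hooks again
      {
        SpinLockHolder l(&heap_checker_lock);
        init_in_progress = false;
      }
    }
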
// These will overwrite the weak definitions in malloc_hook.cc:
@@ -2068,52 +2225,70 @@ extern void InitialMallocHook_Sbrk(const void* result, ptrdiff_t increment) {
MallocHook::InvokeSbrkHook(result, increment);
}
-#endif
+// static
+void CancelInitialMallocHooks() {
+ if (MallocHook::GetNewHook() == InitialMallocHook_New) {
+ MallocHook::SetNewHook(NULL);
+ }
+ RAW_DCHECK(MallocHook::GetNewHook() == NULL, "");
+ if (MallocHook::GetMmapHook() == InitialMallocHook_MMap) {
+ MallocHook::SetMmapHook(NULL);
+ }
+ RAW_DCHECK(MallocHook::GetMmapHook() == NULL, "");
+ if (MallocHook::GetSbrkHook() == InitialMallocHook_Sbrk) {
+ MallocHook::SetSbrkHook(NULL);
+ }
+ RAW_DCHECK(MallocHook::GetSbrkHook() == NULL, "");
+}
+
+#else
+
+// static
+void CancelInitialMallocHooks() {}
-// Optional silencing, it must be called shortly after leak checker activates
-// in HeapLeakChecker::BeforeConstructors not to let logging messages through,
-// but it can't be called when BeforeConstructors() is called from within
-// the first mmap/sbrk/alloc call (something deadlocks in this case).
-// Hence we arrange for this to be called from the first global c-tor
-// that calls HeapLeakChecker_BeforeConstructors.
-static void HeapLeakChecker_MaybeMakeSilent() {
-#if 0 // TODO(csilvers): see if we can get something like this to work
- if (!VLOG_IS_ON(1)) // not on a verbose setting
- FLAGS_verbose = WARNING; // only log WARNING and ERROR and FATAL
#endif
-}
-void HeapLeakChecker::BeforeConstructors() {
+// static
+void HeapLeakChecker::BeforeConstructorsLocked() {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_CHECK(!constructor_heap_profiling,
- "BeforeConstructors called multiple times");
- // set hooks early to crash if 'new' gets called before we make heap_profile:
- MallocHook::SetNewHook(NewHook);
- MallocHook::SetDeleteHook(DeleteHook);
+ "BeforeConstructorsLocked called multiple times");
+ CancelInitialMallocHooks();
+ // Set hooks early to crash if 'new' gets called before we make heap_profile,
+ // and make sure no other hooks existed:
+ if (MallocHook::SetNewHook(NewHook) != NULL ||
+ MallocHook::SetDeleteHook(DeleteHook) != NULL) {
+ RAW_LOG(FATAL, "Had other new/delete MallocHook-s set. "
+ "Somehow leak checker got activated "
+                   "after something else has set up these hooks.");
+ }
constructor_heap_profiling = true;
- MemoryRegionMap::Init(); // set up MemoryRegionMap
- // (important that it's done before HeapProfileTable creation below)
+ MemoryRegionMap::Init(1);
+ // Set up MemoryRegionMap with (at least) one caller stack frame to record
+ // (important that it's done before HeapProfileTable creation below).
Allocator::Init();
RAW_CHECK(heap_profile == NULL, "");
- heap_checker_lock.Lock(); // Allocator expects it
- heap_profile = new (Allocator::Allocate(sizeof(HeapProfileTable)))
+ heap_profile = new(Allocator::Allocate(sizeof(HeapProfileTable)))
HeapProfileTable(&Allocator::Allocate, &Allocator::Free);
- heap_checker_lock.Unlock();
RAW_VLOG(1, "Starting tracking the heap");
heap_checker_on = true;
- // Run silencing if we are called from the first global c-tor,
- // not from the first mmap/sbrk/alloc call:
- if (!in_initial_malloc_hook) HeapLeakChecker_MaybeMakeSilent();
}
-void HeapLeakChecker::TurnItselfOff() {
+// static
+void HeapLeakChecker::TurnItselfOffLocked() {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
FLAGS_heap_check = ""; // for users who test for it
if (constructor_heap_profiling) {
RAW_CHECK(heap_checker_on, "");
- RAW_LOG(INFO, "Turning heap leak checking off");
+ RAW_VLOG(heap_checker_info_level, "Turning heap leak checking off");
heap_checker_on = false;
- MallocHook::SetNewHook(NULL);
- MallocHook::SetDeleteHook(NULL);
- heap_checker_lock.Lock(); // Allocator expects it
+ // Unset our hooks checking they were the ones set:
+ if (MallocHook::SetNewHook(NULL) != NewHook ||
+ MallocHook::SetDeleteHook(NULL) != DeleteHook) {
+ RAW_LOG(FATAL, "Had our new/delete MallocHook-s replaced. "
+ "Are you using another MallocHook client? "
+ "Use --heap_check=\"\" to avoid this conflict.");
+ }
Allocator::DeleteAndNull(&heap_profile);
// free our optional global data:
Allocator::DeleteAndNullIfNot(&disabled_regexp);
@@ -2121,7 +2296,6 @@ void HeapLeakChecker::TurnItselfOff() {
Allocator::DeleteAndNullIfNot(&disabled_addresses);
Allocator::DeleteAndNullIfNot(&disabled_ranges);
Allocator::DeleteAndNullIfNot(&global_region_caller_ranges);
- heap_checker_lock.Unlock();
Allocator::Shutdown();
MemoryRegionMap::Shutdown();
}
@@ -2170,17 +2344,15 @@ static int GetCommandLineFrom(const char* file, char* cmdline, int size) {
extern bool heap_leak_checker_bcad_variable; // in heap-checker-bcad.cc
-static bool has_called_BeforeConstructors = false;
+static bool has_called_before_constructors = false;
void HeapLeakChecker_BeforeConstructors() {
+ SpinLockHolder l(&heap_checker_lock);
// We can be called from several places: the first mmap/sbrk/alloc call
// or the first global c-tor from heap-checker-bcad.cc:
- if (has_called_BeforeConstructors) {
- // Make sure silencing is done when we are called from first global c-tor:
- if (heap_checker_on) HeapLeakChecker_MaybeMakeSilent();
- return; // do not re-execure initialization
- }
- has_called_BeforeConstructors = true;
+ // Do not re-execute initialization:
+ if (has_called_before_constructors) return;
+ has_called_before_constructors = true;
heap_checker_pid = getpid(); // set it always
heap_leak_checker_bcad_variable = true;
@@ -2211,32 +2383,28 @@ void HeapLeakChecker_BeforeConstructors() {
}
#endif
if (need_heap_check) {
- HeapLeakChecker::BeforeConstructors();
+ HeapLeakChecker::BeforeConstructorsLocked();
} else { // cancel our initial hooks
-#ifdef INSTALLED_INITIAL_MALLOC_HOOKS
- if (MallocHook::GetNewHook() == &InitialMallocHook_New)
- MallocHook::SetNewHook(NULL);
- if (MallocHook::GetMmapHook() == &InitialMallocHook_MMap)
- MallocHook::SetMmapHook(NULL);
- if (MallocHook::GetSbrkHook() == &InitialMallocHook_Sbrk)
- MallocHook::SetSbrkHook(NULL);
-#endif
+ CancelInitialMallocHooks();
}
}
// This function is executed after all global object destructors run.
void HeapLeakChecker_AfterDestructors() {
- if (heap_checker_pid == getpid()) { // can get here (via forks?)
- // with other pids
- if (FLAGS_heap_check_after_destructors && do_main_heap_check) {
- HeapLeakChecker::DoMainHeapCheck();
+ { SpinLockHolder l(&heap_checker_lock);
+ // can get here (via forks?) with other pids
+ if (heap_checker_pid != getpid()) return;
+ }
+ if (FLAGS_heap_check_after_destructors) {
+ if (HeapLeakChecker::DoMainHeapCheck()) {
poll(NULL, 0, 500);
// Need this hack to wait for other pthreads to exit.
// Otherwise tcmalloc find errors
// on a free() call from pthreads.
}
- RAW_CHECK(!do_main_heap_check, "should have done it");
}
+ SpinLockHolder l(&heap_checker_lock);
+ RAW_CHECK(!do_main_heap_check, "should have done it");
}
//----------------------------------------------------------------------
@@ -2245,10 +2413,12 @@ void HeapLeakChecker_AfterDestructors() {
// These functions are at the end of the file to prevent their inlining:
+// static
void HeapLeakChecker::DisableChecksInLocked(const char* pattern) {
+ RAW_DCHECK(heap_checker_lock.IsHeld(), "");
// make disabled_regexp
if (disabled_regexp == NULL) {
- disabled_regexp = new (Allocator::Allocate(sizeof(HCL_string))) HCL_string;
+ disabled_regexp = new(Allocator::Allocate(sizeof(HCL_string))) HCL_string;
}
RAW_VLOG(1, "Disabling leak checking in stack traces "
"under frames maching \"%s\"", pattern);
@@ -2256,56 +2426,58 @@ void HeapLeakChecker::DisableChecksInLocked(const char* pattern) {
*disabled_regexp += pattern;
}
+// static
void HeapLeakChecker::DisableChecksFromToLocked(const void* start_address,
const void* end_address,
int max_depth) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
RAW_DCHECK(start_address < end_address, "");
if (disabled_ranges == NULL) {
- disabled_ranges = new (Allocator::Allocate(sizeof(DisabledRangeMap)))
+ disabled_ranges = new(Allocator::Allocate(sizeof(DisabledRangeMap)))
DisabledRangeMap;
}
RangeValue value;
- value.start_address = reinterpret_cast<uintptr_t>(start_address);
+ value.start_address = AsInt(start_address);
value.max_depth = max_depth;
- if (disabled_ranges->
- insert(make_pair(reinterpret_cast<uintptr_t>(end_address),
- value)).second) {
+ if (disabled_ranges->insert(make_pair(AsInt(end_address), value)).second) {
RAW_VLOG(1, "Disabling leak checking in stack traces "
"under frame addresses between %p..%p",
start_address, end_address);
} else { // check that this is just a verbatim repetition
- RangeValue const& val =
- disabled_ranges->find(reinterpret_cast<uintptr_t>(end_address))->second;
+ RangeValue const& val = disabled_ranges->find(AsInt(end_address))->second;
if (val.max_depth != value.max_depth ||
val.start_address != value.start_address) {
RAW_LOG(FATAL, "Two DisableChecksToHereFrom calls conflict: "
"(%p, %p, %d) vs. (%p, %p, %d)",
- (void*)val.start_address, end_address, val.max_depth,
+ AsPtr(val.start_address), end_address, val.max_depth,
start_address, end_address, max_depth);
}
}
}
+// static
void HeapLeakChecker::DisableChecksAtLocked(const void* address) {
RAW_DCHECK(heap_checker_lock.IsHeld(), "");
if (disabled_addresses == NULL) {
- disabled_addresses = new (Allocator::Allocate(sizeof(DisabledAddressSet)))
+ disabled_addresses = new(Allocator::Allocate(sizeof(DisabledAddressSet)))
DisabledAddressSet;
}
// disable the requested address
- if (disabled_addresses->insert(reinterpret_cast<uintptr_t>(address)).second) {
+ if (disabled_addresses->insert(AsInt(address)).second) {
RAW_VLOG(1, "Disabling leak checking in stack traces "
"under frame address %p", address);
}
}
+// static
inline bool HeapLeakChecker::HaveOnHeapLocked(const void** ptr,
size_t* object_size) {
- const uintptr_t addr = reinterpret_cast<uintptr_t>(*ptr);
+ // Commented-out because HaveOnHeapLocked is very performance-critical:
+ // RAW_DCHECK(heap_checker_lock.IsHeld(), "");
+ const uintptr_t addr = AsInt(*ptr);
if (heap_profile->FindInsideAlloc(
*ptr, max_heap_object_size, ptr, object_size)) {
- const size_t offset = addr - reinterpret_cast<uintptr_t>(*ptr);
+ const size_t offset = addr - AsInt(*ptr);
// must be aligned to kPointerDestAlignmentMask,
// kUnicodeStringOffset is a special case.
if ((offset & kPointerDestAlignmentMask) == 0 ||
@@ -2320,12 +2492,13 @@ inline bool HeapLeakChecker::HaveOnHeapLocked(const void** ptr,
return false;
}
+// static
const void* HeapLeakChecker::GetAllocCaller(void* ptr) {
// this is used only in the unittest, so the heavy checks are fine
HeapProfileTable::AllocInfo info;
- heap_checker_lock.Lock();
- RAW_CHECK(heap_profile->FindAllocDetails(ptr, &info), "");
- heap_checker_lock.Unlock();
+ { SpinLockHolder l(&heap_checker_lock);
+ RAW_CHECK(heap_profile->FindAllocDetails(ptr, &info), "");
+ }
RAW_CHECK(info.stack_depth >= 1, "");
return info.call_stack[0];
}
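
Many hunks in this file swap reinterpret_cast<uintptr_t>(p) for AsInt(p), and the reverse for AsPtr. Those helpers are not shown in this diff; their likely shape (a guess, not the exact definitions in heap-checker.cc) is simply:

    // Presumed shape of the shorthand casts used above (assumption).
    static inline uintptr_t AsInt(const void* ptr) {
      return reinterpret_cast<uintptr_t>(ptr);
    }
    static inline const void* AsPtr(uintptr_t addr) {
      return reinterpret_cast<const void*>(addr);
    }
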
diff --git a/src/heap-profile-table.cc b/src/heap-profile-table.cc
index b090fc7..26eca4e 100644
--- a/src/heap-profile-table.cc
+++ b/src/heap-profile-table.cc
@@ -78,7 +78,7 @@ static const char kProcSelfMapsHeader[] = "\nMAPPED_LIBRARIES:\n";
const char HeapProfileTable::kFileExt[] = ".heap";
const int HeapProfileTable::kHashTableSize;
-const int HeapProfileTable::kMaxStackTrace;
+const int HeapProfileTable::kMaxStackDepth;
//----------------------------------------------------------------------
@@ -184,11 +184,8 @@ HeapProfileTable::~HeapProfileTable() {
table_ = NULL;
}
-HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int skip_count) {
- // Get raw stack trace
- void* key[kMaxStackTrace];
- int depth =
- MallocHook::GetCallerStackTrace(key, kMaxStackTrace, skip_count+1);
+HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth,
+ const void* const key[]) {
// Make hash-value
uintptr_t h = 0;
for (int i = 0; i < depth; i++) {
@@ -226,7 +223,16 @@ HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int skip_count) {
void HeapProfileTable::RecordAlloc(const void* ptr, size_t bytes,
int skip_count) {
- Bucket* b = GetBucket(kStripFrames + skip_count + 1);
+ void* key[kMaxStackDepth];
+ int depth = MallocHook::GetCallerStackTrace(
+ key, kMaxStackDepth, kStripFrames + skip_count + 1);
+ RecordAllocWithStack(ptr, bytes, depth, key);
+}
+
+void HeapProfileTable::RecordAllocWithStack(
+ const void* ptr, size_t bytes, int stack_depth,
+ const void* const call_stack[]) {
+ Bucket* b = GetBucket(stack_depth, call_stack);
b->allocs++;
b->alloc_size += bytes;
total_.allocs++;
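
GetBucket now takes the caller stack explicitly, so RecordAlloc first captures it with MallocHook::GetCallerStackTrace and RecordAllocWithStack can be fed stacks recorded elsewhere (e.g. by MemoryRegionMap). The bucket lookup hashes the raw frame addresses; a compressed sketch of that step, with illustrative mixing constants rather than the exact ones used here:

    // Sketch: fold a caller stack into a hash-table index.
    static int BucketIndexFor(int depth, const void* const key[], int table_size) {
      uintptr_t h = 0;
      for (int i = 0; i < depth; i++) {
        h += reinterpret_cast<uintptr_t>(key[i]);   // mix in each frame address
        h += h << 10;
        h ^= h >> 6;
      }
      h += h << 3;
      h ^= h >> 11;
      return static_cast<int>(h % table_size);
    }
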
@@ -374,11 +380,11 @@ void HeapProfileTable::DumpNonLiveIterator(const void* ptr, AllocValue* v,
memset(&b, 0, sizeof(b));
b.allocs = 1;
b.alloc_size = v->bytes;
- const void* stack[kMaxStackTrace + 1];
- b.depth = v->bucket()->depth + int(args.dump_alloc_addresses);
+ const void* stack[kMaxStackDepth + 1];
+ b.depth = v->bucket()->depth + static_cast<int>(args.dump_alloc_addresses);
b.stack = stack;
if (args.dump_alloc_addresses) stack[0] = ptr;
- memcpy(stack + int(args.dump_alloc_addresses),
+ memcpy(stack + static_cast<int>(args.dump_alloc_addresses),
v->bucket()->stack, sizeof(stack[0]) * v->bucket()->depth);
char buf[1024];
int len = UnparseBucket(b, buf, 0, sizeof(buf), args.profile_stats);
diff --git a/src/heap-profile-table.h b/src/heap-profile-table.h
index 3ad4a5d..5348197 100644
--- a/src/heap-profile-table.h
+++ b/src/heap-profile-table.h
@@ -51,6 +51,9 @@ class HeapProfileTable {
  // Extension to be used for heap profile files.
static const char kFileExt[];
+ // Longest stack trace we record.
+ static const int kMaxStackDepth = 32;
+
// data types ----------------------------
// Profile stats.
@@ -59,6 +62,12 @@ class HeapProfileTable {
int32 frees; // Number of free calls
int64 alloc_size; // Total size of all allocated objects so far
int64 free_size; // Total size of all freed objects so far
+
+ // semantic equality
+ bool Equivalent(const Stats& x) const {
+ return allocs - frees == x.allocs - x.frees &&
+ alloc_size - free_size == x.alloc_size - x.free_size;
+ }
};
// Info we can return about an allocation.
@@ -69,8 +78,8 @@ class HeapProfileTable {
};
// Memory (de)allocator interface we'll use.
- typedef void* (*Allocator)(size_t);
- typedef void (*DeAllocator)(void*);
+ typedef void* (*Allocator)(size_t size);
+ typedef void (*DeAllocator)(void* ptr);
// interface ---------------------------
@@ -82,6 +91,11 @@ class HeapProfileTable {
// and the memory allocation function that was asked to do the allocation.
void RecordAlloc(const void* ptr, size_t bytes, int skip_count);
+ // Direct version of RecordAlloc when the caller stack to use
+ // is already known: call_stack of depth stack_depth.
+ void RecordAllocWithStack(const void* ptr, size_t bytes,
+ int stack_depth, const void* const call_stack[]);
+
// Record the deallocation of memory at 'ptr'.
void RecordFree(const void* ptr);
@@ -123,7 +137,7 @@ class HeapProfileTable {
// of currently allocated bytes.
// We do not provision for 0-terminating 'buf'.
int FillOrderedProfile(char buf[], int size) const;
-
+
// Allocation data dump filtering callback:
// gets passed object pointer and size
// needs to return true iff the object is to be filtered out of the dump.
@@ -198,9 +212,9 @@ class HeapProfileTable {
char* buf, int buflen, int bufsize,
Stats* profile_stats);
- // Get the bucket for the current stack trace creating one if needed
- // (skip "skip_count" most recent frames).
- Bucket* GetBucket(int skip_count);
+ // Get the bucket for the caller stack trace 'key' of depth 'depth'
+ // creating the bucket if needed.
+ Bucket* GetBucket(int depth, const void* const key[]);
// Helper for IterateAllocs to do callback signature conversion
// from AllocationMap::Iterate to AllocIterator.
@@ -223,9 +237,6 @@ class HeapProfileTable {
// Size for table_.
static const int kHashTableSize = 179999;
- // Longest stack trace we record.
- static const int kMaxStackTrace = 32;
-
// Memory (de)allocator that we use.
Allocator alloc_;
DeAllocator dealloc_;
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index 3c59262..547d698 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -59,6 +59,7 @@
#include "base/low_level_alloc.h"
#include "base/sysinfo.h" // for GetUniquePathFromEnv()
#include "heap-profile-table.h"
+#include "memory_region_map.h"
#ifndef PATH_MAX
@@ -90,7 +91,12 @@ DEFINE_bool(mmap_log,
"Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
EnvToBool("HEAP_PROFILE_MMAP", false),
- "If heap-profiling on, also profile mmaps");
+            "If heap-profiling is on, also profile mmap, mremap, and sbrk");
+DEFINE_bool(only_mmap_profile,
+ EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
+ "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
+ "do not profile malloc/new/etc");
+
//----------------------------------------------------------------------
// Locking
@@ -135,32 +141,81 @@ static HeapProfileTable* heap_profile = NULL; // the heap profile table
// Profile generation
//----------------------------------------------------------------------
-extern "C" char* GetHeapProfile() {
+enum AddOrRemove { ADD, REMOVE };
+
+// Add or remove all MMap-allocated regions to/from *heap_profile.
+// Assumes heap_lock is held.
+static void AddRemoveMMapDataLocked(AddOrRemove mode) {
+ RAW_DCHECK(heap_lock.IsHeld(), "");
+ if (!FLAGS_mmap_profile || !is_on) return;
+ if (!FLAGS_mmap_log) MemoryRegionMap::CheckMallocHooks();
+ // MemoryRegionMap maintained all the data we need for all
+ // mmap-like allocations, so we just use it here:
+ MemoryRegionMap::LockHolder l;
+ for (MemoryRegionMap::RegionIterator r = MemoryRegionMap::BeginRegionLocked();
+ r != MemoryRegionMap::EndRegionLocked(); ++r) {
+ if (mode == ADD) {
+ heap_profile->RecordAllocWithStack(
+ reinterpret_cast<const void*>(r->start_addr),
+ r->end_addr - r->start_addr,
+ r->call_stack_depth, r->call_stack);
+ } else {
+ heap_profile->RecordFree(reinterpret_cast<void*>(r->start_addr));
+ }
+ }
+}
+
+static char* DoGetHeapProfile(void* (*alloc_func)(size_t)) {
// We used to be smarter about estimating the required memory and
// then capping it to 1MB and generating the profile into that.
// However it should not cost us much to allocate 1MB every time.
static const int size = 1 << 20;
- // This is intended to be normal malloc: we return it to the user to free it
- char* buf = reinterpret_cast<char*>(malloc(size));
+ char* buf = reinterpret_cast<char*>((*alloc_func)(size));
if (buf == NULL) return NULL;
// Grab the lock and generate the profile.
- heap_lock.Lock();
+ SpinLockHolder l(&heap_lock);
+ HeapProfileTable::Stats const stats = heap_profile->total();
+ AddRemoveMMapDataLocked(ADD);
int buflen = is_on ? heap_profile->FillOrderedProfile(buf, size - 1) : 0;
buf[buflen] = '\0';
+ // FillOrderedProfile should not reduce the set of active mmap-ed regions,
+ // hence MemoryRegionMap will let us remove everything we've added above:
+ AddRemoveMMapDataLocked(REMOVE);
+ RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
+  // If this fails, AddRemoveMMapDataLocked somehow removed
+  // more than it added above.
RAW_DCHECK(buflen == strlen(buf), "");
- heap_lock.Unlock();
return buf;
}
+extern "C" char* GetHeapProfile() {
+ // Use normal malloc: we return the profile to the user to free it:
+ return DoGetHeapProfile(&malloc);
+}
+
+// defined below
+static void NewHook(const void* ptr, size_t size);
+static void DeleteHook(const void* ptr);
+
// Helper for HeapProfilerDump.
static void DumpProfileLocked(const char* reason) {
+ RAW_DCHECK(heap_lock.IsHeld(), "");
RAW_DCHECK(is_on, "");
RAW_DCHECK(!dumping, "");
if (filename_prefix == NULL) return; // we do not yet need dumping
+ if (FLAGS_only_mmap_profile == false) {
+ if (MallocHook::GetNewHook() != NewHook ||
+ MallocHook::GetDeleteHook() != DeleteHook) {
+ RAW_LOG(FATAL, "Had our new/delete MallocHook-s replaced. "
+ "Are you using another MallocHook client? "
+ "Do not use --heap_profile=... to avoid this conflict.");
+ }
+ }
+
dumping = true;
// Make file name
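
DoGetHeapProfile takes the allocation function as a parameter: the public GetHeapProfile() hands the caller a malloc'ed buffer, while the internal dumper (in the next hunk) passes ProfilerMalloc so the profile buffer never shows up in the profile it describes. A trimmed sketch of that structure, with hypothetical wrapper names:

    typedef void* (*AllocFunc)(size_t);

    static char* BuildProfile(AllocFunc alloc_func, HeapProfileTable* table) {
      static const int kSize = 1 << 20;     // fixed 1MB buffer, as above
      char* buf = reinterpret_cast<char*>((*alloc_func)(kSize));
      if (buf == NULL) return NULL;
      SpinLockHolder l(&heap_lock);
      int len = table->FillOrderedProfile(buf, kSize - 1);
      buf[len] = '\0';
      return buf;   // the caller must release it with the matching deallocator
    }

    // For users: free() the result.
    char* ProfileForUser(HeapProfileTable* t) { return BuildProfile(&malloc, t); }
    // For internal dumps: the buffer comes from the profiler's own arena.
    char* ProfileForDump(HeapProfileTable* t) { return BuildProfile(&ProfilerMalloc, t); }
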
@@ -177,9 +232,10 @@ static void DumpProfileLocked(const char* reason) {
RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
FILE* f = fopen(file_name, "w");
if (f != NULL) {
- const char* profile = GetHeapProfile();
+ // Use ProfilerMalloc not to put info about profile itself into itself:
+ char* profile = DoGetHeapProfile(&ProfilerMalloc);
fputs(profile, f);
- free(const_cast<char*>(profile)); // was made with normal malloc
+ ProfilerFree(profile);
fclose(f);
} else {
RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
@@ -196,7 +252,7 @@ static void DumpProfileLocked(const char* reason) {
// Record an allocation in the profile.
static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
- heap_lock.Lock();
+ SpinLockHolder l(&heap_lock);
if (is_on) {
heap_profile->RecordAlloc(ptr, bytes, skip_count + 1);
const HeapProfileTable::Stats& total = heap_profile->total();
@@ -213,7 +269,7 @@ static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
need_to_dump = true;
} else if (inuse_bytes >
high_water_mark + FLAGS_heap_profile_inuse_interval) {
- sprintf(buf, "%"PRId64" MB in use", inuse_bytes >> 20);
+ snprintf(buf, sizeof(buf), "%"PRId64" MB in use", inuse_bytes >> 20);
// Track that we made a "high water mark" dump
high_water_mark = inuse_bytes;
need_to_dump = true;
@@ -223,28 +279,27 @@ static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
}
}
}
- heap_lock.Unlock();
}
// Record a deallocation in the profile.
static void RecordFree(const void* ptr) {
- heap_lock.Lock();
+ SpinLockHolder l(&heap_lock);
if (is_on) {
heap_profile->RecordFree(ptr);
}
- heap_lock.Unlock();
}
-
//----------------------------------------------------------------------
// Allocation/deallocation hooks for MallocHook
//----------------------------------------------------------------------
-static void NewHook(const void* ptr, size_t size) {
+// static
+void NewHook(const void* ptr, size_t size) {
if (ptr != NULL) RecordAlloc(ptr, size, 0);
}
-static void DeleteHook(const void* ptr) {
+// static
+void DeleteHook(const void* ptr) {
if (ptr != NULL) RecordFree(ptr);
}
@@ -256,10 +311,15 @@ static void RawInfoStackDumper(const char* message, void*) {
}
#endif
+// Saved MemoryRegionMap's hooks to daisy-chain calls to.
+MallocHook::MmapHook saved_mmap_hook = NULL;
+MallocHook::MremapHook saved_mremap_hook = NULL;
+MallocHook::MunmapHook saved_munmap_hook = NULL;
+MallocHook::SbrkHook saved_sbrk_hook = NULL;
+
static void MmapHook(const void* result, const void* start, size_t size,
int prot, int flags, int fd, off_t offset) {
- // Log the mmap if necessary
- if (FLAGS_mmap_log) {
+ if (FLAGS_mmap_log) { // log it
// We use PRIxS not just '%p' to avoid deadlocks
// in pretty-printing of NULL as "nil".
// TODO(maxim): instead should use a safe snprintf reimplementation
@@ -272,23 +332,60 @@ static void MmapHook(const void* result, const void* start, size_t size,
DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
}
+ if (saved_mmap_hook) {
+ // Call MemoryRegionMap's hook: it will record needed info about the mmap
+ // for us w/o deadlocks:
+ (*saved_mmap_hook)(result, start, size, prot, flags, fd, offset);
+ }
+}
- // Record mmap in profile if appropriate
- if (FLAGS_mmap_profile && result != (void*) MAP_FAILED) {
- RecordAlloc(result, size, 0);
+static void MremapHook(const void* result, const void* old_addr,
+ size_t old_size, size_t new_size,
+ int flags, const void* new_addr) {
+ if (FLAGS_mmap_log) { // log it
+ // We use PRIxS not just '%p' to avoid deadlocks
+ // in pretty-printing of NULL as "nil".
+ // TODO(maxim): instead should use a safe snprintf reimplementation
+ RAW_LOG(INFO,
+ "mremap(old_addr=0x%"PRIxS", old_size=%"PRIuS", new_size=%"PRIuS", "
+ "flags=0x%x, new_addr=0x%"PRIxS") = 0x%"PRIxS"",
+ (uintptr_t) old_addr, old_size, new_size, flags,
+ (uintptr_t) new_addr, (uintptr_t) result);
+#ifdef TODO_REENABLE_STACK_TRACING
+ DumpStackTrace(1, RawInfoStackDumper, NULL);
+#endif
+ }
+ if (saved_mremap_hook) { // call MemoryRegionMap's hook
+ (*saved_mremap_hook)(result, old_addr, old_size, new_size, flags, new_addr);
}
}
static void MunmapHook(const void* ptr, size_t size) {
- if (FLAGS_mmap_profile) {
- RecordFree(ptr);
- }
- if (FLAGS_mmap_log) {
+ if (FLAGS_mmap_log) { // log it
// We use PRIxS not just '%p' to avoid deadlocks
// in pretty-printing of NULL as "nil".
// TODO(maxim): instead should use a safe snprintf reimplementation
RAW_LOG(INFO, "munmap(start=0x%"PRIxS", len=%"PRIuS")",
(uintptr_t) ptr, size);
+#ifdef TODO_REENABLE_STACK_TRACING
+ DumpStackTrace(1, RawInfoStackDumper, NULL);
+#endif
+ }
+ if (saved_munmap_hook) { // call MemoryRegionMap's hook
+ (*saved_munmap_hook)(ptr, size);
+ }
+}
+
+static void SbrkHook(const void* result, ptrdiff_t increment) {
+ if (FLAGS_mmap_log) { // log it
+ RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxS"",
+ increment, (uintptr_t) result);
+#ifdef TODO_REENABLE_STACK_TRACING
+ DumpStackTrace(1, RawInfoStackDumper, NULL);
+#endif
+ }
+ if (saved_sbrk_hook) { // call MemoryRegionMap's hook
+ (*saved_sbrk_hook)(result, increment);
}
}
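
The mmap, mremap, munmap, and sbrk hooks above all follow the same save-and-chain pattern: HeapProfilerStart() (below) remembers whatever hook was installed before ours in saved_*_hook, and each hook forwards to that saved hook after doing its own logging. A minimal self-contained sketch of the pattern, with illustrative names only (this is not the perftools MallocHook API):

    #include <stddef.h>
    #include <stdio.h>

    typedef void (*SbrkHookFn)(const void* result, ptrdiff_t increment);

    static SbrkHookFn saved_hook = NULL;  // whatever hook was installed before ours

    static void ChainingSbrkHook(const void* result, ptrdiff_t increment) {
      // Our own work (here just logging the call):
      printf("sbrk(%ld) = %p\n", static_cast<long>(increment),
             const_cast<void*>(result));
      if (saved_hook != NULL) {
        (*saved_hook)(result, increment);  // daisy-chain to the previous hook
      }
    }

    // Installation mirrors the real code above: the setter returns the hook it
    // replaces, which is exactly the value to remember for chaining, e.g.
    //   saved_hook = MallocHook::SetSbrkHook(ChainingSbrkHook);
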
@@ -297,53 +394,93 @@ static void MunmapHook(const void* ptr, size_t size) {
//----------------------------------------------------------------------
extern "C" void HeapProfilerStart(const char* prefix) {
- heap_lock.Lock();
+ SpinLockHolder l(&heap_lock);
- if (filename_prefix != NULL) return;
+ if (is_on) return;
- RAW_DCHECK(!is_on, "");
+ is_on = true;
+
+ RAW_VLOG(0, "Starting tracking the heap");
+
+ if (FLAGS_only_mmap_profile) {
+ FLAGS_mmap_profile = true;
+ }
+
+ if (FLAGS_mmap_profile) {
+ // Ask MemoryRegionMap to record all mmap, mremap, and sbrk
+ // call stack traces of at least size kMaxStackDepth:
+ MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth);
+ }
+
+ if (FLAGS_mmap_log) {
+ // Install our hooks to do the logging
+ // and maybe save MemoryRegionMap's hooks to call:
+ saved_mmap_hook = MallocHook::SetMmapHook(MmapHook);
+ saved_mremap_hook = MallocHook::SetMremapHook(MremapHook);
+ saved_munmap_hook = MallocHook::SetMunmapHook(MunmapHook);
+ saved_sbrk_hook = MallocHook::SetSbrkHook(SbrkHook);
+ }
heap_profiler_memory =
LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
- heap_profile = new (ProfilerMalloc(sizeof(HeapProfileTable)))
+ heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
HeapProfileTable(ProfilerMalloc, ProfilerFree);
- is_on = true;
-
last_dump = 0;
// We do not reset dump_count so if the user does a sequence of
// HeapProfilerStart/HeapProfileStop, we will get a continuous
// sequence of profiles.
- // Now set the hooks that capture mallocs/frees
- MallocHook::SetNewHook(NewHook);
- MallocHook::SetDeleteHook(DeleteHook);
- RAW_VLOG(0, "Starting tracking the heap");
+ if (FLAGS_only_mmap_profile == false) {
+ // Now set the hooks that capture new/delete and malloc/free
+ // and check that these are the only hooks:
+ if (MallocHook::SetNewHook(NewHook) != NULL ||
+ MallocHook::SetDeleteHook(DeleteHook) != NULL) {
+ RAW_LOG(FATAL, "Had other new/delete MallocHook-s set. "
+ "Are you using the heap leak checker? "
+ "Use --heap_check=\"\" to avoid this conflict.");
+ }
+ }
// Copy filename prefix
+ RAW_DCHECK(filename_prefix == NULL, "");
const int prefix_length = strlen(prefix);
filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
memcpy(filename_prefix, prefix, prefix_length);
filename_prefix[prefix_length] = '\0';
- heap_lock.Unlock();
-
// This should be done before the hooks are set up, since it should
// call new, and we want that to be accounted for correctly.
MallocExtension::Initialize();
}
extern "C" void HeapProfilerStop() {
- heap_lock.Lock();
+ SpinLockHolder l(&heap_lock);
if (!is_on) return;
- filename_prefix = NULL;
-
- MallocHook::SetNewHook(NULL);
- MallocHook::SetDeleteHook(NULL);
+ if (FLAGS_only_mmap_profile == false) {
+ // Unset our new/delete hooks, checking they were the ones set:
+ if (MallocHook::SetNewHook(NULL) != NewHook ||
+ MallocHook::SetDeleteHook(NULL) != DeleteHook) {
+ RAW_LOG(FATAL, "Had our new/delete MallocHook-s replaced. "
+ "Are you using another MallocHook client? "
+ "Do not use --heap_profile=... to avoid this conflict.");
+ }
+ }
+ if (FLAGS_mmap_log) {
+ // Restore mmap/sbrk hooks, checking that our hooks were the ones set:
+ if (MallocHook::SetMmapHook(saved_mmap_hook) != MmapHook ||
+ MallocHook::SetMremapHook(saved_mremap_hook) != MremapHook ||
+ MallocHook::SetMunmapHook(saved_munmap_hook) != MunmapHook ||
+ MallocHook::SetSbrkHook(saved_sbrk_hook) != SbrkHook) {
+ RAW_LOG(FATAL, "Had our mmap/mremap/munmap/sbrk MallocHook-s replaced. "
+ "Are you using another MallocHook client? "
+ "Do not use --heap_profile=... to avoid this conflict.");
+ }
+ }
// free profile
heap_profile->~HeapProfileTable();
@@ -358,17 +495,18 @@ extern "C" void HeapProfilerStop() {
RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
}
- is_on = false;
+ if (FLAGS_mmap_profile) {
+ MemoryRegionMap::Shutdown();
+ }
- heap_lock.Unlock();
+ is_on = false;
}
extern "C" void HeapProfilerDump(const char *reason) {
- heap_lock.Lock();
+ SpinLockHolder l(&heap_lock);
if (is_on && !dumping) {
DumpProfileLocked(reason);
}
- heap_lock.Unlock();
}
//----------------------------------------------------------------------
@@ -377,11 +515,6 @@ extern "C" void HeapProfilerDump(const char *reason) {
// Initialization code
static void HeapProfilerInit() {
- if (FLAGS_mmap_profile || FLAGS_mmap_log) {
- MallocHook::SetMmapHook(MmapHook);
- MallocHook::SetMunmapHook(MunmapHook);
- }
-
// Everything after this point is for setting up the profiler based on envvar
char fname[PATH_MAX];
if (!GetUniquePathFromEnv("HEAPPROFILE", fname)) {
diff --git a/src/internal_logging.cc b/src/internal_logging.cc
index edd10a2..237922e 100644
--- a/src/internal_logging.cc
+++ b/src/internal_logging.cc
@@ -48,6 +48,17 @@ void TCMalloc_MESSAGE(const char* format, ...) {
write(STDERR_FILENO, buf, strlen(buf));
}
+void TCMalloc_CRASH(const char* format, ...) {
+ va_list ap;
+ va_start(ap, format);
+ char buf[800];
+ vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ write(STDERR_FILENO, buf, strlen(buf));
+
+ abort();
+}
+
void TCMalloc_Printer::printf(const char* format, ...) {
if (left_ > 0) {
va_list ap;
diff --git a/src/internal_logging.h b/src/internal_logging.h
index 1b1487d..9091143 100644
--- a/src/internal_logging.h
+++ b/src/internal_logging.h
@@ -54,13 +54,21 @@ extern void TCMalloc_MESSAGE(const char* format, ...)
// Short form for convenience
#define MESSAGE TCMalloc_MESSAGE
+// Dumps the specified message and then calls abort()
+extern void TCMalloc_CRASH(const char* format, ...)
+#ifdef HAVE___ATTRIBUTE__
+ __attribute__ ((__format__ (__printf__, 1, 2)))
+#endif
+;
+
+#define CRASH TCMalloc_CRASH
+
// Like assert(), but executed even in NDEBUG mode
#undef CHECK_CONDITION
#define CHECK_CONDITION(cond) \
do { \
if (!(cond)) { \
- MESSAGE("%s:%d: assertion failed: %s\n", __FILE__, __LINE__, #cond); \
- abort(); \
+ CRASH("%s:%d: assertion failed: %s\n", __FILE__, __LINE__, #cond); \
} \
} while (0)
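
Since TCMalloc_CRASH is declared with the printf format attribute (when HAVE___ATTRIBUTE__ is set), the compiler checks its arguments the same way it checks printf's; for example the CRASH("Bad size class %d for %" PRIuS "\n", sc, size) calls in the tcmalloc.cc hunk further down get their format strings verified at compile time. CHECK_CONDITION keeps its old meaning: the condition is evaluated even in NDEBUG builds, and a failure now prints and aborts through the single CRASH path.
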
diff --git a/src/memory_region_map.cc b/src/memory_region_map.cc
index 064b1f3..1a19618 100644
--- a/src/memory_region_map.cc
+++ b/src/memory_region_map.cc
@@ -31,6 +31,70 @@
* Author: Maxim Lifantsev
*/
+//
+// Background and key design points of MemoryRegionMap.
+//
+// MemoryRegionMap is a low-level module with quite atypical requirements that
+// result in some degree of non-triviality of the implementation and design.
+//
+// MemoryRegionMap collects info about *all* memory regions created with
+// mmap, munmap, mremap, sbrk.
+// The key word above is 'all': all regions that are created in a process
+// during its lifetime, frequently starting even before global object
+// constructor execution.
+//
+// This is needed by the primary client of MemoryRegionMap:
+// HeapLeakChecker uses the regions and the associated stack traces
+// to figure out what part of the memory is the heap:
+// if MemoryRegionMap were to miss some (early) regions, leak checking would
+// stop working correctly.
+//
+// To accomplish the goal of functioning before/during global object
+// constructor execution, MemoryRegionMap is implemented as a singleton service
+// that relies on its own on-demand initialized, constructor-less static data,
+// and only relies on other low-level modules that can also function properly
+// even before global object constructors run.
+//
+// Accomplishing the goal of collecting data about all mmap, munmap, mremap,
+// sbrk occurrences is more involved: conceptually to do this one needs to
+// record some bits of data in particular about any mmap or sbrk call,
+// but to do that one needs to allocate memory for that data at some point,
+// but all memory allocations in the end themselves come from an mmap
+// or sbrk call (that's how the address space of the process grows).
+//
+// Also note that we need to do all the above recording from
+// within an mmap/sbrk hook which is sometimes/frequently made by a memory
+// allocator, including the allocator MemoryRegionMap itself must rely on.
+// In the case of heap-checker usage this includes even the very first
+// mmap/sbrk call happening in the program: heap-checker gets activated due to
+// a link-time installed mmap/sbrk hook and it initializes MemoryRegionMap
+// and asks it to record info about this very first call right from that
+// very first hook invocation.
+//
+// MemoryRegionMap is doing its memory allocations via LowLevelAlloc:
+// unlike a more complex standard memory allocator, LowLevelAlloc cooperates with
+// MemoryRegionMap by not holding any of its own locks while it calls mmap
+// to get memory, thus we are able to call LowLevelAlloc from
+// our mmap/sbrk hooks without causing a deadlock in it.
+// For the same reason of deadlock prevention the locking in MemoryRegionMap
+// itself is write-recursive, which is an exception to Google's mutex usage.
+//
+// We still need to break the infinite cycle of mmap calling our hook,
+// which asks LowLevelAlloc for memory to record this mmap,
+// which (sometimes) causes mmap, which calls our hook, and so on.
+// We do this as follows: on a recursive call of MemoryRegionMap's
+// mmap/sbrk/mremap hook we record the data about the allocation in a
+// static fixed-sized stack (saved_regions); when the recursion unwinds,
+// but before returning from the outer hook call, we unwind this stack and
+// move the data from saved_regions to its permanent place in the RegionSet,
+// which can cause more allocations and mmap-s and recursion and unwinding,
+// but the whole process ends eventually due to the fact that for the small
+// allocations we are doing LowLevelAlloc reuses one mmap call and parcels out
+// the memory it created to satisfy several of our allocation requests.
+//
+
+// ========================================================================= //
+
#include "config.h"
#ifdef HAVE_UNISTD_H
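
A minimal self-contained sketch of the "buffer during recursion, drain on unwind" scheme described in the last paragraph above; the names here (Entry, Insert, DoInsert) are illustrative only, while the real code below uses Region, InsertRegionLocked, saved_regions and recursive_insert:

    #include <assert.h>
    #include <stddef.h>

    struct Entry { size_t start, size; };

    static Entry saved[10];             // fixed-size static buffer: needs no malloc
    static int saved_count = 0;
    static bool inside_insert = false;  // are we already inside an insert?

    static void DoInsert(const Entry& e) {
      // The real work; in the real module this may allocate memory, and that
      // allocation's mmap hook may re-enter Insert() below.
      (void) e;
    }

    static void Insert(const Entry& e) {
      if (inside_insert) {              // re-entered: just buffer the data
        assert(saved_count < 10);
        saved[saved_count++] = e;
        return;
      }
      inside_insert = true;
      DoInsert(e);                      // may re-enter Insert() and fill saved[]
      while (saved_count > 0) {         // drain whatever the recursion buffered
        Entry copy = saved[--saved_count];  // copy first: the slot may be reused
        DoInsert(copy);
      }
      inside_insert = false;
    }
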
@@ -61,14 +125,20 @@
# define MREMAP_FIXED 0
#endif
+using std::max;
+
// ========================================================================= //
-bool MemoryRegionMap::have_initialized_ = false;
+const int MemoryRegionMap::kMaxStackDepth;
+int MemoryRegionMap::client_count_ = 0;
+int MemoryRegionMap::max_stack_depth_ = 0;
MemoryRegionMap::RegionSet* MemoryRegionMap::regions_ = NULL;
LowLevelAlloc::Arena* MemoryRegionMap::arena_ = NULL;
SpinLock MemoryRegionMap::lock_(SpinLock::LINKER_INITIALIZED);
-int MemoryRegionMap::recursion_count_ = 0;
-pthread_t MemoryRegionMap::self_tid_;
+SpinLock MemoryRegionMap::owner_lock_( // ACQUIRED_AFTER(lock_)
+ SpinLock::LINKER_INITIALIZED);
+int MemoryRegionMap::recursion_count_ = 0; // GUARDED_BY(owner_lock_)
+pthread_t MemoryRegionMap::lock_owner_tid_; // GUARDED_BY(owner_lock_)
// ========================================================================= //
@@ -86,12 +156,16 @@ static inline bool current_thread_is(pthread_t should_be) {
// ========================================================================= //
+// Constructor-less place-holder to store a RegionSet in.
union MemoryRegionMap::RegionSetRep {
char rep[sizeof(RegionSet)];
- void* align_it;
+ void* align_it; // do not need a better alignment for 'rep' than this
+ RegionSet* region_set() { return reinterpret_cast<RegionSet*>(rep); }
};
-// The bytes where MemoryRegionMap::regions_ will point to
+// The bytes where MemoryRegionMap::regions_ will point to.
+// We use RegionSetRep with noop c-tor so that global construction
+// does not interfere.
static MemoryRegionMap::RegionSetRep regions_rep;
// ========================================================================= //
@@ -100,22 +174,42 @@ static MemoryRegionMap::RegionSetRep regions_rep;
// (or rather should we *not* use regions_ to record a hooked mmap).
static bool recursive_insert = false;
-void MemoryRegionMap::Init() {
+void MemoryRegionMap::Init(int max_stack_depth) {
RAW_VLOG(2, "MemoryRegionMap Init");
+ RAW_CHECK(max_stack_depth >= 0, "");
+ // Make sure we don't overflow the memory in region stacks:
+ RAW_CHECK(max_stack_depth <= kMaxStackDepth,
+ "need to increase kMaxStackDepth?");
Lock();
- if (have_initialized_) {
+ client_count_ += 1;
+ max_stack_depth_ = max(max_stack_depth_, max_stack_depth);
+ if (client_count_ > 1) {
+ // not first client: already did initialization-proper
Unlock();
+ RAW_VLOG(2, "MemoryRegionMap Init increment done");
return;
}
- MallocHook::SetMmapHook(MmapHook);
- MallocHook::SetMremapHook(MremapHook);
- MallocHook::SetSbrkHook(SbrkHook);
- recursive_insert = true; // to buffer the mmap info caused by NewArena
+ // Set our hooks and make sure no other hooks existed:
+ if (MallocHook::SetMmapHook(MmapHook) != NULL ||
+ MallocHook::SetMremapHook(MremapHook) != NULL ||
+ MallocHook::SetSbrkHook(SbrkHook) != NULL ||
+ MallocHook::SetMunmapHook(MunmapHook) != NULL) {
+ RAW_LOG(FATAL, "Had other mmap/mremap/munmap/sbrk MallocHook-s set. "
+ "Make sure only one of MemoryRegionMap and the other "
+ "client is active.");
+ }
+ // We need to set recursive_insert since the NewArena call itself
+ // will already do some allocations with mmap which our hooks will catch;
+ // recursive_insert allows us to buffer info about these mmap calls.
+ // Note that Init() can be (and is) sometimes called
+ // already from within an mmap/sbrk hook.
+ recursive_insert = true;
arena_ = LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());
recursive_insert = false;
HandleSavedRegionsLocked(&InsertRegionLocked); // flush the buffered ones
- MallocHook::SetMunmapHook(MunmapHook);
- have_initialized_ = true;
+ // Can't instead use HandleSavedRegionsLocked(&DoInsertRegionLocked) before
+ // recursive_insert = false; as InsertRegionLocked will also construct
+ // regions_ on demand for us.
Unlock();
RAW_VLOG(2, "MemoryRegionMap Init done");
}
@@ -123,8 +217,14 @@ void MemoryRegionMap::Init() {
bool MemoryRegionMap::Shutdown() {
RAW_VLOG(2, "MemoryRegionMap Shutdown");
Lock();
- RAW_CHECK(have_initialized_, "");
- CheckMallocHooks();
+ RAW_CHECK(client_count_ > 0, "");
+ client_count_ -= 1;
+ if (client_count_ != 0) { // not last client; need not really shutdown
+ Unlock();
+ RAW_VLOG(2, "MemoryRegionMap Shutdown decrement done");
+ return true;
+ }
+ CheckMallocHooks(); // we assume no other hooks
MallocHook::SetMmapHook(NULL);
MallocHook::SetMremapHook(NULL);
MallocHook::SetSbrkHook(NULL);
@@ -137,7 +237,6 @@ bool MemoryRegionMap::Shutdown() {
} else {
RAW_LOG(WARNING, "Can't delete LowLevelAlloc arena: it's being used");
}
- have_initialized_ = false;
Unlock();
RAW_VLOG(2, "MemoryRegionMap Shutdown done");
return deleted_arena;
@@ -148,65 +247,109 @@ void MemoryRegionMap::CheckMallocHooks() {
MallocHook::GetMunmapHook() != MunmapHook ||
MallocHook::GetMremapHook() != MremapHook ||
MallocHook::GetSbrkHook() != SbrkHook) {
- RAW_LOG(FATAL, "Some malloc hooks got changed");
+ RAW_LOG(FATAL, "Our mmap/mremap/munmap/sbrk MallocHook-s got changed.");
}
}
+// Invariants (once libpthread_initialized is true):
+// * While lock_ is not held, recursion_count_ is 0 (and
+// lock_owner_tid_ is the previous owner, but we don't rely on
+// that).
+// * recursion_count_ and lock_owner_tid_ are only written while
+// both lock_ and owner_lock_ are held. They may be read under
+// just owner_lock_.
+// * At entry and exit of Lock() and Unlock(), the current thread
+// owns lock_ iff pthread_equal(lock_owner_tid_, pthread_self())
+// && recursion_count_ > 0.
void MemoryRegionMap::Lock() {
- if (recursion_count_ == 0 || !current_thread_is(self_tid_)) {
- lock_.Lock();
+ {
+ SpinLockHolder l(&owner_lock_);
+ if (recursion_count_ > 0 && current_thread_is(lock_owner_tid_)) {
+ RAW_CHECK(lock_.IsHeld(), "Invariants violated");
+ recursion_count_++;
+ RAW_CHECK(recursion_count_ <= 5,
+ "recursive lock nesting unexpectedly deep");
+ return;
+ }
+ }
+ lock_.Lock();
+ {
+ SpinLockHolder l(&owner_lock_);
+ RAW_CHECK(recursion_count_ == 0,
+ "Last Unlock didn't reset recursion_count_");
if (libpthread_initialized)
- self_tid_ = pthread_self();
+ lock_owner_tid_ = pthread_self();
+ recursion_count_ = 1;
}
- recursion_count_++;
- RAW_CHECK(recursion_count_ <= 5, "recursive lock nesting unexpectedly deep");
}
void MemoryRegionMap::Unlock() {
+ SpinLockHolder l(&owner_lock_);
RAW_CHECK(recursion_count_ > 0, "unlock when not held");
- RAW_CHECK(current_thread_is(self_tid_), "unlock by non-holder");
+ RAW_CHECK(lock_.IsHeld(),
+ "unlock when not held, and recursion_count_ is wrong");
+ RAW_CHECK(current_thread_is(lock_owner_tid_), "unlock by non-holder");
recursion_count_--;
if (recursion_count_ == 0) {
lock_.Unlock();
}
}
-bool MemoryRegionMap::LockIsHeldByThisThread() {
- return lock_.IsHeld() && current_thread_is(self_tid_);
+bool MemoryRegionMap::LockIsHeld() {
+ SpinLockHolder l(&owner_lock_);
+ return lock_.IsHeld() && current_thread_is(lock_owner_tid_);
}
-bool MemoryRegionMap::FindStackRegion(uintptr_t stack_top, Region* result) {
- bool found = false;
- Lock();
+const MemoryRegionMap::Region*
+MemoryRegionMap::DoFindRegionLocked(uintptr_t addr) {
+ RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
if (regions_ != NULL) {
Region sample;
- sample.end_addr = stack_top;
+ sample.SetRegionSetKey(addr);
RegionSet::iterator region = regions_->lower_bound(sample);
if (region != regions_->end()) {
- RAW_CHECK(stack_top <= region->end_addr, "");
- if (region->start_addr <= stack_top && stack_top < region->end_addr) {
- RAW_VLOG(2, "Stack at %p is inside region %p..%p",
- reinterpret_cast<void*>(stack_top),
- reinterpret_cast<void*>(region->start_addr),
- reinterpret_cast<void*>(region->end_addr));
- const_cast<Region*>(&*region)->is_stack = true; // now we know
- *result = *region;
- found = true;
+ RAW_CHECK(addr <= region->end_addr, "");
+ if (region->start_addr <= addr && addr < region->end_addr) {
+ return &(*region);
}
}
}
+ return NULL;
+}
+
+bool MemoryRegionMap::FindRegion(uintptr_t addr, Region* result) {
+ Lock();
+ const Region* region = DoFindRegionLocked(addr);
+ if (region != NULL) *result = *region; // create it as an independent copy
Unlock();
- return found;
+ return region != NULL;
+}
+
+bool MemoryRegionMap::FindAndMarkStackRegion(uintptr_t stack_top,
+ Region* result) {
+ Lock();
+ const Region* region = DoFindRegionLocked(stack_top);
+ if (region != NULL) {
+ RAW_VLOG(2, "Stack at %p is inside region %p..%p",
+ reinterpret_cast<void*>(stack_top),
+ reinterpret_cast<void*>(region->start_addr),
+ reinterpret_cast<void*>(region->end_addr));
+ const_cast<Region*>(region)->set_is_stack(); // now we know
+ // cast is safe (set_is_stack does not change the set ordering key)
+ *result = *region; // create *result as an independent copy
+ }
+ Unlock();
+ return region != NULL;
}
MemoryRegionMap::RegionIterator MemoryRegionMap::BeginRegionLocked() {
- RAW_CHECK(LockIsHeldByThisThread(), "should be held (by this thread)");
+ RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
return regions_->begin();
}
MemoryRegionMap::RegionIterator MemoryRegionMap::EndRegionLocked() {
- RAW_CHECK(LockIsHeldByThisThread(), "should be held (by this thread)");
+ RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_CHECK(regions_ != NULL, "");
return regions_->end();
}
@@ -217,7 +360,7 @@ inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
"Wow, overlapping memory regions");
Region sample;
- sample.end_addr = region.start_addr;
+ sample.SetRegionSetKey(region.start_addr);
i = regions_->lower_bound(sample);
RAW_CHECK(i == regions_->end() || !region.Overlaps(*i),
"Wow, overlapping memory regions");
@@ -225,7 +368,10 @@ inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
RAW_VLOG(4, "Inserting region %p..%p from %p",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr),
- reinterpret_cast<void*>(region.caller));
+ reinterpret_cast<void*>(region.caller()));
+ region.AssertIsConsistent(); // just making sure
+ // This inserts and allocates permanent storage for region
+ // and its call stack data: it's safe to do it now:
regions_->insert(region);
RAW_VLOG(4, "Inserted region %p..%p :",
reinterpret_cast<void*>(region.start_addr),
@@ -237,48 +383,56 @@ inline void MemoryRegionMap::DoInsertRegionLocked(const Region& region) {
// and MemoryRegionMap::HandleSavedRegionsLocked()
// and are file-level to ensure that they are initialized at load time.
-// No. of unprocessed inserts
+// Number of unprocessed region inserts.
static int saved_regions_count = 0;
+
// Unprocessed inserts (must be big enough to hold all allocations that can
// be caused by a InsertRegionLocked call).
+// Region has no constructor, so that c-tor execution does not interfere
+// with using the static memory behind saved_regions at any time.
static MemoryRegionMap::Region saved_regions[10];
inline void MemoryRegionMap::HandleSavedRegionsLocked(
void (*insert_func)(const Region& region)) {
while (saved_regions_count > 0) {
- // Making a copy of the region argument is important:
+ // Making a local-var copy of the region argument to insert_func
+ // including its stack (w/o doing any memory allocations) is important:
// in many cases the memory in saved_regions
// will get written-to during the (*insert_func)(r) call below.
- Region r(saved_regions[--saved_regions_count]);
+ Region r = saved_regions[--saved_regions_count];
(*insert_func)(r);
}
}
inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
- RAW_CHECK(LockIsHeldByThisThread(), "should be held (by this thread)");
+ RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
// We can be called recursively, because RegionSet constructor
// and DoInsertRegionLocked() (called below) can call the allocator.
// recursive_insert tells us if that's the case. When this happens,
// region insertion information is recorded in saved_regions[],
// and taken into account when the recursion unwinds.
// Do the insert:
- if (recursive_insert) { // recursion
+ if (recursive_insert) { // recursion: save in saved_regions
RAW_VLOG(4, "Saving recursive insert of region %p..%p from %p",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr),
- reinterpret_cast<void*>(region.caller));
+ reinterpret_cast<void*>(region.caller()));
RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
+ // Copy 'region' to saved_regions[saved_regions_count]
+ // together with the contents of its call_stack,
+ // then increment saved_regions_count.
saved_regions[saved_regions_count++] = region;
} else { // not a recusrive call
if (regions_ == NULL) { // init regions_
RAW_VLOG(4, "Initializing region set");
- regions_ = reinterpret_cast<RegionSet*>(&regions_rep.rep);
+ regions_ = regions_rep.region_set();
recursive_insert = true;
new(regions_) RegionSet();
HandleSavedRegionsLocked(&DoInsertRegionLocked);
recursive_insert = false;
}
recursive_insert = true;
+ // Do the actual insertion work to put new regions into regions_:
DoInsertRegionLocked(region);
HandleSavedRegionsLocked(&DoInsertRegionLocked);
recursive_insert = false;
@@ -294,34 +448,32 @@ static const int kStripFrames = 3;
#endif
void MemoryRegionMap::RecordRegionAddition(const void* start, size_t size) {
+ // Record start/end info about this memory acquisition call in a new region:
Region region;
- // Record data about this memory acquisition call:
- region.start_addr = reinterpret_cast<uintptr_t>(start);
- region.end_addr = region.start_addr + size;
- region.is_stack = false;
- void* caller;
+ region.Create(start, size);
+ // First get the call stack info into the local variable 'region':
const int depth =
- MallocHook::GetCallerStackTrace(&caller, 1, kStripFrames + 1);
- if (depth != 1) {
- // If we weren't able to get the stack frame, that's ok. This
- // usually happens in recursive calls, when the stack-unwinder
- // calls mmap() which in turn calls the stack-unwinder.
- caller = NULL;
- }
- region.caller = reinterpret_cast<uintptr_t>(caller);
+ max_stack_depth_ > 0
+ ? MallocHook::GetCallerStackTrace(const_cast<void**>(region.call_stack),
+ max_stack_depth_, kStripFrames + 1)
+ : 0;
+ region.set_call_stack_depth(depth); // record stack info fully
RAW_VLOG(2, "New global region %p..%p from %p",
reinterpret_cast<void*>(region.start_addr),
reinterpret_cast<void*>(region.end_addr),
- reinterpret_cast<void*>(region.caller));
+ reinterpret_cast<void*>(region.caller()));
+ // Note: none of the above allocates memory.
Lock(); // recursively lock
InsertRegionLocked(region);
+ // This will (eventually) allocate storage for and copy over the stack data
+ // from region.call_stack_data_ that is pointed to by region.call_stack().
Unlock();
}
void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
Lock();
HandleSavedRegionsLocked(&InsertRegionLocked);
- // first handle saved regions if any
+ // first handle adding saved regions if any
uintptr_t start_addr = reinterpret_cast<uintptr_t>(start);
uintptr_t end_addr = start_addr + size;
// subtract start_addr, end_addr from all the regions
@@ -329,11 +481,11 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
reinterpret_cast<void*>(start_addr),
reinterpret_cast<void*>(end_addr),
regions_->size());
- Region start_point;
- memset(&start_point, 0, sizeof(start_point)); // zero out don't-care fields
- start_point.start_addr = start_point.end_addr = start_addr;
- for (RegionSet::iterator region = regions_->lower_bound(start_point);
- region != regions_->end() && region->start_addr < end_addr;
+ Region sample;
+ sample.SetRegionSetKey(start_addr);
+ // Only iterate over the regions that might overlap start_addr..end_addr:
+ for (RegionSet::iterator region = regions_->lower_bound(sample);
+ region != regions_->end() && region->start_addr < end_addr;
/*noop*/) {
RAW_VLOG(5, "Looking at region %p..%p",
reinterpret_cast<void*>(region->start_addr),
@@ -355,20 +507,17 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
// Make another region for the start portion:
// The new region has to be the start portion because we can't
// just modify region->end_addr as it's the sorting key.
- Region r;
- r.start_addr = region->start_addr;
- r.end_addr = start_addr;
- r.caller = region->caller;
- r.is_stack = region->is_stack;
+ Region r = *region;
+ r.set_end_addr(start_addr);
InsertRegionLocked(r);
- // cut region from start
- const_cast<Region*>(&*region)->start_addr = end_addr;
+ // cut *region from start:
+ const_cast<Region&>(*region).set_start_addr(end_addr);
} else if (end_addr > region->start_addr &&
start_addr <= region->start_addr) { // cut from start
RAW_VLOG(4, "Start-chopping region %p..%p",
reinterpret_cast<void*>(region->start_addr),
reinterpret_cast<void*>(region->end_addr));
- const_cast<Region*>(&*region)->start_addr = end_addr;
+ const_cast<Region&>(*region).set_start_addr(end_addr);
} else if (start_addr > region->start_addr &&
start_addr < region->end_addr) { // cut from end
RAW_VLOG(4, "End-chopping region %p..%p",
@@ -376,9 +525,11 @@ void MemoryRegionMap::RecordRegionRemoval(const void* start, size_t size) {
reinterpret_cast<void*>(region->end_addr));
// Can't just modify region->end_addr (it's the sorting key):
Region r = *region;
- r.end_addr = start_addr;
+ r.set_end_addr(start_addr);
RegionSet::iterator d = region;
++region;
+ // It's safe to erase before inserting since r is independent of *d:
+ // r contains its own copy of the call stack:
regions_->erase(d);
InsertRegionLocked(r);
continue;
@@ -449,14 +600,14 @@ void MemoryRegionMap::SbrkHook(const void* result, ptrdiff_t increment) {
}
void MemoryRegionMap::LogAllLocked() {
- RAW_CHECK(LockIsHeldByThisThread(), "should be held (by this thread)");
+ RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
RAW_LOG(INFO, "List of regions:");
uintptr_t previous = 0;
for (RegionSet::const_iterator r = regions_->begin();
r != regions_->end(); ++r) {
RAW_LOG(INFO, "Memory region 0x%"PRIxS"..0x%"PRIxS" "
"from 0x%"PRIxS" stack=%d",
- r->start_addr, r->end_addr, r->caller, r->is_stack);
+ r->start_addr, r->end_addr, r->caller(), r->is_stack);
RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order");
// this must be caused by uncontrolled recursive operations on regions_
previous = r->end_addr;
diff --git a/src/memory_region_map.h b/src/memory_region_map.h
index f6df6af..7b7f052 100644
--- a/src/memory_region_map.h
+++ b/src/memory_region_map.h
@@ -44,27 +44,52 @@
#include "base/spinlock.h"
#include "base/low_level_alloc.h"
-/// TODO(maxim): add a unittest
-
-// Class to collect and query the map of all memory regions
-// in a process that have been created with mmap, munmap, mremap, sbrk.
+// TODO(maxim): add a unittest:
+// execute a bunch of mmaps and compare the memory map with what strace logs
+// execute a bunch of mmap/munmap calls and compare the memory map with
+// our own accounting of what those calls generated
+
+// Class to collect and query the map of all memory regions in a process
+// that have been created with mmap, munmap, mremap, sbrk.
+// For each memory region, we keep track of (and provide to users)
+// the stack trace that allocated that memory region.
+// The recorded stack trace depth is bounded by
+// a user-supplied max_stack_depth parameter of Init().
// After initialization with Init()
// (which can happened even before global object constructor execution)
// we collect the map by installing and monitoring MallocHook-s
// to mmap, munmap, mremap, sbrk.
// At any time one can query this map via provided interface.
+// For more details on the design of MemoryRegionMap
+// see the comment at the top of our .cc file.
class MemoryRegionMap {
- public: // interface
+ public:
+
+ // interface ================================================================
+
+ // Every client of MemoryRegionMap must call Init() before first use,
+ // and Shutdown() after last use. This allows us to reference count
+ // this (singleton) class properly.
- // Start up this module (can be called more than once w/o harm).
- // Will install mmap, munmap, mremap, sbrk hooks
+ // Initialize this module to record memory allocation stack traces.
+ // Stack traces that have more than "max_stack_depth" frames
+ // are automatically shrunk to "max_stack_depth" when they are recorded.
+// Init() can be called more than once w/o harm; the largest max_stack_depth
+ // will be the effective one.
+ // It will install mmap, munmap, mremap, sbrk hooks
// and initialize arena_ and our hook and locks, hence one can use
// MemoryRegionMap::Lock()/Unlock() to manage the locks.
// Uses Lock/Unlock inside.
- static void Init();
+ static void Init(int max_stack_depth);
+
+ // Max call stack recording depth supported by Init().
+ // Set it to be high enough for all our clients.
+ static const int kMaxStackDepth = 32;
// Try to shutdown this module undoing what Init() did.
- // Returns iff could do full shutdown.
+ // Returns true iff could do full shutdown (or it was not attempted).
+ // Full shutdown is attempted when the number of Shutdown() calls equals
+ // the number of Init() calls.
static bool Shutdown();
// Check that our hooks are still in place and crash if not.
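
A short usage sketch of the reference-counted Init()/Shutdown() contract documented above; the two client functions are hypothetical, only the MemoryRegionMap calls are the ones declared here:

    #include "memory_region_map.h"    // this (internal) header

    void ClientA() {
      MemoryRegionMap::Init(8);       // first Init() does the actual setup
      // ... query regions ...
      MemoryRegionMap::Shutdown();
    }

    void ClientB() {
      MemoryRegionMap::Init(32);      // nested Init() only bumps the client count
      // ... query regions ...
      MemoryRegionMap::Shutdown();    // real shutdown happens on the last call
    }

If ClientA and ClientB overlap in time, the module stays initialized until the second Shutdown(), and the effective recording depth is the largest request (32 here, which must not exceed kMaxStackDepth).
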
@@ -72,34 +97,121 @@ class MemoryRegionMap {
static void CheckMallocHooks();
// Locks to protect our internal data structures.
- // These also protect use of arena_
- // if our Init() has been done.
+ // These also protect use of arena_ if our Init() has been done.
// The lock is recursive.
static void Lock();
static void Unlock();
- // Whether the lock is held by this thread.
- static bool LockIsHeldByThisThread();
+
+ // Returns true when the lock is held by this thread (for use in RAW_CHECK-s).
+ static bool LockIsHeld();
+
+ // Locker object that acquires the MemoryRegionMap::Lock
+ // for the duration of its lifetime (a C++ scope).
+ class LockHolder {
+ public:
+ LockHolder() { Lock(); }
+ ~LockHolder() { Unlock(); }
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(LockHolder);
+ };
// A memory region that we know about through malloc_hook-s.
+ // This is essentially an interface through which MemoryRegionMap
+ // exports the collected data to its clients.
struct Region {
uintptr_t start_addr; // region start address
uintptr_t end_addr; // region end address
- uintptr_t caller; // who called this region's allocation function
- // (return address in the stack of the immediate caller)
- // NULL if could not get it
- bool is_stack; // does this region contain a thread's stack
+ int call_stack_depth; // number of caller stack frames that we saved
+ const void* call_stack[kMaxStackDepth]; // caller address stack array
+ // filled to call_stack_depth size
+ bool is_stack; // does this region contain a thread's stack:
+ // a user of MemoryRegionMap supplies this info
+
+ // Convenience accessor for call_stack[0],
+ // i.e. (the program counter of) the immediate caller
+ // of this region's allocation function,
+ // but it also returns NULL when call_stack_depth is 0,
+ // i.e. when we weren't able to get the call stack.
+ // This usually happens in recursive calls, when the stack-unwinder
+ // calls mmap() which in turn calls the stack-unwinder.
+ uintptr_t caller() const {
+ return reinterpret_cast<uintptr_t>(call_stack_depth >= 1
+ ? call_stack[0] : NULL);
+ }
+ // Return true iff this region overlaps region x.
bool Overlaps(const Region& x) const {
return start_addr < x.end_addr && end_addr > x.start_addr;
}
+
+ private: // helpers for MemoryRegionMap
+ friend class MemoryRegionMap;
+
+ // The ways we create Region-s:
+ void Create(const void* start, size_t size) {
+ start_addr = reinterpret_cast<uintptr_t>(start);
+ end_addr = start_addr + size;
+ is_stack = false; // not a stack till marked such
+ call_stack_depth = 0;
+ AssertIsConsistent();
+ }
+ void set_call_stack_depth(int depth) {
+ RAW_DCHECK(call_stack_depth == 0, ""); // only one such set is allowed
+ call_stack_depth = depth;
+ AssertIsConsistent();
+ }
+
+ // The ways we modify Region-s:
+ void set_is_stack() { is_stack = true; }
+ void set_start_addr(uintptr_t addr) {
+ start_addr = addr;
+ AssertIsConsistent();
+ }
+ void set_end_addr(uintptr_t addr) {
+ end_addr = addr;
+ AssertIsConsistent();
+ }
+
+ // Verifies that *this contains consistent data, crashes if not the case.
+ void AssertIsConsistent() const {
+ RAW_DCHECK(start_addr < end_addr, "");
+ RAW_DCHECK(call_stack_depth >= 0 &&
+ call_stack_depth <= kMaxStackDepth, "");
+ }
+
+ // Post-default construction helper to make a Region suitable
+ // for searching in RegionSet regions_.
+ void SetRegionSetKey(uintptr_t addr) {
+ // make sure *this has no usable data:
+ if (DEBUG_MODE) memset(this, 0xFF, sizeof(*this));
+ end_addr = addr;
+ }
+
+ // Note: call_stack[kMaxStackDepth] as a member lets us make Region
+ // a simple self-contained struct with correctly behaving bit-wise copying.
+ // This simplifies the code of this module but wastes some memory:
+ // in the most common use case of this module (leak checking)
+ // only one call_stack element out of kMaxStackDepth is actually needed.
+ // Making the storage for call_stack variable-sized would
+ // substantially complicate memory management for the Region-s:
+ // as they need to be created and manipulated for some time
+ // w/o any memory allocations, yet are also given out to the users.
};
+ // Find the region that covers addr and write its data into *result if found,
+ // in which case *result gets filled so that it stays fully functional
+ // even when the underlying region gets removed from MemoryRegionMap.
+ // Returns success. Uses Lock/Unlock inside.
+ static bool FindRegion(uintptr_t addr, Region* result);
+
// Find the region that contains stack_top, mark that region as
- // a stack region, and write its data into *result.
+ // a stack region, and write its data into *result if found,
+ // in which case *result gets filled so that it stays fully functional
+ // even when the underlying region gets removed from MemoryRegionMap.
// Returns success. Uses Lock/Unlock inside.
- static bool FindStackRegion(uintptr_t stack_top, Region* result);
+ static bool FindAndMarkStackRegion(uintptr_t stack_top, Region* result);
- private: // our internal types
+ private: // our internal types ==============================================
// Region comparator for sorting with STL
struct RegionCmp {
@@ -113,37 +225,43 @@ class MemoryRegionMap {
static void *Allocate(size_t n) {
return LowLevelAlloc::AllocWithArena(n, arena_);
}
- static void Free(void *p) { LowLevelAlloc::Free(p); }
+ static void Free(const void *p) {
+ LowLevelAlloc::Free(const_cast<void*>(p));
+ }
};
// Set of the memory regions
typedef std::set<Region, RegionCmp,
STL_Allocator<Region, MyAllocator> > RegionSet;
- public: // more in-depth interface
+ public: // more in-depth interface ==========================================
// STL iterator with values of Region
typedef RegionSet::const_iterator RegionIterator;
// Return the begin/end iterators to all the regions.
- // Ideally these need Lock/Unlock protection around their whole usage (loop),
- // but LockOther/UnlockOther is usually sufficient:
- // in this case of single-threaded mutability (achieved by (Un)lockOther)
- // via region additions/modifications
- // the loop iterator will still be valid as log as its region
- // has not been deleted and EndRegionLocked should be
+ // These need Lock/Unlock protection around their whole usage (loop).
+ // Even when the same thread causes modifications during such a loop
+ // (which are permitted due to recursive locking)
+ // the loop iterator will still be valid as long as its region
+ // has not been deleted, but EndRegionLocked should be
// re-evaluated whenever the set of regions has changed.
static RegionIterator BeginRegionLocked();
static RegionIterator EndRegionLocked();
- public: // effectively private type
+ // Effectively private type from our .cc =================================
+ // public to let us declare global objects:
+ union RegionSetRep;
+
+ private:
- union RegionSetRep; // in .cc
+ // representation ===========================================================
- private: // representation
+ // Counter of clients of this module that have called Init().
+ static int client_count_;
- // If have initialized this module
- static bool have_initialized_;
+ // Maximal number of caller stack frames to save (>= 0).
+ static int max_stack_depth_;
// Arena used for our allocations in regions_.
static LowLevelAlloc::Arena* arena_;
@@ -158,16 +276,22 @@ class MemoryRegionMap {
// Lock to protect regions_ variable and the data behind.
static SpinLock lock_;
+ // Lock to protect the recursive lock itself.
+ static SpinLock owner_lock_;
// Recursion count for the recursive lock.
static int recursion_count_;
// The thread id of the thread that's inside the recursive lock.
- static pthread_t self_tid_;
+ static pthread_t lock_owner_tid_;
+
+ // helpers ==================================================================
- private: // helpers
+ // Helper for FindRegion and FindAndMarkStackRegion:
+ // returns the region covering 'addr' or NULL; assumes our lock_ is held.
+ static const Region* DoFindRegionLocked(uintptr_t addr);
// Verifying wrapper around regions_->insert(region)
- // To be called for InsertRegionLocked only!
+ // To be called to do InsertRegionLocked's work only!
inline static void DoInsertRegionLocked(const Region& region);
// Handle regions saved by InsertRegionLocked into a tmp static array
// by calling insert_func on them.
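
A usage sketch of LockHolder together with the locked iteration interface above, mirroring how the heap-checker unittest (further down in this change) walks the regions:

    #include "memory_region_map.h"    // this (internal) header

    void DumpRegionCallers() {
      MemoryRegionMap::LockHolder l;  // holds the recursive lock for this scope
      for (MemoryRegionMap::RegionIterator r = MemoryRegionMap::BeginRegionLocked();
           r != MemoryRegionMap::EndRegionLocked(); ++r) {
        // r->start_addr, r->end_addr and r->caller() are safe to read here
        // because the lock is held for the whole loop, as required above.
        (void) r->caller();
      }
    }                                 // lock released when 'l' goes out of scope
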
diff --git a/src/pprof b/src/pprof
index dba0f7a..2e1276f 100755
--- a/src/pprof
+++ b/src/pprof
@@ -72,7 +72,7 @@ use strict;
use warnings;
use Getopt::Long;
-my $PPROF_VERSION = "0.93";
+my $PPROF_VERSION = "0.97";
# These are the object tools we use, which come from various sources.
# We want to invoke them directly, rather than via users' aliases and/or
@@ -430,11 +430,12 @@ sub Init() {
# Parse profile file/location arguments
foreach my $farg (@ARGV) {
- if ($farg =~ m/(.*)\@([0-9]+)/ ) {
+ if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
my $machine = $1;
my $num_machines = $2;
+ my $path = $3;
for (my $i = 0; $i < $num_machines; $i++) {
- unshift(@main::pfile_args, "$i.$machine");
+ unshift(@main::pfile_args, "$i.$machine$path");
}
} else {
unshift(@main::pfile_args, $farg);
@@ -1786,8 +1787,8 @@ sub IsProfileURL {
sub ParseProfileURL {
my $profile_name = shift;
if (defined($profile_name) &&
- $profile_name =~ m,^(http://|)([^/:]+):(\d+)(|/|$PROFILE_PAGE|$HEAP_PAGE|$GROWTH_PAGE|$CONTENTION_PAGE)$,o) {
- return ($2, $3, $4);
+ $profile_name =~ m,^(http://|)([^/:]+):(\d+)(|\@\d+)(|/|$PROFILE_PAGE|$HEAP_PAGE|$GROWTH_PAGE|$CONTENTION_PAGE)$,o) {
+ return ($2, $3, $5);
}
return ();
}
diff --git a/src/stacktrace_libunwind-inl.h b/src/stacktrace_libunwind-inl.h
index 529f309..5eb958d 100644
--- a/src/stacktrace_libunwind-inl.h
+++ b/src/stacktrace_libunwind-inl.h
@@ -116,6 +116,7 @@ int GetStackFrames(void** pcs, int* sizes, int max_depth, int skip_count) {
int n = 0;
unw_cursor_t cursor;
unw_context_t uc;
+ unw_word_t sp = 0, next_sp = 0;
if (!libunwind_lock.TryLock()) {
return 0;
@@ -125,23 +126,28 @@ int GetStackFrames(void** pcs, int* sizes, int max_depth, int skip_count) {
RAW_CHECK(unw_init_local(&cursor, &uc) >= 0, "unw_init_local failed");
skip_count++; // Do not include the "GetStackFrames" frame
+ while (skip_count--) {
+ if (unw_step(&cursor) <= 0 ||
+ unw_get_reg(&cursor, UNW_REG_SP, &next_sp) < 0) {
+ goto out;
+ }
+ }
while (n < max_depth) {
- int ret = unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip);
- if (ret < 0)
+ sp = next_sp;
+ if (unw_get_reg(&cursor, UNW_REG_IP, (unw_word_t *) &ip) < 0)
break;
- if (skip_count > 0) {
- skip_count--;
- } else {
+ if (unw_step(&cursor) <= 0 ||
+ unw_get_reg(&cursor, UNW_REG_SP, &next_sp)) {
+ // We couldn't step any further (possibly because we reached _start).
+ // Provide the last good PC we've got, and get out.
+ sizes[n] = 0;
pcs[n++] = ip;
- }
- ret = unw_step(&cursor);
- if (ret <= 0)
break;
+ }
+ sizes[n] = next_sp - sp;
+ pcs[n++] = ip;
}
-
- // No implementation for finding out the stack frame sizes yet.
- memset(sizes, 0, sizeof(*sizes) * n);
-
+ out:
libunwind_lock.Unlock();
return n;
}
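
The frame-size computation above is plain stack-pointer arithmetic: with the stack growing downwards, the frame of the function whose PC was just recorded spans from its own SP up to its caller's SP, hence sizes[n] = next_sp - sp; for example sp = 0x7fff1000 and next_sp = 0x7fff1040 give a 64-byte frame. The last recorded frame gets sizes[n] = 0, since unwinding stopped there and no further SP is available to subtract from.
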
diff --git a/src/stacktrace_x86-inl.h b/src/stacktrace_x86-inl.h
index 794aaf5..af8f529 100644
--- a/src/stacktrace_x86-inl.h
+++ b/src/stacktrace_x86-inl.h
@@ -34,6 +34,12 @@
#include <stdint.h> // for uintptr_t
#include <stdlib.h> // for NULL
+
+#if !defined(WIN32)
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
#include "google/stacktrace.h"
// Given a pointer to a stack frame, locate and return the calling
@@ -67,6 +73,19 @@ static void **NextStackFrame(void **old_sp) {
// last two pages in the address space
if ((uintptr_t)new_sp >= 0xffffe000) return NULL;
#endif
+#if !defined(WIN32)
+ if (!STRICT_UNWINDING) {
+ // Lax sanity checks cause a crash on AMD-based machines with
+ // VDSO-enabled kernels.
+ // Make an extra sanity check to insure new_sp is readable.
+ // Note: NextStackFrame<false>() is only called while the program
+ // is already on its last leg, so it's ok to be slow here.
+ static int page_size = getpagesize();
+ void *new_sp_aligned = (void *)((uintptr_t)new_sp & ~(page_size - 1));
+ if (msync(new_sp_aligned, page_size, MS_ASYNC) == -1)
+ return NULL;
+ }
+#endif
return new_sp;
}
diff --git a/src/tcmalloc.cc b/src/tcmalloc.cc
index a3f8733..0b4e715 100644
--- a/src/tcmalloc.cc
+++ b/src/tcmalloc.cc
@@ -420,12 +420,10 @@ static int NumMoveSize(size_t size) {
static void InitSizeClasses() {
// Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
if (ClassIndex(0) < 0) {
- MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
- abort();
+ CRASH("Invalid class index %d for size 0\n", ClassIndex(0));
}
if (ClassIndex(kMaxSize) >= sizeof(class_array)) {
- MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
- abort();
+ CRASH("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
}
// Compute the size classes we want to use
@@ -475,9 +473,8 @@ static void InitSizeClasses() {
sc++;
}
if (sc != kNumClasses) {
- MESSAGE("wrong number of size classes: found %d instead of %d\n",
- sc, int(kNumClasses));
- abort();
+ CRASH("wrong number of size classes: found %d instead of %d\n",
+ sc, int(kNumClasses));
}
// Initialize the mapping arrays
@@ -494,26 +491,21 @@ static void InitSizeClasses() {
for (size_t size = 0; size <= kMaxSize; size++) {
const int sc = SizeClass(size);
if (sc == 0) {
- MESSAGE("Bad size class %d for %" PRIuS "\n", sc, size);
- abort();
+ CRASH("Bad size class %d for %" PRIuS "\n", sc, size);
}
if (sc > 1 && size <= class_to_size[sc-1]) {
- MESSAGE("Allocating unnecessarily large class %d for %" PRIuS
- "\n", sc, size);
- abort();
+ CRASH("Allocating unnecessarily large class %d for %" PRIuS
+ "\n", sc, size);
}
if (sc >= kNumClasses) {
- MESSAGE("Bad size class %d for %" PRIuS "\n", sc, size);
- abort();
+ CRASH("Bad size class %d for %" PRIuS "\n", sc, size);
}
const size_t s = class_to_size[sc];
if (size > s) {
- MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
- abort();
+ CRASH("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
}
if (s == 0) {
- MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
- abort();
+ CRASH("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
}
}
@@ -596,7 +588,7 @@ class PageHeapAllocator {
if (free_avail_ < kAlignedSize) {
// Need more room
free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- if (free_area_ == NULL) abort();
+ CHECK_CONDITION(free_area_ != NULL);
free_avail_ = kAllocIncrement;
}
result = free_area_;
diff --git a/src/tests/heap-checker-death_unittest.sh b/src/tests/heap-checker-death_unittest.sh
index 4f4242c..0b4de37 100755
--- a/src/tests/heap-checker-death_unittest.sh
+++ b/src/tests/heap-checker-death_unittest.sh
@@ -155,6 +155,14 @@ Test 20 1 "Exiting .* because of .* leaks$" "" \
Test 20 1 "Exiting .* because of .* leaks$" "" \
HEAP_CHECKER_TEST_TEST_LOOP_LEAK=1 HEAP_CHECKER_TEST_NO_THREADS=1 || exit 9
+# Test that very early log messages are present and controllable:
+Test 60 1 "Starting tracking the heap$" "" \
+ HEAP_CHECKER_TEST_TEST_LEAK=1 HEAP_CHECKER_TEST_NO_THREADS=1 PERFTOOLS_VERBOSE=1 \
+ || exit 9
+Test 60 1 "" "Starting tracking the heap" \
+ HEAP_CHECKER_TEST_TEST_LEAK=1 HEAP_CHECKER_TEST_NO_THREADS=1 PERFTOOLS_VERBOSE=-1 \
+ || exit 10
+
cd / # so we're not in TMPDIR when we delete it
rm -rf $TMPDIR
diff --git a/src/tests/heap-checker_unittest.cc b/src/tests/heap-checker_unittest.cc
index ac38fb0..36ebf81 100644
--- a/src/tests/heap-checker_unittest.cc
+++ b/src/tests/heap-checker_unittest.cc
@@ -373,7 +373,7 @@ static void DoDeAllocHidden(void** ptr) {
void* p = *ptr;
VLOG(2) << "Deallocating hidden " << p;
UnHide(&p);
- delete [] (char*)p;
+ delete [] reinterpret_cast<char*>(p);
}
static void DeAllocHidden(void** ptr) {
@@ -435,8 +435,8 @@ static void TestHeapLeakCheckerDeathSimple() {
static void MakeDeathLoop(void** arr1, void** arr2) {
void** a1 = new(initialized) void*[2];
void** a2 = new(initialized) void*[2];
- a1[1] = (void*)a2;
- a2[1] = (void*)a1;
+ a1[1] = reinterpret_cast<void*>(a2);
+ a2[1] = reinterpret_cast<void*>(a1);
Hide(&a1);
Hide(&a2);
Use(&a1);
@@ -474,7 +474,7 @@ static void TestHeapLeakCheckerDeathInverse() {
LogHidden("Leaking", foo);
DeAllocHidden(&bar);
Pause();
- VerifyLeaks(&check, SAME_HEAP, -150 * int64(sizeof(int)), 0);
+ VerifyLeaks(&check, SAME_HEAP, -150 * static_cast<int64>(sizeof(int)), 0);
DeAllocHidden(&foo);
}
@@ -672,10 +672,10 @@ static void ThreadDisabledLeaks() {
if (FLAGS_no_threads) return;
pthread_t tid;
pthread_attr_t attr;
- CHECK(pthread_attr_init(&attr) == 0);
- CHECK(pthread_create(&tid, &attr, RunDisabledLeaks, NULL) == 0);
+ CHECK_EQ(pthread_attr_init(&attr), 0);
+ CHECK_EQ(pthread_create(&tid, &attr, RunDisabledLeaks, NULL), 0);
void* res;
- CHECK(pthread_join(tid, &res) == 0);
+ CHECK_EQ(pthread_join(tid, &res), 0);
}
// different disabled leaks (some in threads)
@@ -764,9 +764,9 @@ static void TestSTLAllocInverse() {
template<class Alloc>
static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
HeapLeakChecker check((string("direct_stl-") + name).c_str());
- const int size = 1000;
- typename Alloc::pointer ptrs[size];
- for (int i = 0; i < size; ++i) {
+ static const int kSize = 1000;
+ typename Alloc::pointer ptrs[kSize];
+ for (int i = 0; i < kSize; ++i) {
typename Alloc::pointer p = allocator.allocate(i*3+1);
HeapLeakChecker::IgnoreObject(p);
// This will crash if p is not known to heap profiler:
@@ -774,7 +774,7 @@ static void DirectTestSTLAlloc(Alloc allocator, const char* name) {
HeapLeakChecker::UnIgnoreObject(p);
ptrs[i] = p;
}
- for (int i = 0; i < size; ++i) {
+ for (int i = 0; i < kSize; ++i) {
allocator.deallocate(ptrs[i], i*3+1);
ptrs[i] = NULL;
}
@@ -786,7 +786,7 @@ static const int kKeys = 50;
static pthread_key_t key[kKeys];
static void KeyFree(void* ptr) {
- delete [] (char*)ptr;
+ delete [] reinterpret_cast<char*>(ptr);
}
static bool key_init_has_run = false;
@@ -929,7 +929,7 @@ static void RunHeapBusyThreads() {
pthread_t tid;
pthread_attr_t attr;
- CHECK(pthread_attr_init(&attr) == 0);
+ CHECK_EQ(pthread_attr_init(&attr), 0);
// make them and let them run
for (int i = 0; i < n; ++i) {
VLOG(0) << "Creating extra thread " << i + 1;
@@ -1018,10 +1018,10 @@ static void ThreadNamedDisabledLeaks() {
if (FLAGS_no_threads) return;
pthread_t tid;
pthread_attr_t attr;
- CHECK(pthread_attr_init(&attr) == 0);
- CHECK(pthread_create(&tid, &attr, RunNamedDisabledLeaks, NULL) == 0);
+ CHECK_EQ(pthread_attr_init(&attr), 0);
+ CHECK_EQ(pthread_create(&tid, &attr, RunNamedDisabledLeaks, NULL), 0);
void* res;
- CHECK(pthread_join(tid, &res) == 0);
+ CHECK_EQ(pthread_join(tid, &res), 0);
}
// test leak disabling via function names
@@ -1092,7 +1092,7 @@ static void TestPointerReach(ObjMakerFunc obj_maker) {
HeapLeakChecker::IgnoreObject(obj);
HeapLeakChecker::UnIgnoreObject(obj); // test UnIgnoreObject
HeapLeakChecker::IgnoreObject(obj); // not to need deletion for obj
-
+
live_objects->push_back(obj_maker()); // test reachability at leak check
}
@@ -1181,7 +1181,7 @@ REGISTER_OBJ_MAKER(set,
class ClassA {
public:
- ClassA(int a) : ptr(NULL) { }
+ explicit ClassA(int a) : ptr(NULL) { }
mutable char* ptr;
};
static const ClassA live_leak_mutable(1);
@@ -1189,7 +1189,7 @@ static const ClassA live_leak_mutable(1);
template<class C>
class TClass {
public:
- TClass(int a) : ptr(NULL) { }
+ explicit TClass(int a) : ptr(NULL) { }
mutable C val;
mutable C* ptr;
};
@@ -1285,16 +1285,16 @@ class ClassMltD2 : public InterfaceA, public InterfaceB, public ClassB {
// to specifically test heap reachability under
// inerface-only multiple inheritance (some use inside-object pointers):
-REGISTER_OBJ_MAKER(MltD1, ClassMltD1* p = new (initialized) ClassMltD1;)
-REGISTER_OBJ_MAKER(MltD1_as_B, ClassB* p = new (initialized) ClassMltD1;)
-REGISTER_OBJ_MAKER(MltD1_as_IA, InterfaceA* p = new (initialized) ClassMltD1;)
-REGISTER_OBJ_MAKER(MltD1_as_IB, InterfaceB* p = new (initialized) ClassMltD1;)
-REGISTER_OBJ_MAKER(MltD1_as_IC, InterfaceC* p = new (initialized) ClassMltD1;)
+REGISTER_OBJ_MAKER(MltD1, ClassMltD1* p = new(initialized) ClassMltD1;)
+REGISTER_OBJ_MAKER(MltD1_as_B, ClassB* p = new(initialized) ClassMltD1;)
+REGISTER_OBJ_MAKER(MltD1_as_IA, InterfaceA* p = new(initialized) ClassMltD1;)
+REGISTER_OBJ_MAKER(MltD1_as_IB, InterfaceB* p = new(initialized) ClassMltD1;)
+REGISTER_OBJ_MAKER(MltD1_as_IC, InterfaceC* p = new(initialized) ClassMltD1;)
-REGISTER_OBJ_MAKER(MltD2, ClassMltD2* p = new (initialized) ClassMltD2;)
-REGISTER_OBJ_MAKER(MltD2_as_B, ClassB* p = new (initialized) ClassMltD2;)
-REGISTER_OBJ_MAKER(MltD2_as_IA, InterfaceA* p = new (initialized) ClassMltD2;)
-REGISTER_OBJ_MAKER(MltD2_as_IB, InterfaceB* p = new (initialized) ClassMltD2;)
+REGISTER_OBJ_MAKER(MltD2, ClassMltD2* p = new(initialized) ClassMltD2;)
+REGISTER_OBJ_MAKER(MltD2_as_B, ClassB* p = new(initialized) ClassMltD2;)
+REGISTER_OBJ_MAKER(MltD2_as_IA, InterfaceA* p = new(initialized) ClassMltD2;)
+REGISTER_OBJ_MAKER(MltD2_as_IB, InterfaceB* p = new(initialized) ClassMltD2;)
// to mimic UnicodeString defined in third_party/icu,
// which store a platform-independent-sized refcount in the first
@@ -1354,7 +1354,7 @@ static void* Mmapper(uintptr_t* addr_after_mmap_call) {
}
// to trick complier into preventing inlining
-static void* (*mmapper_addr)(uintptr_t*) = &Mmapper;
+static void* (*mmapper_addr)(uintptr_t* addr) = &Mmapper;
// TODO(maxim): copy/move this to memory_region_map_unittest
// TODO(maxim): expand this test to include mmap64, mremap and sbrk calls.
@@ -1362,16 +1362,16 @@ static void VerifyMemoryRegionMapStackGet() {
uintptr_t caller_addr_limit;
void* addr = (*mmapper_addr)(&caller_addr_limit);
uintptr_t caller = 0;
- MemoryRegionMap::Lock();
- for (MemoryRegionMap::RegionIterator
- i = MemoryRegionMap::BeginRegionLocked();
- i != MemoryRegionMap::EndRegionLocked(); ++i) {
- if (i->start_addr == reinterpret_cast<uintptr_t>(addr)) {
- CHECK(caller == 0);
- caller = i->caller;
+ { MemoryRegionMap::LockHolder l;
+ for (MemoryRegionMap::RegionIterator
+ i = MemoryRegionMap::BeginRegionLocked();
+ i != MemoryRegionMap::EndRegionLocked(); ++i) {
+ if (i->start_addr == reinterpret_cast<uintptr_t>(addr)) {
+ CHECK_EQ(caller, 0);
+ caller = i->caller();
+ }
}
}
- MemoryRegionMap::Unlock();
// caller must point into Mmapper function:
if (!(reinterpret_cast<uintptr_t>(mmapper_addr) <= caller &&
caller < caller_addr_limit)) {
@@ -1395,7 +1395,7 @@ static void* Mallocer(uintptr_t* addr_after_malloc_call) {
}
// to trick complier into preventing inlining
-static void* (*mallocer_addr)(uintptr_t*) = &Mallocer;
+static void* (*mallocer_addr)(uintptr_t* addr) = &Mallocer;
// non-static for friendship with HeapProfiler
// TODO(maxim): expand this test to include
@@ -1535,7 +1535,8 @@ int main(int argc, char** argv) {
// Test that various STL allocators work. Some of these are redundant, but
// we don't know how STL might change in the future. For example,
// http://wiki/Main/StringNeStdString.
-#define DTSL(a) { DirectTestSTLAlloc(a, #a); Pause(); }
+#define DTSL(a) { DirectTestSTLAlloc(a, #a); \
+ Pause(); }
DTSL(std::allocator<char>());
DTSL(std::allocator<int>());
DTSL(std::string().get_allocator());
diff --git a/src/tests/heap-profiler_unittest.cc b/src/tests/heap-profiler_unittest.cc
index 472c845..9828cdd 100644
--- a/src/tests/heap-profiler_unittest.cc
+++ b/src/tests/heap-profiler_unittest.cc
@@ -46,7 +46,7 @@
#include <sys/wait.h> // for wait()
#include <google/heap-profiler.h>
-const static int kMaxCount = 100000;
+static const int kMaxCount = 100000;
int* g_array[kMaxCount]; // an array of int-vectors
static void Allocate(int start, int end, int size) {
diff --git a/src/tests/low_level_alloc_unittest.cc b/src/tests/low_level_alloc_unittest.cc
index c1de5ae..8029419 100644
--- a/src/tests/low_level_alloc_unittest.cc
+++ b/src/tests/low_level_alloc_unittest.cc
@@ -195,7 +195,7 @@ int main(int argc, char *argv[]) {
}
}
printf("PASS\n");
- MallocHook::SetNewHook(old_alloc_hook);
- MallocHook::SetDeleteHook(old_free_hook);
+ CHECK_EQ(MallocHook::SetNewHook(old_alloc_hook), AllocHook);
+ CHECK_EQ(MallocHook::SetDeleteHook(old_free_hook), FreeHook);
return 0;
}
diff --git a/src/tests/maybe_threads_unittest.sh b/src/tests/maybe_threads_unittest.sh
index 6e0e5f9..f2ba73d 100755
--- a/src/tests/maybe_threads_unittest.sh
+++ b/src/tests/maybe_threads_unittest.sh
@@ -67,6 +67,10 @@ if [ -e "$UNITTEST_DIR/libtcmalloc_minimal.so" ]; then
LIB_PATH="$UNITTEST_DIR/libtcmalloc_minimal.so"
elif [ -e "$UNITTEST_DIR/.libs/libtcmalloc_minimal.so" ]; then
LIB_PATH="$UNITTEST_DIR/.libs/libtcmalloc_minimal.so"
+elif [ -e "$UNITTEST_DIR/libtcmalloc_minimal.dylib" ]; then # for os x
+ LIB_PATH="$UNITTEST_DIR/libtcmalloc_minimal.dylib"
+elif [ -e "$UNITTEST_DIR/.libs/libtcmalloc_minimal.dylib" ]; then
+ LIB_PATH="$UNITTEST_DIR/.libs/libtcmalloc_minimal.dylib"
else
echo "Cannot run $0: cannot find libtcmalloc_minimal.so"
exit 2
diff --git a/src/windows/port.cc b/src/windows/port.cc
index 87aab9a..3aaa206 100644
--- a/src/windows/port.cc
+++ b/src/windows/port.cc
@@ -55,6 +55,8 @@ int safe_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
return _vsnprintf(str, size-1, format, ap);
}
+// mingw defines its own snprintf, though msvc does not
+#ifndef __MINGW32__
int snprintf(char *str, size_t size, const char *format, ...) {
va_list ap;
va_start(ap, format);
@@ -62,6 +64,7 @@ int snprintf(char *str, size_t size, const char *format, ...) {
va_end(ap);
return r;
}
+#endif
int getpagesize() {
static int pagesize = 0;