path: root/src/tcmalloc.cc
author    csilvers <csilvers@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2008-12-13 01:35:42 +0000
committer csilvers <csilvers@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2008-12-13 01:35:42 +0000
commit    6fa2a2574ce1c15ac12293e24691d69a41972e54 (patch)
tree      606da4a80de5c91721969ade70c4d9a87cb1604a /src/tcmalloc.cc
parent    16191f87ff8dc78295c0f617060460664fc444bd (diff)
download  gperftools-6fa2a2574ce1c15ac12293e24691d69a41972e54.tar.gz
Thu Dec 11 16:01:32 2008  Google Inc. <opensource@google.com>

  * google-perftools: version 1.0rc1 release
  * Replace API for selectively disabling heap-checker in code (sanjay)
  * Add a pre-mmap hook (daven, adlr)
  * Add MallocExtension interface to set memory-releasing rate (fikes)
  * Augment pprof to allow any string ending in /pprof/profile (csilvers)
  * PORTING: Rewrite -- and fix -- malloc patching for windows (dvitek)
  * PORTING: Add nm-pdb and addr2line-pdb for use by pprof (dvitek)
  * PORTING: Improve cygwin and mingw support (jperkins, csilvers)
  * PORTING: Fix pprof for mac os x, other pprof improvements (csilvers)
  * PORTING: Fix some PPC bugs in our locking code (anton.blanchard)
  * A new unittest, sampling_test, to verify tcmalloc-profiles (csilvers)
  * Turn off TLS for gcc < 4.1.2, due to a TLS + -fPIC bug (csilvers)
  * Prefer __builtin_frame_address to assembly for stacktraces (nlewycky)
  * Separate tcmalloc.cc out into multiple files -- finally! (kash)
  * Make our locking code work with -fPIC on 32-bit x86 (aruns)
  * Fix an initialization-ordering bug for tcmalloc/profiling (csilvers)
  * Use "initial exec" model of TLS to speed up tcmalloc (csilvers)
  * Enforce 16-byte alignment for tcmalloc, for SSE (sanjay)

git-svn-id: http://gperftools.googlecode.com/svn/trunk@60 6b5cf1ce-ec42-a296-1ba9-69fdba395a50
Diffstat (limited to 'src/tcmalloc.cc')
-rw-r--r--  src/tcmalloc.cc  2730
1 file changed, 332 insertions, 2398 deletions
diff --git a/src/tcmalloc.cc b/src/tcmalloc.cc
index b0d9d04..5d8b225 100644
--- a/src/tcmalloc.cc
+++ b/src/tcmalloc.cc
@@ -109,64 +109,35 @@
#endif
#include <errno.h>
#include <stdarg.h>
-#include "packed-cache-inl.h"
+#include <algorithm>
#include "base/commandlineflags.h"
#include "base/basictypes.h" // gets us PRIu64
#include "base/sysinfo.h"
#include "base/spinlock.h"
+#include "common.h"
#include "malloc_hook-inl.h"
#include <google/malloc_hook.h>
#include <google/malloc_extension.h>
+#include "central_freelist.h"
#include "internal_logging.h"
+#include "linked_list.h"
+#include "maybe_threads.h"
+#include "page_heap.h"
+#include "page_heap_allocator.h"
#include "pagemap.h"
+#include "span.h"
+#include "static_vars.h"
#include "system-alloc.h"
-#include "maybe_threads.h"
-
-// This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
-// you're porting to a system where you really can't get a stacktrace.
-#ifdef NO_TCMALLOC_SAMPLES
- // We use #define so code compiles even if you #include stacktrace.h somehow.
-# define GetStackTrace(stack, depth, skip) (0)
-#else
-# include <google/stacktrace.h>
-#endif
+#include "tcmalloc_guard.h"
+#include "thread_cache.h"
-// Even if we have support for thread-local storage in the compiler
-// and linker, the OS may not support it. We need to check that at
-// runtime. Right now, we have to keep a manual set of "bad" OSes.
-#if defined(HAVE_TLS)
- static bool kernel_supports_tls = false; // be conservative
- static inline bool KernelSupportsTLS() {
- return kernel_supports_tls;
- }
-# if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
- static void CheckIfKernelSupportsTLS() {
- kernel_supports_tls = false;
- }
-# else
-# include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
- static void CheckIfKernelSupportsTLS() {
- struct utsname buf;
- if (uname(&buf) != 0) { // should be impossible
- MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
- kernel_supports_tls = false;
- } else if (strcasecmp(buf.sysname, "linux") == 0) {
- // The linux case: the first kernel to support TLS was 2.6.0
- if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
- kernel_supports_tls = false;
- else if (buf.release[0] == '2' && buf.release[1] == '.' &&
- buf.release[2] >= '0' && buf.release[2] < '6' &&
- buf.release[3] == '.') // 2.0 - 2.5
- kernel_supports_tls = false;
- else
- kernel_supports_tls = true;
- } else { // some other kernel, we'll be optimistic
- kernel_supports_tls = true;
- }
- // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
- }
-# endif // HAVE_DECL_UNAME
-#endif // HAVE_TLS
+using tcmalloc::PageHeap;
+using tcmalloc::PageHeapAllocator;
+using tcmalloc::SizeMap;
+using tcmalloc::Span;
+using tcmalloc::StackTrace;
+using tcmalloc::Static;
+using tcmalloc::ThreadCache;
// __THROW is defined in glibc systems. It means, counter-intuitively,
// "This function will never throw an exception." It's an optional
@@ -175,98 +146,23 @@
# define __THROW // __THROW is just an optimization, so ok to make it ""
#endif
-//-------------------------------------------------------------------
-// Configuration
-//-------------------------------------------------------------------
-
-// Not all possible combinations of the following parameters make
-// sense. In particular, if kMaxSize increases, you may have to
-// increase kNumClasses as well.
-static const size_t kPageShift = 12;
-static const size_t kPageSize = 1 << kPageShift;
-static const size_t kMaxSize = 8u * kPageSize;
-static const size_t kAlignShift = 3;
-static const size_t kAlignment = 1 << kAlignShift;
-static const size_t kNumClasses = 68;
-
-// Allocates a big block of memory for the pagemap once we reach more than
-// 128MB
-static const size_t kPageMapBigAllocationThreshold = 128 << 20;
-
-// Minimum number of pages to fetch from system at a time. Must be
-// significantly bigger than kBlockSize to amortize system-call
-// overhead, and also to reduce external fragmentation. Also, we
-// should keep this value big because various incarnations of Linux
-// have small limits on the number of mmap() regions per
-// address-space.
-static const int kMinSystemAlloc = 1 << (20 - kPageShift);
-
-// Number of objects to move between a per-thread list and a central
-// list in one shot. We want this to be not too small so we can
-// amortize the lock overhead for accessing the central list. Making
-// it too big may temporarily cause unnecessary memory wastage in the
-// per-thread free list until the scavenger cleans up the list.
-static int num_objects_to_move[kNumClasses];
-
-// Maximum length we allow a per-thread free-list to have before we
-// move objects from it into the corresponding central free-list. We
-// want this big to avoid locking the central free-list too often. It
-// should not hurt to make this list somewhat big because the
-// scavenging code will shrink it down when its contents are not in use.
-static const int kMaxFreeListLength = 256;
-
-// Lower and upper bounds on the per-thread cache sizes
-static const size_t kMinThreadCacheSize = kMaxSize * 2;
-static const size_t kMaxThreadCacheSize = 2 << 20;
-
-// Default bound on the total amount of thread caches
-static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
-
-// For all span-lengths < kMaxPages we keep an exact-size list.
-// REQUIRED: kMaxPages >= kMinSystemAlloc;
-static const size_t kMaxPages = kMinSystemAlloc;
-
-/* The smallest prime > 2^n */
-static unsigned int primes_list[] = {
- // Small values might cause high rates of sampling
- // and hence commented out.
- // 2, 5, 11, 17, 37, 67, 131, 257,
- // 521, 1031, 2053, 4099, 8209, 16411,
- 32771, 65537, 131101, 262147, 524309, 1048583,
- 2097169, 4194319, 8388617, 16777259, 33554467 };
-
-// Twice the approximate gap between sampling actions.
-// I.e., we take one sample approximately once every
-// tcmalloc_sample_parameter/2
-// bytes of allocation, i.e., ~ once every 128KB.
-// Must be a prime number.
-#ifdef NO_TCMALLOC_SAMPLES
-DEFINE_int64(tcmalloc_sample_parameter, 0,
- "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
-static size_t sample_period = 0;
+DECLARE_int64(tcmalloc_sample_parameter);
+DECLARE_double(tcmalloc_release_rate);
+
+// For windows, the printf we use to report large allocs is
+// potentially dangerous: it could cause a malloc that would cause an
+// infinite loop. So by default we set the threshold to a huge number
+// on windows, so this bad situation will never trigger. You can
+// always set TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD manually if you
+// want this functionality.
+#ifdef _WIN32
+const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 62;
#else
-DEFINE_int64(tcmalloc_sample_parameter,
- EnvToInt("TCMALLOC_SAMPLE_PARAMETER", 262147),
- "Twice the approximate gap between sampling actions."
- " Must be a prime number. Otherwise will be rounded up to a "
- " larger prime number");
-static size_t sample_period = EnvToInt("TCMALLOC_SAMPLE_PARAMETER", 262147);
+const int64 kDefaultLargeAllocReportThreshold = static_cast<int64>(1) << 30;
#endif
-// Protects sample_period above
-static SpinLock sample_period_lock(SpinLock::LINKER_INITIALIZED);
-
-// Parameters for controlling how fast memory is returned to the OS.
-
-DEFINE_double(tcmalloc_release_rate,
- EnvToDouble("TCMALLOC_RELEASE_RATE", 1.0),
- "Rate at which we release unused memory to the system. "
- "Zero means we never release memory back to the system. "
- "Increase this flag to return memory faster; decrease it "
- "to return memory slower. Reasonable rates are in the "
- "range [0.0,10.0]");
-
DEFINE_int64(tcmalloc_large_alloc_report_threshold,
- EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD", 1<<30),
+ EnvToInt64("TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD",
+ kDefaultLargeAllocReportThreshold),
"Allocations larger than this value cause a stack "
"trace to be dumped to stderr. The threshold for "
"dumping stack traces is increased by a factor of 1.125 "
@@ -277,2026 +173,6 @@ DEFINE_int64(tcmalloc_large_alloc_report_threshold,
"is very large and therefore you should see no extra "
"logging unless the flag is overridden.");
-//-------------------------------------------------------------------
-// Mapping from size to size_class and vice versa
-//-------------------------------------------------------------------
-
-// Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
-// array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
-// So for these larger sizes we have an array indexed by ceil(size/128).
-//
-// We flatten both logical arrays into one physical array and use
-// arithmetic to compute an appropriate index. The constants used by
-// ClassIndex() were selected to make the flattening work.
-//
-// Examples:
-// Size Expression Index
-// -------------------------------------------------------
-// 0 (0 + 7) / 8 0
-// 1 (1 + 7) / 8 1
-// ...
-// 1024 (1024 + 7) / 8 128
-// 1025 (1025 + 127 + (120<<7)) / 128 129
-// ...
-// 32768 (32768 + 127 + (120<<7)) / 128 376
-static const int kMaxSmallSize = 1024;
-static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
-static const int add_amount[2] = { 7, 127 + (120 << 7) };
-static unsigned char class_array[377];
-
-// Compute index of the class_array[] entry for a given size
-static inline int ClassIndex(int s) {
- ASSERT(0 <= s);
- ASSERT(s <= kMaxSize);
- const int i = (s > kMaxSmallSize);
- return (s + add_amount[i]) >> shift_amount[i];
-}
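
Illustrative aside (not part of this commit): the flattened-index arithmetic described in the comment above can be checked with a small standalone program; the constants are copied from the removed block, and the assertions reproduce the worked table.

    // Standalone check of the ClassIndex() arithmetic described above.
    // Constants mirror the removed code: alignment 8 up to 1024, 128 above.
    #include <cassert>
    #include <cstdio>

    static const int kMaxSmallSize = 1024;
    static const int shift_amount[2] = { 3, 7 };             // divide by 8 or 128
    static const int add_amount[2]   = { 7, 127 + (120 << 7) };

    static int ClassIndex(int s) {
      const int i = (s > kMaxSmallSize);                      // 0: small, 1: large
      return (s + add_amount[i]) >> shift_amount[i];
    }

    int main() {
      // Values from the table in the comment above.
      assert(ClassIndex(0)     == 0);
      assert(ClassIndex(1)     == 1);
      assert(ClassIndex(1024)  == 128);
      assert(ClassIndex(1025)  == 129);
      assert(ClassIndex(32768) == 376);
      std::printf("class_array needs %d entries\n", ClassIndex(32768) + 1);
      return 0;
    }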
-
-// Mapping from size class to max size storable in that class
-static size_t class_to_size[kNumClasses];
-
-// Mapping from size class to number of pages to allocate at a time
-static size_t class_to_pages[kNumClasses];
-
-// TransferCache is used to cache transfers of num_objects_to_move[size_class]
-// back and forth between thread caches and the central cache for a given size
-// class.
-struct TCEntry {
- void *head; // Head of chain of objects.
- void *tail; // Tail of chain of objects.
-};
-// A central cache freelist can have anywhere from 0 to kNumTransferEntries
-// slots to put linked list chains into. To keep memory usage bounded the total
-// number of TCEntries across size classes is fixed. Currently each size
-// class is initially given one TCEntry which also means that the maximum any
-// one class can have is kNumClasses.
-static const int kNumTransferEntries = kNumClasses;
-
-// Note: the following only works for "n"s that fit in 32-bits, but
-// that is fine since we only use it for small sizes.
-static inline int LgFloor(size_t n) {
- int log = 0;
- for (int i = 4; i >= 0; --i) {
- int shift = (1 << i);
- size_t x = n >> shift;
- if (x != 0) {
- n = x;
- log += shift;
- }
- }
- ASSERT(n == 1);
- return log;
-}
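
For reference only (hypothetical test, not from this commit), a standalone check that the shift-halving loop above computes floor(log2(n)) for small values:

    // Standalone check that the shift-halving loop computes floor(log2(n)).
    #include <cassert>
    #include <cstddef>

    static int LgFloor(size_t n) {
      int log = 0;
      for (int i = 4; i >= 0; --i) {      // try shifts of 16, 8, 4, 2, 1
        int shift = (1 << i);
        size_t x = n >> shift;
        if (x != 0) { n = x; log += shift; }
      }
      return log;                          // n has been reduced to 1 here
    }

    int main() {
      assert(LgFloor(1) == 0);
      assert(LgFloor(7) == 2);             // floor(log2(7)) = 2
      assert(LgFloor(8) == 3);
      assert(LgFloor(1024) == 10);
      assert(LgFloor(8 * 4096) == 15);     // kMaxSize with 4K pages
      return 0;
    }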
-
-// Some very basic linked list functions for dealing with using void * as
-// storage.
-
-static inline void *SLL_Next(void *t) {
- return *(reinterpret_cast<void**>(t));
-}
-
-static inline void SLL_SetNext(void *t, void *n) {
- *(reinterpret_cast<void**>(t)) = n;
-}
-
-static inline void SLL_Push(void **list, void *element) {
- SLL_SetNext(element, *list);
- *list = element;
-}
-
-static inline void *SLL_Pop(void **list) {
- void *result = *list;
- *list = SLL_Next(*list);
- return result;
-}
-
-
-// Remove N elements from a linked list to which head points. head will be
-// modified to point to the new head. start and end will point to the first
-// and last nodes of the range. Note that end will point to NULL after this
-// function is called.
-static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
- if (N == 0) {
- *start = NULL;
- *end = NULL;
- return;
- }
-
- void *tmp = *head;
- for (int i = 1; i < N; ++i) {
- tmp = SLL_Next(tmp);
- }
-
- *start = *head;
- *end = tmp;
- *head = SLL_Next(tmp);
- // Unlink range from list.
- SLL_SetNext(tmp, NULL);
-}
-
-static inline void SLL_PushRange(void **head, void *start, void *end) {
- if (!start) return;
- SLL_SetNext(end, *head);
- *head = start;
-}
-
-static inline size_t SLL_Size(void *head) {
- int count = 0;
- while (head) {
- count++;
- head = SLL_Next(head);
- }
- return count;
-}
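
Illustrative aside (not part of this diff): the SLL_* helpers treat the first word of every free object as its "next" pointer, so no separate node structure is needed. A minimal standalone demo of that convention:

    // Minimal demo of the void*-based singly linked list convention above:
    // each free object stores the next pointer in its own first word.
    #include <cassert>

    static void* SLL_Next(void* t)             { return *(reinterpret_cast<void**>(t)); }
    static void  SLL_SetNext(void* t, void* n) { *(reinterpret_cast<void**>(t)) = n; }
    static void  SLL_Push(void** list, void* e) { SLL_SetNext(e, *list); *list = e; }
    static void* SLL_Pop(void** list) { void* r = *list; *list = SLL_Next(*list); return r; }

    int main() {
      // Three fake "objects", each big enough to hold a pointer.
      void* a[2]; void* b[2]; void* c[2];
      void* list = nullptr;
      SLL_Push(&list, a);
      SLL_Push(&list, b);
      SLL_Push(&list, c);                  // list is now c -> b -> a
      assert(SLL_Pop(&list) == c);
      assert(SLL_Pop(&list) == b);
      assert(SLL_Pop(&list) == a);
      assert(list == nullptr);
      return 0;
    }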
-
-// Setup helper functions.
-
-static inline int SizeClass(int size) {
- return class_array[ClassIndex(size)];
-}
-
-// Get the byte-size for a specified class
-static inline size_t ByteSizeForClass(size_t cl) {
- return class_to_size[cl];
-}
-
-
-static int NumMoveSize(size_t size) {
- if (size == 0) return 0;
- // Use approx 64k transfers between thread and central caches.
- int num = static_cast<int>(64.0 * 1024.0 / size);
- if (num < 2) num = 2;
- // Clamp well below kMaxFreeListLength to avoid ping pong between central
- // and thread caches.
- if (num > static_cast<int>(0.8 * kMaxFreeListLength))
- num = static_cast<int>(0.8 * kMaxFreeListLength);
-
-  // Also, avoid bringing too many objects into small-object free
- // lists. There are lots of such lists, and if we allow each one to
- // fetch too many at a time, we end up having to scavenge too often
- // (especially when there are lots of threads and each thread gets a
- // small allowance for its thread cache).
- //
- // TODO: Make thread cache free list sizes dynamic so that we do not
- // have to equally divide a fixed resource amongst lots of threads.
- if (num > 32) num = 32;
-
- return num;
-}
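
For orientation (illustration only, not from this commit), the batch-size policy above -- roughly 64KB per transfer, clamped to at least 2, to 80% of kMaxFreeListLength, and to at most 32 -- works out like this:

    // Standalone sketch of the batch-size policy described above.
    #include <cassert>
    #include <cstddef>

    static const int kMaxFreeListLength = 256;

    static int NumMoveSize(size_t size) {
      if (size == 0) return 0;
      int num = static_cast<int>(64.0 * 1024.0 / size);    // target ~64KB batches
      if (num < 2) num = 2;                                 // at least 2 per transfer
      if (num > static_cast<int>(0.8 * kMaxFreeListLength))
        num = static_cast<int>(0.8 * kMaxFreeListLength);   // stay well below the list cap
      if (num > 32) num = 32;                               // small-object cap
      return num;
    }

    int main() {
      assert(NumMoveSize(8)     == 32);   // 8192 objects wanted, capped at 32
      assert(NumMoveSize(4096)  == 16);   // 64KB / 4KB
      assert(NumMoveSize(32768) == 2);    // 64KB / 32KB
      assert(NumMoveSize(65536) == 2);    // below 2, raised to the minimum
      return 0;
    }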
-
-// Initialize the mapping arrays
-static void InitSizeClasses() {
- // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
- if (ClassIndex(0) < 0) {
- CRASH("Invalid class index %d for size 0\n", ClassIndex(0));
- }
- if (ClassIndex(kMaxSize) >= sizeof(class_array)) {
- CRASH("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
- }
-
- // Compute the size classes we want to use
- int sc = 1; // Next size class to assign
- int alignshift = kAlignShift;
- int last_lg = -1;
- for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
- int lg = LgFloor(size);
- if (lg > last_lg) {
- // Increase alignment every so often.
- //
- // Since we double the alignment every time size doubles and
- // size >= 128, this means that space wasted due to alignment is
- // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
- // bytes, so the space wasted as a percentage starts falling for
- // sizes > 2K.
- if ((lg >= 7) && (alignshift < 8)) {
- alignshift++;
- }
- last_lg = lg;
- }
-
- // Allocate enough pages so leftover is less than 1/8 of total.
- // This bounds wasted space to at most 12.5%.
- size_t psize = kPageSize;
- while ((psize % size) > (psize >> 3)) {
- psize += kPageSize;
- }
- const size_t my_pages = psize >> kPageShift;
-
- if (sc > 1 && my_pages == class_to_pages[sc-1]) {
- // See if we can merge this into the previous class without
- // increasing the fragmentation of the previous class.
- const size_t my_objects = (my_pages << kPageShift) / size;
- const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
- / class_to_size[sc-1];
- if (my_objects == prev_objects) {
- // Adjust last class to include this size
- class_to_size[sc-1] = size;
- continue;
- }
- }
-
- // Add new class
- class_to_pages[sc] = my_pages;
- class_to_size[sc] = size;
- sc++;
- }
- if (sc != kNumClasses) {
- CRASH("wrong number of size classes: found %d instead of %d\n",
- sc, int(kNumClasses));
- }
-
- // Initialize the mapping arrays
- int next_size = 0;
- for (int c = 1; c < kNumClasses; c++) {
- const int max_size_in_class = class_to_size[c];
- for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
- class_array[ClassIndex(s)] = c;
- }
- next_size = max_size_in_class + kAlignment;
- }
-
- // Double-check sizes just to be safe
- for (size_t size = 0; size <= kMaxSize; size++) {
- const int sc = SizeClass(size);
- if (sc <= 0 || sc >= kNumClasses) {
- CRASH("Bad size class %d for %" PRIuS "\n", sc, size);
- }
- if (sc > 1 && size <= class_to_size[sc-1]) {
- CRASH("Allocating unnecessarily large class %d for %" PRIuS
- "\n", sc, size);
- }
- const size_t s = class_to_size[sc];
- if (size > s) {
- CRASH("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
- }
- if (s == 0) {
- CRASH("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
- }
- }
-
- // Initialize the num_objects_to_move array.
- for (size_t cl = 1; cl < kNumClasses; ++cl) {
- num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
- }
-
- if (false) {
- // Dump class sizes and maximum external wastage per size class
- for (size_t cl = 1; cl < kNumClasses; ++cl) {
- const int alloc_size = class_to_pages[cl] << kPageShift;
- const int alloc_objs = alloc_size / class_to_size[cl];
- const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
- const int max_waste = alloc_size - min_used;
- MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
- int(cl),
- int(class_to_size[cl-1] + 1),
- int(class_to_size[cl]),
- int(class_to_pages[cl] << kPageShift),
- max_waste * 100.0 / alloc_size
- );
- }
- }
-}
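
Illustrative aside (not part of this change): the span-sizing rule above ("leftover is less than 1/8 of total") picks class_to_pages[] as the smallest page count whose remainder stays within 12.5%. A standalone sketch of just that inner loop, assuming the removed 4K page size:

    // Standalone sketch of the "waste under 1/8 of the span" rule used above
    // to pick class_to_pages[], assuming the removed 4K page size.
    #include <cassert>
    #include <cstddef>

    static const size_t kPageShift = 12;
    static const size_t kPageSize  = 1 << kPageShift;

    static size_t PagesForClass(size_t size) {
      size_t psize = kPageSize;
      while ((psize % size) > (psize >> 3)) {   // leftover must be <= 12.5%
        psize += kPageSize;
      }
      return psize >> kPageShift;
    }

    int main() {
      assert(PagesForClass(32)   == 1);  // 4096 % 32 == 0: one page, no waste
      assert(PagesForClass(3072) == 3);  // 1 page wastes 1024, 2 waste 2048, 3 waste 0
      assert(PagesForClass(2688) == 2);  // 1 page wastes 1408; 2 pages waste only 128
      assert(PagesForClass(6144) == 3);  // object bigger than a page: grow to 3 pages
      return 0;
    }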
-
-// -------------------------------------------------------------------------
-// Simple allocator for objects of a specified type. External locking
-// is required before accessing one of these objects.
-// -------------------------------------------------------------------------
-
-// Metadata allocator -- keeps stats about how many bytes allocated
-static uint64_t metadata_system_bytes = 0;
-static void* MetaDataAlloc(size_t bytes) {
- void* result = TCMalloc_SystemAlloc(bytes, NULL);
- if (result != NULL) {
- metadata_system_bytes += bytes;
- }
- return result;
-}
-
-template <class T>
-class PageHeapAllocator {
- private:
- // How much to allocate from system at a time
- static const int kAllocIncrement = 128 << 10;
-
- // Aligned size of T
- static const size_t kAlignedSize
- = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
-
- // Free area from which to carve new objects
- char* free_area_;
- size_t free_avail_;
-
- // Free list of already carved objects
- void* free_list_;
-
- // Number of allocated but unfreed objects
- int inuse_;
-
- public:
- void Init() {
- ASSERT(kAlignedSize <= kAllocIncrement);
- inuse_ = 0;
- free_area_ = NULL;
- free_avail_ = 0;
- free_list_ = NULL;
- // Reserve some space at the beginning to avoid fragmentation.
- Delete(New());
- }
-
- T* New() {
- // Consult free list
- void* result;
- if (free_list_ != NULL) {
- result = free_list_;
- free_list_ = *(reinterpret_cast<void**>(result));
- } else {
- if (free_avail_ < kAlignedSize) {
- // Need more room
- free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- CHECK_CONDITION(free_area_ != NULL);
- free_avail_ = kAllocIncrement;
- }
- result = free_area_;
- free_area_ += kAlignedSize;
- free_avail_ -= kAlignedSize;
- }
- inuse_++;
- return reinterpret_cast<T*>(result);
- }
-
- void Delete(T* p) {
- *(reinterpret_cast<void**>(p)) = free_list_;
- free_list_ = p;
- inuse_--;
- }
-
- int inuse() const { return inuse_; }
-};
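
As a hypothetical standalone analogue (not part of this change): the same carve-from-an-arena-plus-free-list pattern, backed by plain malloc instead of MetaDataAlloc/TCMalloc_SystemAlloc, to show how New() reuses freed slots before carving new ones:

    // Hypothetical standalone analogue of the pattern above, backed by
    // plain malloc (illustration only; the real class uses MetaDataAlloc).
    #include <cassert>
    #include <cstddef>
    #include <cstdlib>

    template <class T>
    class TinyArena {
     public:
      TinyArena() : free_area_(0), free_avail_(0), free_list_(0), inuse_(0) {}

      T* New() {
        void* result;
        if (free_list_ != 0) {                     // reuse a freed slot first
          result = free_list_;
          free_list_ = *reinterpret_cast<void**>(result);
        } else {
          if (free_avail_ < kAlignedSize) {        // carve a fresh block
            free_area_ = static_cast<char*>(std::malloc(kAllocIncrement));
            free_avail_ = kAllocIncrement;
          }
          result = free_area_;
          free_area_ += kAlignedSize;
          free_avail_ -= kAlignedSize;
        }
        inuse_++;
        return reinterpret_cast<T*>(result);
      }

      void Delete(T* p) {                          // push onto the object free list
        *reinterpret_cast<void**>(p) = free_list_;
        free_list_ = p;
        inuse_--;
      }

      int inuse() const { return inuse_; }

     private:
      static const size_t kAllocIncrement = 128 << 10;
      static const size_t kAlignedSize = ((sizeof(T) + 7) / 8) * 8;
      char* free_area_;
      size_t free_avail_;
      void* free_list_;
      int inuse_;
    };

    struct Widget { void* slot[4]; };

    int main() {
      TinyArena<Widget> arena;
      Widget* a = arena.New();
      Widget* b = arena.New();
      arena.Delete(a);
      Widget* c = arena.New();   // comes back from the free list
      assert(c == a);
      assert(b != c && arena.inuse() == 2);
      return 0;
    }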
-
-// -------------------------------------------------------------------------
-// Span - a contiguous run of pages
-// -------------------------------------------------------------------------
-
-// Type that can hold a page number
-typedef uintptr_t PageID;
-
-// Type that can hold the length of a run of pages
-typedef uintptr_t Length;
-
-static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
-
-// Convert byte size into pages. This won't overflow, but may return
-// an unreasonably large value if bytes is huge enough.
-static inline Length pages(size_t bytes) {
- return (bytes >> kPageShift) +
- ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
-}
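
Quick illustrative check (not from this commit) of the bytes-to-pages rounding above, assuming the removed 4K page size; size_t stands in for Length here:

    // Quick standalone check of the bytes -> pages rounding above (4K pages).
    #include <cassert>
    #include <cstddef>

    static const size_t kPageShift = 12;
    static const size_t kPageSize  = 1 << kPageShift;

    static size_t pages(size_t bytes) {
      return (bytes >> kPageShift) + ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
    }

    int main() {
      assert(pages(0) == 0);
      assert(pages(1) == 1);
      assert(pages(kPageSize) == 1);
      assert(pages(kPageSize + 1) == 2);
      assert(pages(10 * kPageSize) == 10);
      return 0;
    }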
-
-// Information kept for a span (a contiguous run of pages).
-struct Span {
- PageID start; // Starting page number
- Length length; // Number of pages in span
-  Span* next; // Used when in a linked list
-  Span* prev; // Used when in a linked list
- void* objects; // Linked list of free objects
- unsigned int refcount : 16; // Number of non-free objects
- unsigned int sizeclass : 8; // Size-class for small objects (or 0)
- unsigned int location : 2; // Is the span on a freelist, and if so, which?
- unsigned int sample : 1; // Sampled object?
-
-#undef SPAN_HISTORY
-#ifdef SPAN_HISTORY
-  // For debugging, we can keep a log of events per span
- int nexthistory;
- char history[64];
- int value[64];
-#endif
-
- // What freelist the span is on: IN_USE if on none, or normal or returned
- enum { IN_USE, ON_NORMAL_FREELIST, ON_RETURNED_FREELIST };
-};
-
-#ifdef SPAN_HISTORY
-void Event(Span* span, char op, int v = 0) {
- span->history[span->nexthistory] = op;
- span->value[span->nexthistory] = v;
- span->nexthistory++;
- if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
-}
-#else
-#define Event(s,o,v) ((void) 0)
-#endif
-
-// Allocator/deallocator for spans
-static PageHeapAllocator<Span> span_allocator;
-static Span* NewSpan(PageID p, Length len) {
- Span* result = span_allocator.New();
- memset(result, 0, sizeof(*result));
- result->start = p;
- result->length = len;
-#ifdef SPAN_HISTORY
- result->nexthistory = 0;
-#endif
- return result;
-}
-
-static void DeleteSpan(Span* span) {
-#ifndef NDEBUG
- // In debug mode, trash the contents of deleted Spans
- memset(span, 0x3f, sizeof(*span));
-#endif
- span_allocator.Delete(span);
-}
-
-// -------------------------------------------------------------------------
-// Doubly linked list of spans.
-// -------------------------------------------------------------------------
-
-static void DLL_Init(Span* list) {
- list->next = list;
- list->prev = list;
-}
-
-static void DLL_Remove(Span* span) {
- span->prev->next = span->next;
- span->next->prev = span->prev;
- span->prev = NULL;
- span->next = NULL;
-}
-
-static inline bool DLL_IsEmpty(const Span* list) {
- return list->next == list;
-}
-
-static int DLL_Length(const Span* list) {
- int result = 0;
- for (Span* s = list->next; s != list; s = s->next) {
- result++;
- }
- return result;
-}
-
-#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
-static void DLL_Print(const char* label, const Span* list) {
- MESSAGE("%-10s %p:", label, list);
- for (const Span* s = list->next; s != list; s = s->next) {
- MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
- }
- MESSAGE("\n");
-}
-#endif
-
-static void DLL_Prepend(Span* list, Span* span) {
- ASSERT(span->next == NULL);
- ASSERT(span->prev == NULL);
- span->next = list->next;
- span->prev = list;
- list->next->prev = span;
- list->next = span;
-}
-
-// -------------------------------------------------------------------------
-// Stack traces kept for sampled allocations
-// The following state is protected by pageheap_lock_.
-// -------------------------------------------------------------------------
-
-// size/depth are made the same size as a pointer so that some generic
-// code below can conveniently cast them back and forth to void*.
-static const int kMaxStackDepth = 31;
-struct StackTrace {
- uintptr_t size; // Size of object
- uintptr_t depth; // Number of PC values stored in array below
- void* stack[kMaxStackDepth];
-};
-static PageHeapAllocator<StackTrace> stacktrace_allocator;
-static Span sampled_objects;
-
-// Linked list of stack traces recorded every time we allocated memory
-// from the system. Useful for finding allocation sites that cause
-// increase in the footprint of the system. The linked list pointer
-// is stored in trace->stack[kMaxStackDepth-1].
-static StackTrace* growth_stacks = NULL;
-
-// -------------------------------------------------------------------------
-// Map from page-id to per-page data
-// -------------------------------------------------------------------------
-
-// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
-// We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
-// because sometimes the sizeclass is all the information we need.
-
-// Selector class -- general selector uses 3-level map
-template <int BITS> class MapSelector {
- public:
- typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
- typedef PackedCache<BITS-kPageShift, uint64_t> CacheType;
-};
-
-// A two-level map for 32-bit machines
-template <> class MapSelector<32> {
- public:
- typedef TCMalloc_PageMap2<32-kPageShift> Type;
- typedef PackedCache<32-kPageShift, uint16_t> CacheType;
-};
-
-// -------------------------------------------------------------------------
-// Page-level allocator
-// * Eager coalescing
-//
-// Heap for page-level allocation. We allow allocating and freeing
-// contiguous runs of pages (each called a "span").
-// -------------------------------------------------------------------------
-
-class TCMalloc_PageHeap {
- public:
- TCMalloc_PageHeap();
-
- // Allocate a run of "n" pages. Returns zero if out of memory.
- // Caller should not pass "n == 0" -- instead, n should have
- // been rounded up already.
- Span* New(Length n);
-
- // Delete the span "[p, p+n-1]".
- // REQUIRES: span was returned by earlier call to New() and
- // has not yet been deleted.
- void Delete(Span* span);
-
- // Mark an allocated span as being used for small objects of the
- // specified size-class.
- // REQUIRES: span was returned by an earlier call to New()
- // and has not yet been deleted.
- void RegisterSizeClass(Span* span, size_t sc);
-
- // Split an allocated span into two spans: one of length "n" pages
- // followed by another span of length "span->length - n" pages.
- // Modifies "*span" to point to the first span of length "n" pages.
- // Returns a pointer to the second span.
- //
- // REQUIRES: "0 < n < span->length"
- // REQUIRES: span->location == IN_USE
- // REQUIRES: span->sizeclass == 0
- Span* Split(Span* span, Length n);
-
- // Return the descriptor for the specified page.
- inline Span* GetDescriptor(PageID p) const {
- return reinterpret_cast<Span*>(pagemap_.get(p));
- }
-
- // Dump state to stderr
- void Dump(TCMalloc_Printer* out);
-
- // Return number of bytes allocated from system
- inline uint64_t SystemBytes() const { return system_bytes_; }
-
- // Return number of free bytes in heap
- uint64_t FreeBytes() const {
- return (static_cast<uint64_t>(free_pages_) << kPageShift);
- }
-
- bool Check();
- bool CheckList(Span* list, Length min_pages, Length max_pages,
- int freelist); // ON_NORMAL_FREELIST or ON_RETURNED_FREELIST
-
- // Release all pages on the free list for reuse by the OS:
- void ReleaseFreePages();
-
- // Return 0 if we have no information, or else the correct sizeclass for p.
- // Reads and writes to pagemap_cache_ do not require locking.
- // The entries are 64 bits on 64-bit hardware and 16 bits on
- // 32-bit hardware, and we don't mind raciness as long as each read of
- // an entry yields a valid entry, not a partially updated entry.
- size_t GetSizeClassIfCached(PageID p) const {
- return pagemap_cache_.GetOrDefault(p, 0);
- }
- void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
-
- private:
- // Pick the appropriate map and cache types based on pointer size
- typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
- typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
- PageMap pagemap_;
- mutable PageMapCache pagemap_cache_;
-
- // We segregate spans of a given size into two circular linked
- // lists: one for normal spans, and one for spans whose memory
- // has been returned to the system.
- struct SpanList {
- Span normal;
- Span returned;
- };
-
- // List of free spans of length >= kMaxPages
- SpanList large_;
-
- // Array mapping from span length to a doubly linked list of free spans
- SpanList free_[kMaxPages];
-
- // Number of pages kept in free lists
- uintptr_t free_pages_;
-
- // Bytes allocated from system
- uint64_t system_bytes_;
-
- bool GrowHeap(Length n);
-
- // REQUIRES: span->length >= n
- // REQUIRES: span->location != IN_USE
- // Remove span from its free list, and move any leftover part of
- // span into appropriate free lists. Also update "span" to have
- // length exactly "n" and mark it as non-free so it can be returned
- // to the client. After all that, decrease free_pages_ by n and
- // return span.
- Span* Carve(Span* span, Length n);
-
- void RecordSpan(Span* span) {
- pagemap_.set(span->start, span);
- if (span->length > 1) {
- pagemap_.set(span->start + span->length - 1, span);
- }
- }
-
- // Allocate a large span of length == n. If successful, returns a
- // span of exactly the specified length. Else, returns NULL.
- Span* AllocLarge(Length n);
-
- // Incrementally release some memory to the system.
- // IncrementalScavenge(n) is called whenever n pages are freed.
- void IncrementalScavenge(Length n);
-
- // Number of pages to deallocate before doing more scavenging
- int64_t scavenge_counter_;
-
- // Index of last free list we scavenged
- int scavenge_index_;
-};
-
-TCMalloc_PageHeap::TCMalloc_PageHeap()
- : pagemap_(MetaDataAlloc),
- pagemap_cache_(0),
- free_pages_(0),
- system_bytes_(0),
- scavenge_counter_(0),
- // Start scavenging at kMaxPages list
- scavenge_index_(kMaxPages-1) {
- COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
- DLL_Init(&large_.normal);
- DLL_Init(&large_.returned);
- for (int i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i].normal);
- DLL_Init(&free_[i].returned);
- }
-}
-
-Span* TCMalloc_PageHeap::New(Length n) {
- ASSERT(Check());
- ASSERT(n > 0);
-
- // Find first size >= n that has a non-empty list
- for (Length s = n; s < kMaxPages; s++) {
- Span* ll = &free_[s].normal;
- // If we're lucky, ll is non-empty, meaning it has a suitable span.
- if (!DLL_IsEmpty(ll)) {
- ASSERT(ll->next->location == Span::ON_NORMAL_FREELIST);
- return Carve(ll->next, n);
- }
- // Alternatively, maybe there's a usable returned span.
- ll = &free_[s].returned;
- if (!DLL_IsEmpty(ll)) {
- ASSERT(ll->next->location == Span::ON_RETURNED_FREELIST);
- return Carve(ll->next, n);
- }
- // Still no luck, so keep looking in larger classes.
- }
-
- Span* result = AllocLarge(n);
- if (result != NULL) return result;
-
- // Grow the heap and try again
- if (!GrowHeap(n)) {
- ASSERT(Check());
- return NULL;
- }
-
- return AllocLarge(n);
-}
-
-Span* TCMalloc_PageHeap::AllocLarge(Length n) {
- // find the best span (closest to n in size).
-  // The following loops implement address-ordered best-fit.
- Span *best = NULL;
-
- // Search through normal list
- for (Span* span = large_.normal.next;
- span != &large_.normal;
- span = span->next) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- ASSERT(best->location == Span::ON_NORMAL_FREELIST);
- }
- }
- }
-
- // Search through released list in case it has a better fit
- for (Span* span = large_.returned.next;
- span != &large_.returned;
- span = span->next) {
- if (span->length >= n) {
- if ((best == NULL)
- || (span->length < best->length)
- || ((span->length == best->length) && (span->start < best->start))) {
- best = span;
- ASSERT(best->location == Span::ON_RETURNED_FREELIST);
- }
- }
- }
-
- return best == NULL ? NULL : Carve(best, n);
-}
-
-Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
- ASSERT(0 < n);
- ASSERT(n < span->length);
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->sizeclass == 0);
- Event(span, 'T', n);
-
- const int extra = span->length - n;
- Span* leftover = NewSpan(span->start + n, extra);
- ASSERT(leftover->location == Span::IN_USE);
- Event(leftover, 'U', extra);
- RecordSpan(leftover);
- pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
- span->length = n;
-
- return leftover;
-}
-
-Span* TCMalloc_PageHeap::Carve(Span* span, Length n) {
- ASSERT(n > 0);
- ASSERT(span->location != Span::IN_USE);
- const int old_location = span->location;
- DLL_Remove(span);
- span->location = Span::IN_USE;
- Event(span, 'A', n);
-
- const int extra = span->length - n;
- ASSERT(extra >= 0);
- if (extra > 0) {
- Span* leftover = NewSpan(span->start + n, extra);
- leftover->location = old_location;
- Event(leftover, 'S', extra);
- RecordSpan(leftover);
-
- // Place leftover span on appropriate free list
- SpanList* listpair = (extra < kMaxPages) ? &free_[extra] : &large_;
- Span* dst = (leftover->location == Span::ON_RETURNED_FREELIST
- ? &listpair->returned : &listpair->normal);
- DLL_Prepend(dst, leftover);
-
- span->length = n;
- pagemap_.set(span->start + n - 1, span);
- }
- ASSERT(Check());
- free_pages_ -= n;
- return span;
-}
-
-void TCMalloc_PageHeap::Delete(Span* span) {
- ASSERT(Check());
- ASSERT(span->location == Span::IN_USE);
- ASSERT(span->length > 0);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start + span->length - 1) == span);
- span->sizeclass = 0;
- span->sample = 0;
-
- // Coalesce -- we guarantee that "p" != 0, so no bounds checking
- // necessary. We do not bother resetting the stale pagemap
- // entries for the pieces we are merging together because we only
- // care about the pagemap entries for the boundaries.
- //
- // Note that the spans we merge into "span" may come out of
- // a "returned" list. For simplicity, we move these into the
- // "normal" list of the appropriate size class.
- const PageID p = span->start;
- const Length n = span->length;
- Span* prev = GetDescriptor(p-1);
- if (prev != NULL && prev->location != Span::IN_USE) {
- // Merge preceding span into this span
- ASSERT(prev->start + prev->length == p);
- const Length len = prev->length;
- DLL_Remove(prev);
- DeleteSpan(prev);
- span->start -= len;
- span->length += len;
- pagemap_.set(span->start, span);
- Event(span, 'L', len);
- }
- Span* next = GetDescriptor(p+n);
- if (next != NULL && next->location != Span::IN_USE) {
- // Merge next span into this span
- ASSERT(next->start == p+n);
- const Length len = next->length;
- DLL_Remove(next);
- DeleteSpan(next);
- span->length += len;
- pagemap_.set(span->start + span->length - 1, span);
- Event(span, 'R', len);
- }
-
- Event(span, 'D', span->length);
- span->location = Span::ON_NORMAL_FREELIST;
- if (span->length < kMaxPages) {
- DLL_Prepend(&free_[span->length].normal, span);
- } else {
- DLL_Prepend(&large_.normal, span);
- }
- free_pages_ += n;
-
- IncrementalScavenge(n);
- ASSERT(Check());
-}
-
-void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
- // Fast path; not yet time to release memory
- scavenge_counter_ -= n;
- if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
-
- // Never delay scavenging for more than the following number of
- // deallocated pages. With 4K pages, this comes to 4GB of
- // deallocation.
- static const int kMaxReleaseDelay = 1 << 20;
-
- // If there is nothing to release, wait for so many pages before
- // scavenging again. With 4K pages, this comes to 1GB of memory.
- static const int kDefaultReleaseDelay = 1 << 18;
-
- const double rate = FLAGS_tcmalloc_release_rate;
- if (rate <= 1e-6) {
- // Tiny release rate means that releasing is disabled.
- scavenge_counter_ = kDefaultReleaseDelay;
- return;
- }
-
- // Find index of free list to scavenge
- int index = scavenge_index_ + 1;
- for (int i = 0; i < kMaxPages+1; i++) {
- if (index > kMaxPages) index = 0;
- SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
- if (!DLL_IsEmpty(&slist->normal)) {
- // Release the last span on the normal portion of this list
- Span* s = slist->normal.prev;
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
- DLL_Remove(s);
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
- s->location = Span::ON_RETURNED_FREELIST;
- DLL_Prepend(&slist->returned, s);
-
- // Compute how long to wait until we return memory.
- // FLAGS_tcmalloc_release_rate==1 means wait for 1000 pages
- // after releasing one page.
- const double mult = 1000.0 / rate;
- double wait = mult * static_cast<double>(s->length);
- if (wait > kMaxReleaseDelay) {
- // Avoid overflow and bound to reasonable range
- wait = kMaxReleaseDelay;
- }
- scavenge_counter_ = static_cast<int64_t>(wait);
-
- scavenge_index_ = index; // Scavenge at index+1 next time
- return;
- }
- index++;
- }
-
- // Nothing to scavenge, delay for a while
- scavenge_counter_ = kDefaultReleaseDelay;
-}
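
Illustrative aside (not part of this change): with the default release rate of 1.0, releasing an N-page span defers the next release until roughly 1000*N more pages have been deallocated, capped at kMaxReleaseDelay. A standalone sketch of just that arithmetic:

    // Standalone sketch of the scavenge-delay arithmetic described above.
    #include <assert.h>
    #include <stdint.h>

    static const int kMaxReleaseDelay = 1 << 20;   // never wait more than ~4GB of frees

    // Pages that must be deallocated before the next release, after
    // releasing a span of span_pages at the given release rate.
    static int64_t ScavengeDelay(double rate, int64_t span_pages) {
      const double mult = 1000.0 / rate;           // rate 1.0 => wait 1000 pages per page
      double wait = mult * static_cast<double>(span_pages);
      if (wait > kMaxReleaseDelay) wait = kMaxReleaseDelay;   // clamp to a sane range
      return static_cast<int64_t>(wait);
    }

    int main() {
      assert(ScavengeDelay(1.0, 1) == 1000);                  // ~4MB of frees with 4K pages
      assert(ScavengeDelay(10.0, 1) == 100);                  // higher rate, shorter wait
      assert(ScavengeDelay(1.0, 5000) == kMaxReleaseDelay);   // large span: clamped
      return 0;
    }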
-
-void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
- // Associate span object with all interior pages as well
- ASSERT(span->location == Span::IN_USE);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start+span->length-1) == span);
- Event(span, 'C', sc);
- span->sizeclass = sc;
- for (Length i = 1; i < span->length-1; i++) {
- pagemap_.set(span->start+i, span);
- }
-}
-
-static double PagesToMB(uint64_t pages) {
- return (pages << kPageShift) / 1048576.0;
-}
-
-void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
- int nonempty_sizes = 0;
- for (int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
- nonempty_sizes++;
- }
- }
- out->printf("------------------------------------------------\n");
- out->printf("PageHeap: %d sizes; %6.1f MB free\n",
- nonempty_sizes, PagesToMB(free_pages_));
- out->printf("------------------------------------------------\n");
- uint64_t total_normal = 0;
- uint64_t total_returned = 0;
- for (int s = 0; s < kMaxPages; s++) {
- const int n_length = DLL_Length(&free_[s].normal);
- const int r_length = DLL_Length(&free_[s].returned);
- if (n_length + r_length > 0) {
- uint64_t n_pages = s * n_length;
- uint64_t r_pages = s * r_length;
- total_normal += n_pages;
- total_returned += r_pages;
- out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
- "; unmapped: %6.1f MB; %6.1f MB cum\n",
- s,
- (n_length + r_length),
- PagesToMB(n_pages + r_pages),
- PagesToMB(total_normal + total_returned),
- PagesToMB(r_pages),
- PagesToMB(total_returned));
- }
- }
-
- uint64_t n_pages = 0;
- uint64_t r_pages = 0;
- int n_spans = 0;
- int r_spans = 0;
- out->printf("Normal large spans:\n");
- for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
- out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
- s->length, PagesToMB(s->length));
- n_pages += s->length;
- n_spans++;
- }
- out->printf("Unmapped large spans:\n");
- for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
- out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
- s->length, PagesToMB(s->length));
- r_pages += s->length;
- r_spans++;
- }
- total_normal += n_pages;
- total_returned += r_pages;
- out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
- "; unmapped: %6.1f MB; %6.1f MB cum\n",
- (n_spans + r_spans),
- PagesToMB(n_pages + r_pages),
- PagesToMB(total_normal + total_returned),
- PagesToMB(r_pages),
- PagesToMB(total_returned));
-}
-
-static void RecordGrowth(size_t growth) {
- StackTrace* t = stacktrace_allocator.New();
- t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
- t->size = growth;
- t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(growth_stacks);
- growth_stacks = t;
-}
-
-bool TCMalloc_PageHeap::GrowHeap(Length n) {
- ASSERT(kMaxPages >= kMinSystemAlloc);
- if (n > kMaxValidPages) return false;
- Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- size_t actual_size;
- void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- if (ptr == NULL) {
- if (n < ask) {
- // Try growing just "n" pages
- ask = n;
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
- }
- if (ptr == NULL) return false;
- }
- ask = actual_size >> kPageShift;
- RecordGrowth(ask << kPageShift);
-
- uint64_t old_system_bytes = system_bytes_;
- system_bytes_ += (ask << kPageShift);
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- ASSERT(p > 0);
-
-  // If we already have a lot of pages allocated, just preallocate a bunch of
- // memory for the page map. This prevents fragmentation by pagemap metadata
- // when a program keeps allocating and freeing large blocks.
-
- if (old_system_bytes < kPageMapBigAllocationThreshold
- && system_bytes_ >= kPageMapBigAllocationThreshold) {
- pagemap_.PreallocateMoreMemory();
- }
-
- // Make sure pagemap_ has entries for all of the new pages.
- // Plus ensure one before and one after so coalescing code
- // does not need bounds-checking.
- if (pagemap_.Ensure(p-1, ask+2)) {
- // Pretend the new area is allocated and then Delete() it to
- // cause any necessary coalescing to occur.
- //
- // We do not adjust free_pages_ here since Delete() will do it for us.
- Span* span = NewSpan(p, ask);
- RecordSpan(span);
- Delete(span);
- ASSERT(Check());
- return true;
- } else {
- // We could not allocate memory within "pagemap_"
- // TODO: Once we can return memory to the system, return the new span
- return false;
- }
-}
-
-bool TCMalloc_PageHeap::Check() {
- ASSERT(free_[0].normal.next == &free_[0].normal);
- ASSERT(free_[0].returned.next == &free_[0].returned);
- CheckList(&large_.normal, kMaxPages, 1000000000, Span::ON_NORMAL_FREELIST);
- CheckList(&large_.returned, kMaxPages, 1000000000, Span::ON_RETURNED_FREELIST);
- for (Length s = 1; s < kMaxPages; s++) {
- CheckList(&free_[s].normal, s, s, Span::ON_NORMAL_FREELIST);
- CheckList(&free_[s].returned, s, s, Span::ON_RETURNED_FREELIST);
- }
- return true;
-}
-
-bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages,
- int freelist) {
- for (Span* s = list->next; s != list; s = s->next) {
- CHECK_CONDITION(s->location == freelist); // NORMAL or RETURNED
- CHECK_CONDITION(s->length >= min_pages);
- CHECK_CONDITION(s->length <= max_pages);
- CHECK_CONDITION(GetDescriptor(s->start) == s);
- CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
- }
- return true;
-}
-
-static void ReleaseFreeList(Span* list, Span* returned) {
- // Walk backwards through list so that when we push these
- // spans on the "returned" list, we preserve the order.
- while (!DLL_IsEmpty(list)) {
- Span* s = list->prev;
- DLL_Remove(s);
- DLL_Prepend(returned, s);
- ASSERT(s->location == Span::ON_NORMAL_FREELIST);
- s->location = Span::ON_RETURNED_FREELIST;
- TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
- static_cast<size_t>(s->length << kPageShift));
- }
-}
-
-void TCMalloc_PageHeap::ReleaseFreePages() {
- for (Length s = 0; s < kMaxPages; s++) {
- ReleaseFreeList(&free_[s].normal, &free_[s].returned);
- }
- ReleaseFreeList(&large_.normal, &large_.returned);
- ASSERT(Check());
-}
-
-//-------------------------------------------------------------------
-// Free list
-//-------------------------------------------------------------------
-
-class TCMalloc_ThreadCache_FreeList {
- private:
- void* list_; // Linked list of nodes
-
-#ifdef _LP64
- // On 64-bit hardware, manipulating 16-bit values may be slightly slow.
- // Since it won't cost any space, let's make these fields 32 bits each.
- uint32_t length_; // Current length
- uint32_t lowater_; // Low water mark for list length
-#else
- // If we aren't using 64-bit pointers then pack these into less space.
- uint16_t length_;
- uint16_t lowater_;
-#endif
-
- public:
- void Init() {
- list_ = NULL;
- length_ = 0;
- lowater_ = 0;
- }
-
- // Return current length of list
- size_t length() const {
- return length_;
- }
-
- // Is list empty?
- bool empty() const {
- return list_ == NULL;
- }
-
- // Low-water mark management
- int lowwatermark() const { return lowater_; }
- void clear_lowwatermark() { lowater_ = length_; }
-
- void Push(void* ptr) {
- SLL_Push(&list_, ptr);
- length_++;
- }
-
- void* Pop() {
- ASSERT(list_ != NULL);
- length_--;
- if (length_ < lowater_) lowater_ = length_;
- return SLL_Pop(&list_);
- }
-
- void PushRange(int N, void *start, void *end) {
- SLL_PushRange(&list_, start, end);
- length_ += N;
- }
-
- void PopRange(int N, void **start, void **end) {
- SLL_PopRange(&list_, N, start, end);
- ASSERT(length_ >= N);
- length_ -= N;
- if (length_ < lowater_) lowater_ = length_;
- }
-};
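
Illustrative aside (not from this commit): the low-water mark records the minimum list length seen since the last clear, i.e. how many cached objects went untouched over that interval, which the scavenger can use to decide how much to give back. A simplified standalone sketch of that bookkeeping (counts only, no real objects):

    // Simplified standalone sketch of the low-water-mark bookkeeping above
    // (counts only; the real list also stores the objects themselves).
    #include <cassert>

    class WatermarkCounter {
     public:
      WatermarkCounter() : length_(0), lowater_(0) {}
      void Push() { ++length_; }
      void Pop()  { --length_; if (length_ < lowater_) lowater_ = length_; }
      int length() const { return length_; }
      int lowwatermark() const { return lowater_; }
      void clear_lowwatermark() { lowater_ = length_; }
     private:
      int length_;
      int lowater_;
    };

    int main() {
      WatermarkCounter list;
      for (int i = 0; i < 10; ++i) list.Push();   // 10 cached objects
      list.clear_lowwatermark();                  // start a new observation window
      list.Pop(); list.Pop(); list.Pop();         // demand dips the list to 7
      list.Push(); list.Push();                   // then it refills to 9
      assert(list.length() == 9);
      assert(list.lowwatermark() == 7);           // 7 objects sat unused all along
      return 0;
    }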
-
-//-------------------------------------------------------------------
-// Data kept per thread
-//-------------------------------------------------------------------
-
-class TCMalloc_ThreadCache {
- private:
- typedef TCMalloc_ThreadCache_FreeList FreeList;
-
- // Warning: the offset of list_ affects performance. On general
- // principles, we don't like list_[x] to span multiple L1 cache
- // lines. However, merely placing list_ at offset 0 here seems to
- // cause cache conflicts.
-
- // We sample allocations, biased by the size of the allocation
- size_t bytes_until_sample_; // Bytes until we sample next
- uint32_t rnd_; // Cheap random number generator
-
- size_t size_; // Combined size of data
- pthread_t tid_; // Which thread owns it
- FreeList list_[kNumClasses]; // Array indexed by size-class
- bool in_setspecific_; // In call to pthread_setspecific?
-
- // Allocate a new heap. REQUIRES: pageheap_lock is held.
- static inline TCMalloc_ThreadCache* NewHeap(pthread_t tid);
-
-  // Use only as a pthread thread-specific destructor function.
- static void DestroyThreadCache(void* ptr);
- public:
- // All ThreadCache objects are kept in a linked list (for stats collection)
- TCMalloc_ThreadCache* next_;
- TCMalloc_ThreadCache* prev_;
-
- void Init(pthread_t tid);
- void Cleanup();
-
- // Accessors (mostly just for printing stats)
- int freelist_length(size_t cl) const { return list_[cl].length(); }
-
- // Total byte size in cache
- size_t Size() const { return size_; }
-
- void* Allocate(size_t size);
- void Deallocate(void* ptr, size_t size_class);
-
- // Gets and returns an object from the central cache, and, if possible,
- // also adds some objects of that size class to this thread cache.
- void* FetchFromCentralCache(size_t cl, size_t byte_size);
-
- // Releases N items from this thread cache. Returns size_.
- size_t ReleaseToCentralCache(FreeList* src, size_t cl, int N);
-
- void Scavenge();
- void Print() const;
-
- // Record allocation of "k" bytes. Return true iff allocation
- // should be sampled
- bool SampleAllocation(size_t k);
-
- // Pick next sampling point
- void PickNextSample(size_t k);
-
- static void InitModule();
- static void InitTSD();
- static TCMalloc_ThreadCache* GetThreadHeap();
- static TCMalloc_ThreadCache* GetCache();
- static TCMalloc_ThreadCache* GetCacheIfPresent();
- static TCMalloc_ThreadCache* CreateCacheIfNecessary();
- static void DeleteCache(TCMalloc_ThreadCache* heap);
- static void BecomeIdle();
- static void RecomputeThreadCacheSize();
-};
-
-//-------------------------------------------------------------------
-// Data kept per size-class in central cache
-//-------------------------------------------------------------------
-
-class TCMalloc_Central_FreeList {
- public:
- void Init(size_t cl);
-
- // These methods all do internal locking.
-
- // Insert the specified range into the central freelist. N is the number of
- // elements in the range. RemoveRange() is the opposite operation.
- void InsertRange(void *start, void *end, int N);
-
- // Returns the actual number of fetched elements and sets *start and *end.
- int RemoveRange(void **start, void **end, int N);
-
- // Returns the number of free objects in cache.
- int length() {
- SpinLockHolder h(&lock_);
- return counter_;
- }
-
- // Returns the number of free objects in the transfer cache.
- int tc_length() {
- SpinLockHolder h(&lock_);
- return used_slots_ * num_objects_to_move[size_class_];
- }
-
- private:
- // REQUIRES: lock_ is held
- // Remove object from cache and return.
- // Return NULL if no free entries in cache.
- void* FetchFromSpans();
-
- // REQUIRES: lock_ is held
- // Remove object from cache and return. Fetches
- // from pageheap if cache is empty. Only returns
- // NULL on allocation failure.
- void* FetchFromSpansSafe();
-
- // REQUIRES: lock_ is held
- // Release a linked list of objects to spans.
- // May temporarily release lock_.
- void ReleaseListToSpans(void *start);
-
- // REQUIRES: lock_ is held
- // Release an object to spans.
- // May temporarily release lock_.
- void ReleaseToSpans(void* object);
-
- // REQUIRES: lock_ is held
- // Populate cache by fetching from the page heap.
- // May temporarily release lock_.
- void Populate();
-
- // REQUIRES: lock is held.
- // Tries to make room for a TCEntry. If the cache is full it will try to
- // expand it at the cost of some other cache size. Return false if there is
- // no space.
- bool MakeCacheSpace();
-
- // REQUIRES: lock_ for locked_size_class is held.
-  // Picks a "random" size class to steal a TCEntry slot from. In reality it
- // just iterates over the sizeclasses but does so without taking a lock.
- // Returns true on success.
- // May temporarily lock a "random" size class.
- static bool EvictRandomSizeClass(int locked_size_class, bool force);
-
- // REQUIRES: lock_ is *not* held.
-  // Tries to shrink the Cache. If force is true it will release objects to
-  // spans if that allows it to shrink the cache. Returns false if it failed
-  // to shrink the cache. Decrements cache_size_ on success.
- // May temporarily take lock_. If it takes lock_, the locked_size_class
- // lock is released to keep the thread from holding two size class locks
- // concurrently which could lead to a deadlock.
- bool ShrinkCache(int locked_size_class, bool force);
-
- // This lock protects all the data members. cached_entries and cache_size_
- // may be looked at without holding the lock.
- SpinLock lock_;
-
- // We keep linked lists of empty and non-empty spans.
- size_t size_class_; // My size class
- Span empty_; // Dummy header for list of empty spans
- Span nonempty_; // Dummy header for list of non-empty spans
- size_t counter_; // Number of free objects in cache entry
-
- // Here we reserve space for TCEntry cache slots. Since one size class can
- // end up getting all the TCEntries quota in the system we just preallocate
- // sufficient number of entries here.
- TCEntry tc_slots_[kNumTransferEntries];
-
- // Number of currently used cached entries in tc_slots_. This variable is
- // updated under a lock but can be read without one.
- int32_t used_slots_;
- // The current number of slots for this size class. This is an
- // adaptive value that is increased if there is lots of traffic
- // on a given size class.
- int32_t cache_size_;
-};
-
-// Pad each CentralCache object to multiple of 64 bytes
-class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
- private:
- char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
-};
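
Illustrative aside (not part of this change): the pad_ array rounds each central free list up to a multiple of 64 bytes so adjacent size classes do not share a cache line. A standalone check of the padding arithmetic with a stand-in struct:

    // Standalone illustration of the cache-line padding idiom used above.
    struct Unpadded { char data[200]; };   // stand-in for TCMalloc_Central_FreeList

    struct Padded : public Unpadded {
      char pad_[(64 - (sizeof(Unpadded) % 64)) % 64];
    };

    // 200 % 64 == 8, so 56 bytes of padding bring the total to 256.
    static_assert(sizeof(Padded) % 64 == 0, "padded to a cache-line multiple");

    int main() { return 0; }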
-
-//-------------------------------------------------------------------
-// Global variables
-//-------------------------------------------------------------------
-
-// Central cache -- a collection of free-lists, one per size-class.
-// We have a separate lock per free-list to reduce contention.
-static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
-
-// Page-level allocator
-static SpinLock pageheap_lock(SpinLock::LINKER_INITIALIZED);
-static char pageheap_memory[sizeof(TCMalloc_PageHeap)];
-static bool phinited = false;
-
-// Avoid extra level of indirection by making "pageheap" be just an alias
-// of pageheap_memory.
-#define pageheap ((TCMalloc_PageHeap*) pageheap_memory)
-
-// If TLS is available, we also store a copy
-// of the per-thread object in a __thread variable
-// since __thread variables are faster to read
-// than pthread_getspecific(). We still need
-// pthread_setspecific() because __thread
-// variables provide no way to run cleanup
-// code when a thread is destroyed.
-#ifdef HAVE_TLS
-static __thread TCMalloc_ThreadCache *threadlocal_heap;
-#endif
-// Thread-specific key. Initialization here is somewhat tricky
-// because some Linux startup code invokes malloc() before it
-// is in a good enough state to handle pthread_keycreate().
-// Therefore, we use TSD keys only after tsd_inited is set to true.
-// Until then, we use a slow path to get the heap object.
-static bool tsd_inited = false;
-static pthread_key_t heap_key;
-
-// Allocator for thread heaps
-static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
-
-// Linked list of heap objects. Protected by pageheap_lock.
-static TCMalloc_ThreadCache* thread_heaps = NULL;
-static int thread_heap_count = 0;
-
-// Overall thread cache size. Protected by pageheap_lock.
-static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
-
-// Global per-thread cache size. Writes are protected by
-// pageheap_lock. Reads are done without any locking, which should be
-// fine as long as size_t can be written atomically and we don't place
-// invariants between this variable and other pieces of state.
-static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
-
-//-------------------------------------------------------------------
-// Central cache implementation
-//-------------------------------------------------------------------
-
-void TCMalloc_Central_FreeList::Init(size_t cl) {
- size_class_ = cl;
- DLL_Init(&empty_);
- DLL_Init(&nonempty_);
- counter_ = 0;
-
- cache_size_ = 1;
- used_slots_ = 0;
- ASSERT(cache_size_ <= kNumTransferEntries);
-}
-
-void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
- while (start) {
- void *next = SLL_Next(start);
- ReleaseToSpans(start);
- start = next;
- }
-}
-
-void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
- const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
- ASSERT(span != NULL);
- ASSERT(span->refcount > 0);
-
- // If span is empty, move it to non-empty list
- if (span->objects == NULL) {
- DLL_Remove(span);
- DLL_Prepend(&nonempty_, span);
- Event(span, 'N', 0);
- }
-
- // The following check is expensive, so it is disabled by default
- if (false) {
- // Check that object does not occur in list
- int got = 0;
- for (void* p = span->objects; p != NULL; p = *((void**) p)) {
- ASSERT(p != object);
- got++;
- }
- ASSERT(got + span->refcount ==
- (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
- }
-
- counter_++;
- span->refcount--;
- if (span->refcount == 0) {
- Event(span, '#', 0);
- counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
- DLL_Remove(span);
-
- // Release central list lock while operating on pageheap
- lock_.Unlock();
- {
- SpinLockHolder h(&pageheap_lock);
- pageheap->Delete(span);
- }
- lock_.Lock();
- } else {
- *(reinterpret_cast<void**>(object)) = span->objects;
- span->objects = object;
- }
-}
-
-bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
- int locked_size_class, bool force) {
- static int race_counter = 0;
- int t = race_counter++; // Updated without a lock, but who cares.
- if (t >= kNumClasses) {
- while (t >= kNumClasses) {
- t -= kNumClasses;
- }
- race_counter = t;
- }
- ASSERT(t >= 0);
- ASSERT(t < kNumClasses);
- if (t == locked_size_class) return false;
- return central_cache[t].ShrinkCache(locked_size_class, force);
-}
-
-bool TCMalloc_Central_FreeList::MakeCacheSpace() {
- // Is there room in the cache?
- if (used_slots_ < cache_size_) return true;
-  // Check whether we can expand this cache.
- if (cache_size_ == kNumTransferEntries) return false;
- // Ok, we'll try to grab an entry from some other size class.
- if (EvictRandomSizeClass(size_class_, false) ||
- EvictRandomSizeClass(size_class_, true)) {
- // Succeeded in evicting, we're going to make our cache larger.
- cache_size_++;
- return true;
- }
- return false;
-}
-
-
-namespace {
-class LockInverter {
- private:
- SpinLock *held_, *temp_;
- public:
- inline explicit LockInverter(SpinLock* held, SpinLock *temp)
- : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
- inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
-};
-}
-
-bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
- // Start with a quick check without taking a lock.
- if (cache_size_ == 0) return false;
- // We don't evict from a full cache unless we are 'forcing'.
- if (force == false && used_slots_ == cache_size_) return false;
-
- // Grab lock, but first release the other lock held by this thread. We use
-  // the lock inverter to ensure that we never hold two size-class locks
-  // concurrently; doing so could deadlock because there is no well-defined
-  // nesting order between them.
- LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
- ASSERT(used_slots_ <= cache_size_);
- ASSERT(0 <= cache_size_);
- if (cache_size_ == 0) return false;
- if (used_slots_ == cache_size_) {
- if (force == false) return false;
- // ReleaseListToSpans releases the lock, so we have to make all the
- // updates to the central list before calling it.
- cache_size_--;
- used_slots_--;
- ReleaseListToSpans(tc_slots_[used_slots_].head);
- return true;
- }
- cache_size_--;
- return true;
-}
-
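The lock-inversion idiom used by ShrinkCache generalizes to any pair of locks that have no defined nesting order: drop the lock you already hold before taking the other one, and restore the original state on scope exit, so the thread never holds both at once. A self-contained sketch of the same idiom (not part of this change; it uses std::mutex and made-up lock names rather than tcmalloc's SpinLock, purely for illustration):

    #include <mutex>

    // RAII helper: on construction the caller holds 'held'; swap to holding
    // 'temp' for the scope, then swap back in the destructor, so this thread
    // never holds both locks at the same time.
    class MutexInverter {
     public:
      MutexInverter(std::mutex* held, std::mutex* temp)
          : held_(held), temp_(temp) {
        held_->unlock();
        temp_->lock();
      }
      ~MutexInverter() {
        temp_->unlock();
        held_->lock();
      }
     private:
      std::mutex* held_;
      std::mutex* temp_;
    };

    std::mutex cache_a, cache_b;

    // Precondition: the caller holds cache_a and wants to touch state
    // guarded by cache_b.  Locking cache_b while still holding cache_a could
    // deadlock against a thread doing the opposite, so we invert instead.
    void steal_capacity() {
      MutexInverter inv(&cache_a, &cache_b);
      // ... operate on state guarded by cache_b; cache_a is not held here ...
    }  // inv's destructor re-acquires cache_a before returning to the caller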
-void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
- SpinLockHolder h(&lock_);
- if (N == num_objects_to_move[size_class_] &&
- MakeCacheSpace()) {
- int slot = used_slots_++;
- ASSERT(slot >=0);
- ASSERT(slot < kNumTransferEntries);
- TCEntry *entry = &tc_slots_[slot];
- entry->head = start;
- entry->tail = end;
- return;
- }
- ReleaseListToSpans(start);
-}
-
-int TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int N) {
- ASSERT(N > 0);
- lock_.Lock();
- if (N == num_objects_to_move[size_class_] && used_slots_ > 0) {
- int slot = --used_slots_;
- ASSERT(slot >= 0);
- TCEntry *entry = &tc_slots_[slot];
- *start = entry->head;
- *end = entry->tail;
- lock_.Unlock();
- return N;
- }
-
- int result = 0;
- void* head = NULL;
- void* tail = NULL;
- // TODO: Prefetch multiple TCEntries?
- tail = FetchFromSpansSafe();
- if (tail != NULL) {
- SLL_SetNext(tail, NULL);
- head = tail;
- result = 1;
- while (result < N) {
- void *t = FetchFromSpans();
- if (!t) break;
- SLL_Push(&head, t);
- result++;
- }
- }
- lock_.Unlock();
- *start = head;
- *end = tail;
- return result;
-}
-
-
-void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
- void *t = FetchFromSpans();
- if (!t) {
- Populate();
- t = FetchFromSpans();
- }
- return t;
-}
-
-void* TCMalloc_Central_FreeList::FetchFromSpans() {
- if (DLL_IsEmpty(&nonempty_)) return NULL;
- Span* span = nonempty_.next;
-
- ASSERT(span->objects != NULL);
- span->refcount++;
- void* result = span->objects;
- span->objects = *(reinterpret_cast<void**>(result));
- if (span->objects == NULL) {
- // Move to empty list
- DLL_Remove(span);
- DLL_Prepend(&empty_, span);
- Event(span, 'E', 0);
- }
- counter_--;
- return result;
-}
-
-// Fetch memory from the system and add to the central cache freelist.
-void TCMalloc_Central_FreeList::Populate() {
- // Release central list lock while operating on pageheap
- lock_.Unlock();
- const size_t npages = class_to_pages[size_class_];
-
- Span* span;
- {
- SpinLockHolder h(&pageheap_lock);
- span = pageheap->New(npages);
- if (span) pageheap->RegisterSizeClass(span, size_class_);
- }
- if (span == NULL) {
- MESSAGE("allocation failed: %d\n", errno);
- lock_.Lock();
- return;
- }
- ASSERT(span->length == npages);
- // Cache sizeclass info eagerly. Locking is not necessary.
- // (Instead of being eager, we could just replace any stale info
- // about this span, but that seems to be no better in practice.)
- for (int i = 0; i < npages; i++) {
- pageheap->CacheSizeClass(span->start + i, size_class_);
- }
-
- // Split the block into pieces and add to the free-list
- // TODO: coloring of objects to avoid cache conflicts?
- void** tail = &span->objects;
- char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
- char* limit = ptr + (npages << kPageShift);
- const size_t size = ByteSizeForClass(size_class_);
- int num = 0;
- while (ptr + size <= limit) {
- *tail = ptr;
- tail = reinterpret_cast<void**>(ptr);
- ptr += size;
- num++;
- }
- ASSERT(ptr <= limit);
- *tail = NULL;
- span->refcount = 0; // No sub-object in use yet
-
- // Add span to list of non-empty spans
- lock_.Lock();
- DLL_Prepend(&nonempty_, span);
- counter_ += num;
-}
-
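Populate threads an intrusive singly-linked free list through the freshly allocated span: each free object's first word stores the pointer to the next object, so no side storage is needed. A stripped-down sketch of the same carving loop over an ordinary static buffer (buffer and object sizes here are arbitrary, not taken from the size-class tables):

    #include <cstdio>

    int main() {
      static char buffer[4096];     // stands in for the span's pages
      const size_t size = 64;       // stands in for ByteSizeForClass(cl)

      // *tail is where the link to the next object gets written: initially
      // the list head, afterwards the first word of the previous object.
      void* head = NULL;
      void** tail = &head;
      char* ptr = buffer;
      char* limit = buffer + sizeof(buffer);
      int num = 0;
      while (ptr + size <= limit) {
        *tail = ptr;                            // link previous node to ptr
        tail = reinterpret_cast<void**>(ptr);   // ptr's first word is the next link
        ptr += size;
        num++;
      }
      *tail = NULL;                             // terminate the list

      std::printf("carved %d objects of %u bytes\n", num,
                  static_cast<unsigned>(size));
      return 0;
    }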
-//-------------------------------------------------------------------
-// TCMalloc_ThreadCache implementation
-//-------------------------------------------------------------------
-
-inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
- if (bytes_until_sample_ < k) {
- PickNextSample(k);
- return true;
- } else {
- bytes_until_sample_ -= k;
- return false;
- }
-}
-
-void TCMalloc_ThreadCache::Init(pthread_t tid) {
- size_ = 0;
- next_ = NULL;
- prev_ = NULL;
- tid_ = tid;
- in_setspecific_ = false;
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
- list_[cl].Init();
- }
-
- // Initialize RNG -- run it for a bit to get to good values
- bytes_until_sample_ = 0;
- rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
- for (int i = 0; i < 100; i++) {
- PickNextSample(FLAGS_tcmalloc_sample_parameter * 2);
- }
-}
-
-void TCMalloc_ThreadCache::Cleanup() {
- // Put unused memory back into central cache
- for (int cl = 0; cl < kNumClasses; ++cl) {
- if (list_[cl].length() > 0) {
- ReleaseToCentralCache(&list_[cl], cl, list_[cl].length());
- }
- }
-}
-
-inline void* TCMalloc_ThreadCache::Allocate(size_t size) {
- ASSERT(size <= kMaxSize);
- const size_t cl = SizeClass(size);
- const size_t alloc_size = ByteSizeForClass(cl);
- FreeList* list = &list_[cl];
- if (list->empty()) {
- return FetchFromCentralCache(cl, alloc_size);
- }
- size_ -= alloc_size;
- return list->Pop();
-}
-
-inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
- FreeList* list = &list_[cl];
- ssize_t list_headroom =
- static_cast<ssize_t>(kMaxFreeListLength - 1) - list->length();
- size_ += ByteSizeForClass(cl);
- size_t cache_size = size_;
- ssize_t size_headroom = per_thread_cache_size - cache_size - 1;
- list->Push(ptr);
-
-  // There are two relatively uncommon conditions that require further work;
-  // in the common case we are done.  The bitwise-or trick below lets us
-  // test for both conditions with a single branch.
- if ((list_headroom | size_headroom) < 0) {
- if (list_headroom < 0) {
- cache_size = ReleaseToCentralCache(list, cl, num_objects_to_move[cl]);
- }
- if (cache_size >= per_thread_cache_size) Scavenge();
- }
-}
-
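The single branch mentioned above comes from a sign-bit trick: the bitwise OR of two signed values is negative exactly when at least one of them is negative, so one comparison covers both "free list over its length limit" and "thread cache over its byte budget". A tiny standalone illustration (the headroom values are made up):

    #include <cstdio>

    int main() {
      // Headroom left in the per-class free list and in the whole thread
      // cache; either one going negative means extra work is required.
      long list_headroom = 3;     // the list still has room
      long size_headroom = -40;   // the cache just went over budget
      if ((list_headroom | size_headroom) < 0) {
        // The sign bit of the OR is set iff at least one headroom is
        // negative, so the common case costs exactly one branch.
        std::printf("slow path: release to central cache / scavenge\n");
      } else {
        std::printf("fast path: done\n");
      }
      return 0;
    }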
-// Remove some objects of class "cl" from central cache and add to thread heap.
-// On success, return the first object for immediate use; otherwise return NULL.
-void* TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t byte_size) {
- void *start, *end;
- int fetch_count = central_cache[cl].RemoveRange(&start, &end,
- num_objects_to_move[cl]);
- ASSERT((start == NULL) == (fetch_count == 0));
- if (--fetch_count >= 0) {
- size_ += byte_size * fetch_count;
- list_[cl].PushRange(fetch_count, SLL_Next(start), end);
- }
- return start;
-}
-
-// Remove some objects of class "cl" from thread heap and add to central cache
-size_t TCMalloc_ThreadCache::ReleaseToCentralCache(FreeList* src,
- size_t cl, int N) {
- ASSERT(src == &list_[cl]);
- if (N > src->length()) N = src->length();
- size_t delta_bytes = N * ByteSizeForClass(cl);
-
- // We return prepackaged chains of the correct size to the central cache.
- // TODO: Use the same format internally in the thread caches?
- int batch_size = num_objects_to_move[cl];
- while (N > batch_size) {
- void *tail, *head;
- src->PopRange(batch_size, &head, &tail);
- central_cache[cl].InsertRange(head, tail, batch_size);
- N -= batch_size;
- }
- void *tail, *head;
- src->PopRange(N, &head, &tail);
- central_cache[cl].InsertRange(head, tail, N);
- return size_ -= delta_bytes;
-}
-
-// Release idle memory to the central cache
-void TCMalloc_ThreadCache::Scavenge() {
- // If the low-water mark for the free list is L, it means we would
- // not have had to allocate anything from the central cache even if
- // we had reduced the free list size by L. We aim to get closer to
- // that situation by dropping L/2 nodes from the free list. This
-  // may not release much memory; if it does not, we will call scavenge
-  // again fairly soon and the low-water marks will be high on that call.
- //int64 start = CycleClock::Now();
-
- for (int cl = 0; cl < kNumClasses; cl++) {
- FreeList* list = &list_[cl];
- const int lowmark = list->lowwatermark();
- if (lowmark > 0) {
- const int drop = (lowmark > 1) ? lowmark/2 : 1;
- ReleaseToCentralCache(list, cl, drop);
- }
- list->clear_lowwatermark();
- }
-
- //int64 finish = CycleClock::Now();
- //CycleTimer ct;
- //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
-}
-
-void TCMalloc_ThreadCache::PickNextSample(size_t k) {
- // Make next "random" number
- // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
- static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
- uint32_t r = rnd_;
- rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
-
- // Next point is "rnd_ % (sample_period)". I.e., average
- // increment is "sample_period/2".
- const int flag_value = FLAGS_tcmalloc_sample_parameter;
- static int last_flag_value = -1;
-
- if (flag_value != last_flag_value) {
- SpinLockHolder h(&sample_period_lock);
- int i;
- for (i = 0; i < (sizeof(primes_list)/sizeof(primes_list[0]) - 1); i++) {
- if (primes_list[i] >= flag_value) {
- break;
- }
- }
- sample_period = primes_list[i];
- last_flag_value = flag_value;
- }
-
- bytes_until_sample_ += rnd_ % sample_period;
-
- if (k > (static_cast<size_t>(-1) >> 2)) {
- // If the user has asked for a huge allocation then it is possible
- // for the code below to loop infinitely. Just return (note that
- // this throws off the sampling accuracy somewhat, but a user who
- // is allocating more than 1G of memory at a time can live with a
- // minor inaccuracy in profiling of small allocations, and also
- // would rather not wait for the loop below to terminate).
- return;
- }
-
- while (bytes_until_sample_ < k) {
- // Increase bytes_until_sample_ by enough average sampling periods
- // (sample_period >> 1) to allow us to sample past the current
- // allocation.
- bytes_until_sample_ += (sample_period >> 1);
- }
-
- bytes_until_sample_ -= k;
-}
-
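The "random" number in PickNextSample is a 32-bit Galois LFSR: shift left by one, and whenever the bit shifted out of the top is 1, XOR in the feedback taps of x^32+x^22+x^2+x+1 (the x^32 term is the bit that falls off). A standalone sketch of just the update step; the seed below is arbitrary, whereas the thread cache seeds from its own address:

    #include <cstdio>
    #include <stdint.h>

    int main() {
      // Feedback taps for x^32 + x^22 + x^2 + x + 1 (the x^32 term is implicit).
      const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
      uint32_t rnd = 0x9E3779B9u;   // arbitrary nonzero seed for this sketch
      for (int i = 0; i < 8; ++i) {
        // If the top bit is set, (int32_t)rnd >> 31 is all ones and the
        // polynomial gets XORed in; otherwise the mask is zero.
        rnd = (rnd << 1) ^ ((static_cast<int32_t>(rnd) >> 31) & kPoly);
        std::printf("%08x\n", rnd);
      }
      return 0;
    }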
-void TCMalloc_ThreadCache::InitModule() {
- // There is a slight potential race here because of double-checked
- // locking idiom. However, as long as the program does a small
- // allocation before switching to multi-threaded mode, we will be
- // fine. We increase the chances of doing such a small allocation
- // by doing one in the constructor of the module_enter_exit_hook
- // object declared below.
- SpinLockHolder h(&pageheap_lock);
- if (!phinited) {
- InitSizeClasses();
- threadheap_allocator.Init();
- span_allocator.Init();
- span_allocator.New(); // Reduce cache conflicts
- span_allocator.New(); // Reduce cache conflicts
- stacktrace_allocator.Init();
- DLL_Init(&sampled_objects);
- for (int i = 0; i < kNumClasses; ++i) {
- central_cache[i].Init(i);
- }
- new ((void*)pageheap_memory) TCMalloc_PageHeap;
- phinited = 1;
- }
-}
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(pthread_t tid) {
- // Create the heap and add it to the linked list
- TCMalloc_ThreadCache *heap = threadheap_allocator.New();
- heap->Init(tid);
- heap->next_ = thread_heaps;
- heap->prev_ = NULL;
- if (thread_heaps != NULL) thread_heaps->prev_ = heap;
- thread_heaps = heap;
- thread_heap_count++;
- RecomputeThreadCacheSize();
- return heap;
-}
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
-#ifdef HAVE_TLS
- // __thread is faster, but only when the kernel supports it
- if (KernelSupportsTLS())
- return threadlocal_heap;
-#endif
- return reinterpret_cast<TCMalloc_ThreadCache *>(
- perftools_pthread_getspecific(heap_key));
-}
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
- TCMalloc_ThreadCache* ptr = NULL;
- if (!tsd_inited) {
- InitModule();
- } else {
- ptr = GetThreadHeap();
- }
- if (ptr == NULL) ptr = CreateCacheIfNecessary();
- return ptr;
-}
-
-// In deletion paths, we do not try to create a thread-cache. This is
-// because we may be in the thread destruction code and may have
-// already cleaned up the cache for this thread.
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
- if (!tsd_inited) return NULL;
- void* const p = GetThreadHeap();
- return reinterpret_cast<TCMalloc_ThreadCache*>(p);
-}
-
-void TCMalloc_ThreadCache::InitTSD() {
- ASSERT(!tsd_inited);
- perftools_pthread_key_create(&heap_key, DestroyThreadCache);
- tsd_inited = true;
-
- // We may have used a fake pthread_t for the main thread. Fix it.
- pthread_t zero;
- memset(&zero, 0, sizeof(zero));
- SpinLockHolder h(&pageheap_lock);
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- if (h->tid_ == zero) {
- h->tid_ = pthread_self();
- }
- }
-}
-
-TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
- // Initialize per-thread data if necessary
- TCMalloc_ThreadCache* heap = NULL;
- {
- SpinLockHolder h(&pageheap_lock);
-
- // Early on in glibc's life, we cannot even call pthread_self()
- pthread_t me;
- if (!tsd_inited) {
- memset(&me, 0, sizeof(me));
- } else {
- me = pthread_self();
- }
-
- // This may be a recursive malloc call from pthread_setspecific()
- // In that case, the heap for this thread has already been created
- // and added to the linked list. So we search for that first.
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- if (h->tid_ == me) {
- heap = h;
- break;
- }
- }
-
- if (heap == NULL) heap = NewHeap(me);
- }
-
- // We call pthread_setspecific() outside the lock because it may
- // call malloc() recursively. We check for the recursive call using
- // the "in_setspecific_" flag so that we can avoid calling
- // pthread_setspecific() if we are already inside pthread_setspecific().
- if (!heap->in_setspecific_ && tsd_inited) {
- heap->in_setspecific_ = true;
- perftools_pthread_setspecific(heap_key, heap);
-#ifdef HAVE_TLS
- // Also keep a copy in __thread for faster retrieval
- threadlocal_heap = heap;
-#endif
- heap->in_setspecific_ = false;
- }
- return heap;
-}
-
-void TCMalloc_ThreadCache::BecomeIdle() {
- if (!tsd_inited) return; // No caches yet
- TCMalloc_ThreadCache* heap = GetThreadHeap();
- if (heap == NULL) return; // No thread cache to remove
- if (heap->in_setspecific_) return; // Do not disturb the active caller
-
- heap->in_setspecific_ = true;
- perftools_pthread_setspecific(heap_key, NULL);
-#ifdef HAVE_TLS
- // Also update the copy in __thread
- threadlocal_heap = NULL;
-#endif
- heap->in_setspecific_ = false;
- if (GetThreadHeap() == heap) {
- // Somehow heap got reinstated by a recursive call to malloc
- // from pthread_setspecific. We give up in this case.
- return;
- }
-
- // We can now get rid of the heap
- DeleteCache(heap);
-}
-
-void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
- // Note that "ptr" cannot be NULL since pthread promises not
- // to invoke the destructor on NULL values, but for safety,
- // we check anyway.
- if (ptr == NULL) return;
-#ifdef HAVE_TLS
- // Prevent fast path of GetThreadHeap() from returning heap.
- threadlocal_heap = NULL;
-#endif
- DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
-}
-
-void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
- // Remove all memory from heap
- heap->Cleanup();
-
- // Remove from linked list
- SpinLockHolder h(&pageheap_lock);
- if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
- if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
- if (thread_heaps == heap) thread_heaps = heap->next_;
- thread_heap_count--;
- RecomputeThreadCacheSize();
-
- threadheap_allocator.Delete(heap);
-}
-
-void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
- // Divide available space across threads
- int n = thread_heap_count > 0 ? thread_heap_count : 1;
- size_t space = overall_thread_cache_size / n;
-
- // Limit to allowed range
- if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
- if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
-
- per_thread_cache_size = space;
- //MESSAGE("Threads %d => cache size %8d\n", n, int(space));
-}
-
-void TCMalloc_ThreadCache::Print() const {
- for (int cl = 0; cl < kNumClasses; ++cl) {
- MESSAGE(" %5" PRIuS " : %4" PRIuS " len; %4d lo\n",
- ByteSizeForClass(cl),
- list_[cl].length(),
- list_[cl].lowwatermark());
- }
-}
-
// Extract interesting stats
struct TCMallocStats {
uint64_t system_bytes; // Bytes alloced from system
@@ -2312,33 +188,27 @@ static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
r->central_bytes = 0;
r->transfer_bytes = 0;
for (int cl = 0; cl < kNumClasses; ++cl) {
- const int length = central_cache[cl].length();
- const int tc_length = central_cache[cl].tc_length();
- r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
- r->transfer_bytes +=
- static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
+ const int length = Static::central_cache()[cl].length();
+ const int tc_length = Static::central_cache()[cl].tc_length();
+ const size_t size = static_cast<uint64_t>(
+ Static::sizemap()->ByteSizeForClass(cl));
+ r->central_bytes += (size * length);
+ r->transfer_bytes += (size * tc_length);
if (class_count) class_count[cl] = length + tc_length;
}
// Add stats from per-thread heaps
r->thread_bytes = 0;
{ // scope
- SpinLockHolder h(&pageheap_lock);
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- r->thread_bytes += h->Size();
- if (class_count) {
- for (int cl = 0; cl < kNumClasses; ++cl) {
- class_count[cl] += h->freelist_length(cl);
- }
- }
- }
+ SpinLockHolder h(Static::pageheap_lock());
+ ThreadCache::GetThreadStats(&r->thread_bytes, class_count);
}
{ //scope
- SpinLockHolder h(&pageheap_lock);
- r->system_bytes = pageheap->SystemBytes();
- r->metadata_bytes = metadata_system_bytes;
- r->pageheap_bytes = pageheap->FreeBytes();
+ SpinLockHolder h(Static::pageheap_lock());
+ r->system_bytes = Static::pageheap()->SystemBytes();
+ r->metadata_bytes = tcmalloc::metadata_system_bytes();
+ r->pageheap_bytes = Static::pageheap()->FreeBytes();
}
}
@@ -2348,24 +218,27 @@ static void DumpStats(TCMalloc_Printer* out, int level) {
uint64_t class_count[kNumClasses];
ExtractStats(&stats, (level >= 2 ? class_count : NULL));
+ static const double MB = 1048576.0;
+
if (level >= 2) {
out->printf("------------------------------------------------\n");
uint64_t cumulative = 0;
for (int cl = 0; cl < kNumClasses; ++cl) {
if (class_count[cl] > 0) {
- uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
+ uint64_t class_bytes =
+ class_count[cl] * Static::sizemap()->ByteSizeForClass(cl);
cumulative += class_bytes;
out->printf("class %3d [ %8" PRIuS " bytes ] : "
"%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
- cl, ByteSizeForClass(cl),
+ cl, Static::sizemap()->ByteSizeForClass(cl),
class_count[cl],
- class_bytes / 1048576.0,
- cumulative / 1048576.0);
+ class_bytes / MB,
+ cumulative / MB);
}
}
- SpinLockHolder h(&pageheap_lock);
- pageheap->Dump(out);
+ SpinLockHolder h(Static::pageheap_lock());
+ Static::pageheap()->Dump(out);
out->printf("------------------------------------------------\n");
DumpSystemAllocatorStats(out);
@@ -2378,25 +251,25 @@ static void DumpStats(TCMalloc_Printer* out, int level) {
- stats.thread_bytes;
out->printf("------------------------------------------------\n"
- "MALLOC: %12" PRIu64 " Heap size\n"
- "MALLOC: %12" PRIu64 " Bytes in use by application\n"
- "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
- "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
- "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
- "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
- "MALLOC: %12" PRIu64 " Spans in use\n"
- "MALLOC: %12" PRIu64 " Thread heaps in use\n"
- "MALLOC: %12" PRIu64 " Metadata allocated\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Heap size\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes in use by application\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in page heap\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in central cache\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in transfer cache\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Bytes free in thread caches\n"
+ "MALLOC: %12" PRIu64 " Spans in use\n"
+ "MALLOC: %12" PRIu64 " Thread heaps in use\n"
+ "MALLOC: %12" PRIu64 " (%7.1f MB) Metadata allocated\n"
"------------------------------------------------\n",
- stats.system_bytes,
- bytes_in_use,
- stats.pageheap_bytes,
- stats.central_bytes,
- stats.transfer_bytes,
- stats.thread_bytes,
- uint64_t(span_allocator.inuse()),
- uint64_t(threadheap_allocator.inuse()),
- stats.metadata_bytes);
+ stats.system_bytes, stats.system_bytes / MB,
+ bytes_in_use, bytes_in_use / MB,
+ stats.pageheap_bytes, stats.pageheap_bytes / MB,
+ stats.central_bytes, stats.central_bytes / MB,
+ stats.transfer_bytes, stats.transfer_bytes / MB,
+ stats.thread_bytes, stats.thread_bytes / MB,
+ uint64_t(Static::span_allocator()->inuse()),
+ uint64_t(ThreadCache::HeapsInUse()),
+ stats.metadata_bytes, stats.metadata_bytes / MB);
}
static void PrintStats(int level) {
@@ -2412,8 +285,9 @@ static void** DumpStackTraces() {
// Count how much space we need
int needed_slots = 0;
{
- SpinLockHolder h(&pageheap_lock);
- for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
+ SpinLockHolder h(Static::pageheap_lock());
+ Span* sampled = Static::sampled_objects();
+ for (Span* s = sampled->next; s != sampled; s = s->next) {
StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
needed_slots += 3 + stack->depth;
}
@@ -2428,9 +302,10 @@ static void** DumpStackTraces() {
return NULL;
}
- SpinLockHolder h(&pageheap_lock);
+ SpinLockHolder h(Static::pageheap_lock());
int used_slots = 0;
- for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
+ Span* sampled = Static::sampled_objects();
+ for (Span* s = sampled->next; s != sampled; s = s->next) {
ASSERT(used_slots < needed_slots); // Need to leave room for terminator
StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
if (used_slots + 3 + stack->depth >= needed_slots) {
@@ -2454,10 +329,11 @@ static void** DumpHeapGrowthStackTraces() {
// Count how much space we need
int needed_slots = 0;
{
- SpinLockHolder h(&pageheap_lock);
- for (StackTrace* t = growth_stacks;
+ SpinLockHolder h(Static::pageheap_lock());
+ for (StackTrace* t = Static::growth_stacks();
t != NULL;
- t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth-1])) {
+ t = reinterpret_cast<StackTrace*>(
+ t->stack[tcmalloc::kMaxStackDepth-1])) {
needed_slots += 3 + t->depth;
}
needed_slots += 100; // Slop in case list grows
@@ -2471,11 +347,12 @@ static void** DumpHeapGrowthStackTraces() {
return NULL;
}
- SpinLockHolder h(&pageheap_lock);
+ SpinLockHolder h(Static::pageheap_lock());
int used_slots = 0;
- for (StackTrace* t = growth_stacks;
+ for (StackTrace* t = Static::growth_stacks();
t != NULL;
- t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth-1])) {
+ t = reinterpret_cast<StackTrace*>(
+ t->stack[tcmalloc::kMaxStackDepth-1])) {
ASSERT(used_slots < needed_slots); // Need to leave room for terminator
if (used_slots + 3 + t->depth >= needed_slots) {
// No more room
@@ -2541,14 +418,14 @@ class TCMallocImplementation : public MallocExtension {
if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
// We assume that bytes in the page heap are not fragmented too
// badly, and are therefore available for allocation.
- SpinLockHolder l(&pageheap_lock);
- *value = pageheap->FreeBytes();
+ SpinLockHolder l(Static::pageheap_lock());
+ *value = Static::pageheap()->FreeBytes();
return true;
}
if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(&pageheap_lock);
- *value = overall_thread_cache_size;
+ SpinLockHolder l(Static::pageheap_lock());
+ *value = ThreadCache::overall_thread_cache_size();
return true;
}
@@ -2566,13 +443,8 @@ class TCMallocImplementation : public MallocExtension {
ASSERT(name != NULL);
if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- // Clip the value to a reasonable range
- if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
- if (value > (1<<30)) value = (1<<30); // Limit to 1GB
-
- SpinLockHolder l(&pageheap_lock);
- overall_thread_cache_size = static_cast<size_t>(value);
- TCMalloc_ThreadCache::RecomputeThreadCacheSize();
+ SpinLockHolder l(Static::pageheap_lock());
+ ThreadCache::set_overall_thread_cache_size(value);
return true;
}
@@ -2580,12 +452,20 @@ class TCMallocImplementation : public MallocExtension {
}
virtual void MarkThreadIdle() {
- TCMalloc_ThreadCache::BecomeIdle();
+ ThreadCache::BecomeIdle();
}
virtual void ReleaseFreeMemory() {
- SpinLockHolder h(&pageheap_lock);
- pageheap->ReleaseFreePages();
+ SpinLockHolder h(Static::pageheap_lock());
+ Static::pageheap()->ReleaseFreePages();
+ }
+
+ virtual void SetMemoryReleaseRate(double rate) {
+ FLAGS_tcmalloc_release_rate = rate;
+ }
+
+ virtual double GetMemoryReleaseRate() {
+ return FLAGS_tcmalloc_release_rate;
}
};
@@ -2601,35 +481,33 @@ class TCMallocImplementation : public MallocExtension {
// well for STL).
//
// The destructor prints stats when the program exits.
-class TCMallocGuard {
- public:
-
- TCMallocGuard() {
+static int tcmallocguard_refcount = 0; // no lock needed: runs before main()
+TCMallocGuard::TCMallocGuard() {
+ if (tcmallocguard_refcount++ == 0) {
#ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
// Check whether the kernel also supports TLS (needs to happen at runtime)
- CheckIfKernelSupportsTLS();
+ tcmalloc::CheckIfKernelSupportsTLS();
#endif
-#ifdef WIN32 // patch the windows VirtualAlloc, etc.
+#ifdef _WIN32 // patch the windows VirtualAlloc, etc.
PatchWindowsFunctions(); // defined in windows/patch_functions.cc
#endif
free(malloc(1));
- TCMalloc_ThreadCache::InitTSD();
+ ThreadCache::InitTSD();
free(malloc(1));
MallocExtension::Register(new TCMallocImplementation);
}
+}
- ~TCMallocGuard() {
+TCMallocGuard::~TCMallocGuard() {
+ if (--tcmallocguard_refcount == 0) {
const char* env = getenv("MALLOCSTATS");
if (env != NULL) {
int level = atoi(env);
if (level < 1) level = 1;
PrintStats(level);
}
-#ifdef WIN32
- UnpatchWindowsFunctions();
-#endif
}
-};
+}
static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
@@ -2637,21 +515,20 @@ static TCMallocGuard module_enter_exit_hook;
//-------------------------------------------------------------------
static Span* DoSampledAllocation(size_t size) {
-
// Grab the stack trace outside the heap lock
StackTrace tmp;
- tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
+ tmp.depth = GetStackTrace(tmp.stack, tcmalloc::kMaxStackDepth, 1);
tmp.size = size;
- SpinLockHolder h(&pageheap_lock);
+ SpinLockHolder h(Static::pageheap_lock());
// Allocate span
- Span *span = pageheap->New(pages(size == 0 ? 1 : size));
+ Span *span = Static::pageheap()->New(tcmalloc::pages(size == 0 ? 1 : size));
if (span == NULL) {
return NULL;
}
// Allocate stack trace
- StackTrace *stack = stacktrace_allocator.New();
+ StackTrace *stack = Static::stacktrace_allocator()->New();
if (stack == NULL) {
// Sampling failed because of lack of memory
return span;
@@ -2660,16 +537,16 @@ static Span* DoSampledAllocation(size_t size) {
*stack = tmp;
span->sample = 1;
span->objects = stack;
- DLL_Prepend(&sampled_objects, span);
+ tcmalloc::DLL_Prepend(Static::sampled_objects(), span);
return span;
}
static inline bool CheckCachedSizeClass(void *ptr) {
PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- size_t cached_value = pageheap->GetSizeClassIfCached(p);
+ size_t cached_value = Static::pageheap()->GetSizeClassIfCached(p);
return cached_value == 0 ||
- cached_value == pageheap->GetDescriptor(p)->sizeclass;
+ cached_value == Static::pageheap()->GetDescriptor(p)->sizeclass;
}
static inline void* CheckedMallocResult(void *result)
@@ -2679,7 +556,7 @@ static inline void* CheckedMallocResult(void *result)
}
static inline void* SpanToMallocResult(Span *span) {
- pageheap->CacheSizeClass(span->start, 0);
+ Static::pageheap()->CacheSizeClass(span->start, 0);
return
CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
}
@@ -2692,13 +569,13 @@ static int64_t large_alloc_threshold =
static void ReportLargeAlloc(Length num_pages, void* result) {
StackTrace stack;
- stack.depth = GetStackTrace(stack.stack, kMaxStackDepth, 1);
+ stack.depth = GetStackTrace(stack.stack, tcmalloc::kMaxStackDepth, 1);
static const int N = 1000;
char buffer[N];
TCMalloc_Printer printer(buffer, N);
printer.printf("tcmalloc: large alloc %lld bytes == %p @ ",
- static_cast<long long>(num_pages << kPageShift),
+ static_cast<long long>(num_pages) << kPageShift,
result);
for (int i = 0; i < stack.depth; i++) {
printer.printf(" %p", stack.stack[i]);
@@ -2707,13 +584,27 @@ static void ReportLargeAlloc(Length num_pages, void* result) {
write(STDERR_FILENO, buffer, strlen(buffer));
}
+// These routines are called by free() and realloc() if the pointer is
+// invalid. This is a cheap (source-editing required) kind of exception
+// handling for these routines.
+namespace {
+void InvalidFree(void* ptr) {
+ CRASH("Attempt to free invalid pointer: %p\n", ptr);
+}
+
+void* InvalidRealloc(void* old_ptr, size_t new_size) {
+ CRASH("Attempt to realloc invalid pointer: %p (realloc to %" PRIuS ")\n",
+ old_ptr, new_size);
+ return NULL;
+}
+
// Helper for do_malloc().
-static inline void* do_malloc_pages(Length num_pages) {
+inline void* do_malloc_pages(Length num_pages) {
Span *span;
bool report_large = false;
{
- SpinLockHolder h(&pageheap_lock);
- span = pageheap->New(num_pages);
+ SpinLockHolder h(Static::pageheap_lock());
+ span = Static::pageheap()->New(num_pages);
const int64 threshold = large_alloc_threshold;
if (num_pages >= (threshold >> kPageShift)) {
// Increase the threshold by 1/8 every time we generate a report.
@@ -2731,11 +622,11 @@ static inline void* do_malloc_pages(Length num_pages) {
return result;
}
-static inline void* do_malloc(size_t size) {
+inline void* do_malloc(size_t size) {
void* ret = NULL;
// The following call forces module initialization
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
+ ThreadCache* heap = ThreadCache::GetCache();
if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
Span* span = DoSampledAllocation(size);
if (span != NULL) {
@@ -2746,47 +637,153 @@ static inline void* do_malloc(size_t size) {
// size-appropriate freelist, after replenishing it if it's empty.
ret = CheckedMallocResult(heap->Allocate(size));
} else {
- ret = do_malloc_pages(pages(size));
+ ret = do_malloc_pages(tcmalloc::pages(size));
}
if (ret == NULL) errno = ENOMEM;
return ret;
}
-static inline void do_free(void* ptr) {
+inline void* do_calloc(size_t n, size_t elem_size) {
+ // Overflow check
+ const size_t size = n * elem_size;
+ if (elem_size != 0 && size / elem_size != n) return NULL;
+
+ void* result = do_malloc(size);
+ if (result != NULL) {
+ memset(result, 0, size);
+ }
+ return result;
+}
+
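The overflow guard in do_calloc works because unsigned multiplication wraps: if n * elem_size overflowed, dividing the wrapped product back by elem_size can no longer reproduce n. A small sketch of the check in isolation, on a fixed 32-bit type so the wrap is easy to provoke (the sizes are hypothetical):

    #include <cstdio>
    #include <stdint.h>

    // Returns true when n * elem_size fits in a uint32_t and writes the
    // product to *size; mirrors the guard at the top of do_calloc.
    static bool checked_mul(uint32_t n, uint32_t elem_size, uint32_t* size) {
      *size = n * elem_size;                        // may wrap modulo 2^32
      return elem_size == 0 || *size / elem_size == n;
    }

    int main() {
      uint32_t size;
      std::printf("%d\n", checked_mul(1000, 8, &size));           // 1: ok, 8000
      std::printf("%d\n", checked_mul(0x10000, 0x10000, &size));  // 0: wraps to 0
      return 0;
    }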
+static inline ThreadCache* GetCacheIfPresent() {
+ void* const p = ThreadCache::GetCacheIfPresent();
+ return reinterpret_cast<ThreadCache*>(p);
+}
+
+// This lets you call back to a given function pointer if ptr is invalid.
+// It is used primarily by windows code which wants a specialized callback.
+inline void do_free_with_callback(void* ptr, void (*invalid_free_fn)(void*)) {
if (ptr == NULL) return;
- ASSERT(pageheap != NULL); // Should not call free() before malloc()
+ ASSERT(Static::pageheap() != NULL); // Should not call free() before malloc()
const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
Span* span = NULL;
- size_t cl = pageheap->GetSizeClassIfCached(p);
+ size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
if (cl == 0) {
- span = pageheap->GetDescriptor(p);
+ span = Static::pageheap()->GetDescriptor(p);
+ if (!span) {
+ // span can be NULL because the pointer passed in is invalid
+ // (not something returned by malloc or friends), or because the
+ // pointer was allocated with some other allocator besides
+ // tcmalloc. The latter can happen if tcmalloc is linked in via
+ // a dynamic library, but is not listed last on the link line.
+ // In that case, libraries after it on the link line will
+ // allocate with libc malloc, but free with tcmalloc's free.
+ (*invalid_free_fn)(ptr); // Decide how to handle the bad free request
+ return;
+ }
cl = span->sizeclass;
- pageheap->CacheSizeClass(p, cl);
+ Static::pageheap()->CacheSizeClass(p, cl);
}
if (cl != 0) {
- ASSERT(!pageheap->GetDescriptor(p)->sample);
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
+ ASSERT(!Static::pageheap()->GetDescriptor(p)->sample);
+ ThreadCache* heap = GetCacheIfPresent();
if (heap != NULL) {
heap->Deallocate(ptr, cl);
} else {
// Delete directly into central cache
- SLL_SetNext(ptr, NULL);
- central_cache[cl].InsertRange(ptr, ptr, 1);
+ tcmalloc::SLL_SetNext(ptr, NULL);
+ Static::central_cache()[cl].InsertRange(ptr, ptr, 1);
}
} else {
- SpinLockHolder h(&pageheap_lock);
+ SpinLockHolder h(Static::pageheap_lock());
ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
ASSERT(span != NULL && span->start == p);
if (span->sample) {
- DLL_Remove(span);
- stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
+ tcmalloc::DLL_Remove(span);
+ Static::stacktrace_allocator()->Delete(
+ reinterpret_cast<StackTrace*>(span->objects));
span->objects = NULL;
}
- pageheap->Delete(span);
+ Static::pageheap()->Delete(span);
+ }
+}
+
+// The default "do_free" that uses the default callback.
+inline void do_free(void* ptr) {
+ return do_free_with_callback(ptr, &InvalidFree);
+}
+
+// This lets you call back to a given function pointer if ptr is invalid.
+// It is used primarily by windows code which wants a specialized callback.
+inline void* do_realloc_with_callback(void* old_ptr, size_t new_size,
+ void* (*invalid_realloc_fn)(void*,
+ size_t)) {
+ // Get the size of the old entry
+ const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
+ size_t cl = Static::pageheap()->GetSizeClassIfCached(p);
+ Span *span = NULL;
+ size_t old_size;
+ if (cl == 0) {
+ span = Static::pageheap()->GetDescriptor(p);
+ if (!span) {
+ // span can be NULL because the pointer passed in is invalid
+ // (not something returned by malloc or friends), or because the
+ // pointer was allocated with some other allocator besides tcmalloc.
+ return InvalidRealloc(old_ptr, new_size);
+ }
+ cl = span->sizeclass;
+ Static::pageheap()->CacheSizeClass(p, cl);
+ }
+ if (cl != 0) {
+ old_size = Static::sizemap()->ByteSizeForClass(cl);
+ } else {
+ ASSERT(span != NULL);
+ old_size = span->length << kPageShift;
+ }
+
+ // Reallocate if the new size is larger than the old size,
+ // or if the new size is significantly smaller than the old size.
+ // We do hysteresis to avoid resizing ping-pongs:
+ // . If we need to grow, grow to max(new_size, old_size * 1.X)
+ // . Don't shrink unless new_size < old_size * 0.Y
+ // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5.
+ const int lower_bound_to_grow = old_size + old_size / 4;
+ const int upper_bound_to_shrink = old_size / 2;
+ if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
+ // Need to reallocate.
+ void* new_ptr = NULL;
+
+ if (new_size > old_size && new_size < lower_bound_to_grow) {
+ new_ptr = do_malloc(lower_bound_to_grow);
+ }
+ if (new_ptr == NULL) {
+ // Either new_size is not a tiny increment, or last do_malloc failed.
+ new_ptr = do_malloc(new_size);
+ }
+ if (new_ptr == NULL) {
+ return NULL;
+ }
+ MallocHook::InvokeNewHook(new_ptr, new_size);
+ memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
+ MallocHook::InvokeDeleteHook(old_ptr);
+ // We could use a variant of do_free() that leverages the fact
+ // that we already know the sizeclass of old_ptr. The benefit
+ // would be small, so don't bother.
+ do_free(old_ptr);
+ return new_ptr;
+ } else {
+ // We still need to call hooks to report the updated size:
+ MallocHook::InvokeDeleteHook(old_ptr);
+ MallocHook::InvokeNewHook(old_ptr, new_size);
+ return old_ptr;
}
}
+inline void* do_realloc(void* old_ptr, size_t new_size) {
+ return do_realloc_with_callback(old_ptr, new_size, &InvalidRealloc);
+}
+
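The hysteresis above means realloc only moves data when the change is worth it: growth is rounded up to at least 1.25x the old size, and shrink requests are ignored until the new size falls below half the old size. A sketch that classifies requests the same way (values are illustrative; the real code also falls back to new_size if the rounded-up allocation fails):

    #include <cstdio>

    static void classify(size_t old_size, size_t new_size) {
      const size_t lower_bound_to_grow = old_size + old_size / 4;  // 1.25 * old
      const size_t upper_bound_to_shrink = old_size / 2;           // 0.50 * old
      if (new_size > old_size && new_size < lower_bound_to_grow) {
        std::printf("%u -> %u: grow, but allocate %u to absorb future growth\n",
                    (unsigned)old_size, (unsigned)new_size,
                    (unsigned)lower_bound_to_grow);
      } else if (new_size > old_size || new_size < upper_bound_to_shrink) {
        std::printf("%u -> %u: reallocate to the requested size\n",
                    (unsigned)old_size, (unsigned)new_size);
      } else {
        std::printf("%u -> %u: keep the existing block in place\n",
                    (unsigned)old_size, (unsigned)new_size);
      }
    }

    int main() {
      classify(1024, 1100);   // small growth: rounded up to 1280
      classify(1024, 4096);   // big growth: allocate exactly 4096
      classify(1024, 700);    // shrink by less than half: left in place
      classify(1024, 400);    // shrink below 512: reallocated
      return 0;
    }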
// For use by exported routines below that want specific alignments
//
// Note: this code can be slow, and can significantly fragment memory.
@@ -2794,12 +791,12 @@ static inline void do_free(void* ptr) {
// not be invoked very often. This requirement simplifies our
// implementation and allows us to tune for expected allocation
// patterns.
-static void* do_memalign(size_t align, size_t size) {
+void* do_memalign(size_t align, size_t size) {
ASSERT((align & (align - 1)) == 0);
ASSERT(align > 0);
if (size + align < size) return NULL; // Overflow
- if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
+ if (Static::pageheap() == NULL) ThreadCache::InitModule();
// Allocate at least one byte to avoid boundary conditions below
if (size == 0) size = 1;
@@ -2811,30 +808,32 @@ static void* do_memalign(size_t align, size_t size) {
// are aligned at powers of two. We will waste time and space if
// we miss in the size class array, but that is deemed acceptable
// since memalign() should be used rarely.
- int cl = SizeClass(size);
- while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
+ int cl = Static::sizemap()->SizeClass(size);
+ while (cl < kNumClasses &&
+ ((Static::sizemap()->class_to_size(cl) & (align - 1)) != 0)) {
cl++;
}
if (cl < kNumClasses) {
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
- return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
+ ThreadCache* heap = ThreadCache::GetCache();
+ return CheckedMallocResult(heap->Allocate(
+ Static::sizemap()->class_to_size(cl)));
}
}
// We will allocate directly from the page heap
- SpinLockHolder h(&pageheap_lock);
+ SpinLockHolder h(Static::pageheap_lock());
if (align <= kPageSize) {
// Any page-level allocation will be fine
// TODO: We could put the rest of this page in the appropriate
// TODO: cache but it does not seem worth it.
- Span* span = pageheap->New(pages(size));
+ Span* span = Static::pageheap()->New(tcmalloc::pages(size));
return span == NULL ? NULL : SpanToMallocResult(span);
}
// Allocate extra pages and carve off an aligned portion
- const Length alloc = pages(size + align);
- Span* span = pageheap->New(alloc);
+ const Length alloc = tcmalloc::pages(size + align);
+ Span* span = Static::pageheap()->New(alloc);
if (span == NULL) return NULL;
// Skip starting portion so that we end up aligned
@@ -2844,33 +843,33 @@ static void* do_memalign(size_t align, size_t size) {
}
ASSERT(skip < alloc);
if (skip > 0) {
- Span* rest = pageheap->Split(span, skip);
- pageheap->Delete(span);
+ Span* rest = Static::pageheap()->Split(span, skip);
+ Static::pageheap()->Delete(span);
span = rest;
}
// Skip trailing portion that we do not need to return
- const Length needed = pages(size);
+ const Length needed = tcmalloc::pages(size);
ASSERT(span->length >= needed);
if (span->length > needed) {
- Span* trailer = pageheap->Split(span, needed);
- pageheap->Delete(trailer);
+ Span* trailer = Static::pageheap()->Split(span, needed);
+ Static::pageheap()->Delete(trailer);
}
return SpanToMallocResult(span);
}
// Helpers for use by exported routines below:
-static inline void do_malloc_stats() {
+inline void do_malloc_stats() {
PrintStats(1);
}
-static inline int do_mallopt(int cmd, int value) {
+inline int do_mallopt(int cmd, int value) {
return 1; // Indicates error
}
#ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
-static inline struct mallinfo do_mallinfo() {
+inline struct mallinfo do_mallinfo() {
TCMallocStats stats;
ExtractStats(&stats, NULL);
@@ -2893,22 +892,55 @@ static inline struct mallinfo do_mallinfo() {
return info;
}
+#endif  // #ifdef HAVE_STRUCT_MALLINFO
+
+static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);
+
+inline void* cpp_alloc(size_t size, bool nothrow) {
+ for (;;) {
+ void* p = do_malloc(size);
+#ifdef PREANSINEW
+ return p;
+#else
+ if (p == NULL) { // allocation failed
+ // Get the current new handler. NB: this function is not
+ // thread-safe. We make a feeble stab at making it so here, but
+ // this lock only protects against tcmalloc interfering with
+ // itself, not with other libraries calling set_new_handler.
+ std::new_handler nh;
+ {
+ SpinLockHolder h(&set_new_handler_lock);
+ nh = std::set_new_handler(0);
+ (void) std::set_new_handler(nh);
+ }
+ // If no new_handler is established, the allocation failed.
+ if (!nh) {
+ if (nothrow) return 0;
+ throw std::bad_alloc();
+ }
+ // Otherwise, try the new_handler. If it returns, retry the
+ // allocation. If it throws std::bad_alloc, fail the allocation.
+      // If it throws something else, don't interfere.
+ try {
+ (*nh)();
+ } catch (const std::bad_alloc&) {
+ if (!nothrow) throw;
+ return p;
+ }
+ } else { // allocation success
+ return p;
+ }
#endif
+ }
+}
+
+} // end unnamed namespace
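cpp_alloc follows the standard operator-new contract: when an allocation fails it looks up the installed std::new_handler, calls it (the handler is expected to free memory, throw, or abort), and retries; with no handler it throws std::bad_alloc, or returns NULL in the nothrow case. A self-contained sketch of the caller side of that contract, with a hypothetical reserve buffer standing in for memory the handler can give back:

    #include <cstdio>
    #include <cstdlib>
    #include <new>

    static void* reserve = std::malloc(1 << 20);   // hypothetical emergency pool

    // Invoked by the allocator's retry loop when an allocation fails.
    // Release the reserve once; after that, throw so the loop terminates.
    static void release_reserve() {
      if (reserve != NULL) {
        std::free(reserve);
        reserve = NULL;
        std::printf("new_handler: released reserve; the allocator will retry\n");
      } else {
        throw std::bad_alloc();
      }
    }

    int main() {
      std::set_new_handler(release_reserve);
      // Under memory pressure a failing 'new' now calls release_reserve()
      // and retries -- exactly the loop cpp_alloc spells out above.
      char* p = new char[64];
      delete[] p;
      return 0;
    }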
//-------------------------------------------------------------------
// Exported routines
//-------------------------------------------------------------------
-// For Windows, it's not possible to override the system
-// malloc/calloc/realloc/free. Instead, we define our own version and
-// then patch the windows assembly code to have the windows code call
-// ours. This requires our functions have distinct names.
-#ifdef WIN32
-# define malloc Perftools_malloc
-# define calloc Perftools_calloc
-# define realloc Perftools_realloc
-# define free Perftools_free
-#endif
+#ifndef _WIN32 // windows doesn't allow overriding; use the do_* fns instead
// CAVEAT: The code structure below ensures that MallocHook methods are always
// called from the stack frame of the invoked allocation function.
@@ -2942,9 +974,6 @@ extern "C" {
__THROW ATTRIBUTE_SECTION(google_malloc);
}
-static void *MemalignOverride(size_t align, size_t size, const void *caller)
- __THROW ATTRIBUTE_SECTION(google_malloc);
-
void* operator new(size_t size)
ATTRIBUTE_SECTION(google_malloc);
void operator delete(void* p)
@@ -2964,6 +993,9 @@ void* operator new[](size_t size, const std::nothrow_t&)
void operator delete[](void* p, const std::nothrow_t&)
__THROW ATTRIBUTE_SECTION(google_malloc);
+static void *MemalignOverride(size_t align, size_t size, const void *caller)
+ __THROW ATTRIBUTE_SECTION(google_malloc);
+
extern "C" void* malloc(size_t size) __THROW {
void* result = do_malloc(size);
MallocHook::InvokeNewHook(result, size);
@@ -2976,15 +1008,8 @@ extern "C" void free(void* ptr) __THROW {
}
extern "C" void* calloc(size_t n, size_t elem_size) __THROW {
- // Overflow check
- const size_t size = n * elem_size;
- if (elem_size != 0 && size / elem_size != n) return NULL;
-
- void* result = do_malloc(size);
- if (result != NULL) {
- memset(result, 0, size);
- }
- MallocHook::InvokeNewHook(result, size);
+ void* result = do_calloc(n, elem_size);
+ MallocHook::InvokeNewHook(result, n * elem_size);
return result;
}
@@ -3004,100 +1029,7 @@ extern "C" void* realloc(void* old_ptr, size_t new_size) __THROW {
do_free(old_ptr);
return NULL;
}
-
- // Get the size of the old entry
- const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
- size_t cl = pageheap->GetSizeClassIfCached(p);
- Span *span = NULL;
- size_t old_size;
- if (cl == 0) {
- span = pageheap->GetDescriptor(p);
- cl = span->sizeclass;
- pageheap->CacheSizeClass(p, cl);
- }
- if (cl != 0) {
- old_size = ByteSizeForClass(cl);
- } else {
- ASSERT(span != NULL);
- old_size = span->length << kPageShift;
- }
-
- // Reallocate if the new size is larger than the old size,
- // or if the new size is significantly smaller than the old size.
- // We do hysteresis to avoid resizing ping-pongs:
- // . If we need to grow, grow to max(new_size, old_size * 1.X)
- // . Don't shrink unless new_size < old_size * 0.Y
- // X and Y trade-off time for wasted space. For now we do 1.25 and 0.5.
- const int lower_bound_to_grow = old_size + old_size / 4;
- const int upper_bound_to_shrink = old_size / 2;
- if ((new_size > old_size) || (new_size < upper_bound_to_shrink)) {
- // Need to reallocate.
- void* new_ptr = NULL;
-
- if (new_size > old_size && new_size < lower_bound_to_grow) {
- new_ptr = do_malloc(lower_bound_to_grow);
- }
- if (new_ptr == NULL) {
- // Either new_size is not a tiny increment, or last do_malloc failed.
- new_ptr = do_malloc(new_size);
- }
- if (new_ptr == NULL) {
- return NULL;
- }
- MallocHook::InvokeNewHook(new_ptr, new_size);
- memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
- MallocHook::InvokeDeleteHook(old_ptr);
- // We could use a variant of do_free() that leverages the fact
- // that we already know the sizeclass of old_ptr. The benefit
- // would be small, so don't bother.
- do_free(old_ptr);
- return new_ptr;
- } else {
- // We still need to call hooks to report the updated size:
- MallocHook::InvokeDeleteHook(old_ptr);
- MallocHook::InvokeNewHook(old_ptr, new_size);
- return old_ptr;
- }
-}
-
-static SpinLock set_new_handler_lock(SpinLock::LINKER_INITIALIZED);
-
-static inline void* cpp_alloc(size_t size, bool nothrow) {
- for (;;) {
- void* p = do_malloc(size);
-#ifdef PREANSINEW
- return p;
-#else
- if (p == NULL) { // allocation failed
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
- // If no new_handler is established, the allocation failed.
- if (!nh) {
- if (nothrow) return 0;
- throw std::bad_alloc();
- }
- // Otherwise, try the new_handler. If it returns, retry the
- // allocation. If it throws std::bad_alloc, fail the allocation.
- // if it throws something else, don't interfere.
- try {
- (*nh)();
- } catch (const std::bad_alloc&) {
- if (!nothrow) throw;
- return p;
- }
- } else { // allocation success
- return p;
- }
-#endif
- }
+ return do_realloc(old_ptr, new_size);
}
void* operator new(size_t size) {
@@ -3266,3 +1198,5 @@ static void *MemalignOverride(size_t align, size_t size, const void *caller)
return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
+
+#endif // #ifndef _WIN32