author    alkondratenko@gmail.com <alkondratenko@gmail.com@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2013-05-06 19:50:23 +0000
committer alkondratenko@gmail.com <alkondratenko@gmail.com@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2013-05-06 19:50:23 +0000
commit    f25ac4421f9efb635b88105e9c0830293ce19f9d (patch)
tree      7b5d153fff6464875f4b52e7c5e20ee509036079
parent    4fd762cead660d4661359ad507083ac4f4967ee4 (diff)
download  gperftools-f25ac4421f9efb635b88105e9c0830293ce19f9d.tar.gz
issue-{368,443}: allocate metadata in big batches
This uses the same approach as PageHeapAllocator: allocate a big chunk, which is then used to satisfy smaller allocations. In issue-443, gradual heap growth causes the old code, which allocates metadata in smaller pieces and thus more frequently, to fragment the heap; it is also the cause of most of the heap fragmentation in issue-368. The new code allocates 8 MiB of address space at once for metadata allocations. Most OSes allocate actual memory only when the corresponding pages are touched, so this change should not increase memory usage. I've also made sure metadata is always properly aligned, in case we ever allocate something that breaks natural alignment, e.g. strings.

git-svn-id: http://gperftools.googlecode.com/svn/trunk@211 6b5cf1ce-ec42-a296-1ba9-69fdba395a50
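For illustration, here is a minimal standalone sketch of the chunk-batching approach described above (hypothetical names, greatly simplified: no locking, no alignment handling, no stats; not the actual tcmalloc code):

    // Hypothetical sketch of chunk-batched metadata allocation.
    #include <cstddef>
    #include <cstdlib>

    static const size_t kChunkSize = 8 * 1024 * 1024;  // one 8 MiB batch

    static char*  chunk_ptr   = nullptr;  // next free byte in current chunk
    static size_t chunk_avail = 0;        // bytes left in current chunk

    void* SketchMetaDataAlloc(size_t bytes) {
      if (chunk_avail < bytes) {
        // Grab a whole new chunk; the remainder of the old chunk is
        // simply abandoned, just as in the patch below.
        chunk_ptr = static_cast<char*>(std::malloc(kChunkSize));
        if (chunk_ptr == nullptr) return nullptr;
        chunk_avail = kChunkSize;
      }
      void* result = chunk_ptr;  // bump-allocate from the front of the chunk
      chunk_ptr   += bytes;
      chunk_avail -= bytes;
      return result;
    }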
-rw-r--r--  src/common.cc  53
1 file changed, 49 insertions(+), 4 deletions(-)
diff --git a/src/common.cc b/src/common.cc
index 972e56f..cd1e013 100644
--- a/src/common.cc
+++ b/src/common.cc
@@ -34,6 +34,7 @@
#include "config.h"
#include "common.h"
#include "system-alloc.h"
+#include "base/spinlock.h"
namespace tcmalloc {
@@ -211,12 +212,56 @@ void SizeMap::Init() {
// Metadata allocator -- keeps stats about how many bytes allocated.
static uint64_t metadata_system_bytes_ = 0;
+static const size_t kMetadataAllocChunkSize = 8*1024*1024;
+static const size_t kMetadataBigAllocThreshold = kMetadataAllocChunkSize / 8;
+// Usually malloc uses larger alignments, but because metadata cannot
+// have any fancy SIMD types, aligning on pointer size seems fine.
+static const size_t kMetadataAllignment = sizeof(void *);
+
+static char *metadata_chunk_alloc_;
+static size_t metadata_chunk_avail_;
+
+static SpinLock metadata_alloc_lock(SpinLock::LINKER_INITIALIZED);
+
void* MetaDataAlloc(size_t bytes) {
- void* result = TCMalloc_SystemAlloc(bytes, NULL);
- if (result != NULL) {
- metadata_system_bytes_ += bytes;
+ if (bytes >= kMetadataBigAllocThreshold) {
+ void *rv = TCMalloc_SystemAlloc(bytes,
+ NULL, kMetadataAllignment);
+ if (rv != NULL) {
+ metadata_system_bytes_ += bytes;
+ }
+ return rv;
+ }
+
+ SpinLockHolder h(&metadata_alloc_lock);
+
+ // The following rounds metadata_chunk_alloc_ up to the next multiple
+ // of kMetadataAllignment: negating the address and masking off the
+ // high bits yields (-addr) mod kMetadataAllignment, so adding it to
+ // the address gives 0 modulo kMetadataAllignment. Note that we negate
+ // before masking the higher bits off; otherwise we'd have to mask
+ // them off again after negation.
+ intptr_t alignment = -reinterpret_cast<intptr_t>(metadata_chunk_alloc_) & (kMetadataAllignment-1);
+
+ if (metadata_chunk_avail_ < bytes + alignment) {
+ size_t real_size;
+ void *ptr = TCMalloc_SystemAlloc(kMetadataAllocChunkSize,
+ &real_size, kMetadataAllignment);
+ if (ptr == NULL) {
+ return NULL;
+ }
+
+ metadata_chunk_alloc_ = static_cast<char *>(ptr);
+ metadata_chunk_avail_ = real_size;
+
+ alignment = 0;
}
- return result;
+
+ void *rv = static_cast<void *>(metadata_chunk_alloc_ + alignment);
+ bytes += alignment;
+ metadata_chunk_alloc_ += bytes;
+ metadata_chunk_avail_ -= bytes;
+ metadata_system_bytes_ += bytes;
+ return rv;
}
uint64_t metadata_system_bytes() { return metadata_system_bytes_; }
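The padding computation in MetaDataAlloc above can be checked in isolation. Below is a small standalone demonstration of the -addr & (align-1) trick; AlignPadding is a hypothetical helper name for this illustration, not part of the patch:

    #include <cassert>
    #include <cstdint>

    // Padding needed to round addr up to the next multiple of align,
    // where align is a power of two: equals (-addr) mod align.
    static inline uintptr_t AlignPadding(uintptr_t addr, uintptr_t align) {
      return -addr & (align - 1);
    }

    int main() {
      // With align == 8: address 21 needs 3 bytes of padding to reach 24.
      assert(AlignPadding(21, 8) == 3);
      assert(AlignPadding(24, 8) == 0);  // already aligned: no padding
      assert((21 + AlignPadding(21, 8)) % 8 == 0);
      return 0;
    }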