path: root/src/common.cc
author    csilvers <csilvers@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>    2008-12-13 01:35:42 +0000
committer csilvers <csilvers@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>    2008-12-13 01:35:42 +0000
commit    6fa2a2574ce1c15ac12293e24691d69a41972e54 (patch)
tree      606da4a80de5c91721969ade70c4d9a87cb1604a /src/common.cc
parent    16191f87ff8dc78295c0f617060460664fc444bd (diff)
download  gperftools-6fa2a2574ce1c15ac12293e24691d69a41972e54.tar.gz
Thu Dec 11 16:01:32 2008 Google Inc. <opensource@google.com>
	* google-perftools: version 1.0rc1 release
	* Replace API for selectively disabling heap-checker in code (sanjay)
	* Add a pre-mmap hook (daven, adlr)
	* Add MallocExtension interface to set memory-releasing rate (fikes)
	* Augment pprof to allow any string ending in /pprof/profile (csilvers)
	* PORTING: Rewrite -- and fix -- malloc patching for windows (dvitek)
	* PORTING: Add nm-pdb and addr2line-pdb for use by pprof (dvitek)
	* PORTING: Improve cygwin and mingw support (jperkins, csilvers)
	* PORTING: Fix pprof for mac os x, other pprof improvements (csilvers)
	* PORTING: Fix some PPC bugs in our locking code (anton.blanchard)
	* A new unittest, sampling_test, to verify tcmalloc-profiles (csilvers)
	* Turn off TLS for gcc < 4.1.2, due to a TLS + -fPIC bug (csilvers)
	* Prefer __builtin_frame_address to assembly for stacktraces (nlewycky)
	* Separate tcmalloc.cc out into multiple files -- finally! (kash)
	* Make our locking code work with -fPIC on 32-bit x86 (aruns)
	* Fix an initialization-ordering bug for tcmalloc/profiling (csilvers)
	* Use "initial exec" model of TLS to speed up tcmalloc (csilvers)
	* Enforce 16-byte alignment for tcmalloc, for SSE (sanjay)

git-svn-id: http://gperftools.googlecode.com/svn/trunk@60 6b5cf1ce-ec42-a296-1ba9-69fdba395a50
Diffstat (limited to 'src/common.cc')
-rw-r--r--    src/common.cc    211
1 file changed, 211 insertions, 0 deletions
diff --git a/src/common.cc b/src/common.cc
new file mode 100644
index 0000000..fabcab1
--- /dev/null
+++ b/src/common.cc
@@ -0,0 +1,211 @@
+// Copyright (c) 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// ---
+// Author: Sanjay Ghemawat <opensource@google.com>
+
+#include "system-alloc.h"
+#include "config.h"
+#include "common.h"
+
+namespace tcmalloc {
+
+// Note: the following only works for "n"s that fit in 32 bits, but
+// that is fine since we only use it for small sizes.
+static inline int LgFloor(size_t n) {
+ int log = 0;
+ for (int i = 4; i >= 0; --i) {
+ int shift = (1 << i);
+ size_t x = n >> shift;
+ if (x != 0) {
+ n = x;
+ log += shift;
+ }
+ }
+ ASSERT(n == 1);
+ return log;
+}
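+
+// Worked example for LgFloor above: for n = 1024, only the shifts by 8
+// and then 2 contribute (8 + 2 == 10), so it returns 10. LgFloor(1025)
+// also returns 10, since the loop computes floor(log2(n)).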
+
+int SizeMap::NumMoveSize(size_t size) {
+ if (size == 0) return 0;
+ // Use approx 64k transfers between thread and central caches.
+ int num = static_cast<int>(64.0 * 1024.0 / size);
+ if (num < 2) num = 2;
+ // Clamp well below kMaxFreeListLength to avoid ping pong between central
+ // and thread caches.
+ if (num > static_cast<int>(0.8 * kMaxFreeListLength))
+ num = static_cast<int>(0.8 * kMaxFreeListLength);
+
+ // Also, avoid bringing too many objects into small object free
+ // lists. There are lots of such lists, and if we allow each one to
+ // fetch too many at a time, we end up having to scavenge too often
+ // (especially when there are lots of threads and each thread gets a
+ // small allowance for its thread cache). This fixes a problem seen
+ // in the 20060502 bigtable release by both Andrew Fikes and Ben
+ // Darnell.
+ //
+ // TODO: Make thread cache free list sizes dynamic so that we do not
+ // have to equally divide a fixed resource amongst lots of threads.
+ if (num > 32) num = 32;
+
+ return num;
+}
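+
+// Example: NumMoveSize(32) starts at 64*1024/32 = 2048, which the caps
+// above cut to 32; NumMoveSize(64*1024) starts at 1, which the lower
+// bound raises to 2. (This assumes 0.8 * kMaxFreeListLength exceeds 32,
+// which holds for any plausible free-list limit.)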
+
+// Initialize the mapping arrays
+void SizeMap::Init() {
+ // Do some sanity checking on class_array_
+ if (ClassIndex(0) < 0) {
+ CRASH("Invalid class index %d for size 0\n", ClassIndex(0));
+ }
+ if (ClassIndex(kMaxSize) >= sizeof(class_array_)) {
+ CRASH("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
+ }
+
+ // Compute the size classes we want to use
+ int sc = 1; // Next size class to assign
+ int alignment = kAlignment;
+ CHECK_CONDITION(kAlignment <= 16);
+ int last_lg = -1;
+ for (size_t size = kAlignment; size <= kMaxSize; size += alignment) {
+ int lg = LgFloor(size);
+ if (lg > last_lg) {
+ // Increase alignment every so often to reduce number of size classes.
+ if (size >= 2048) {
+ // Cap alignment at 256 for large sizes
+ alignment = 256;
+ } else if (size >= 128) {
+ // Space wasted due to alignment is at most 1/8, i.e., 12.5%.
+ alignment = size / 8;
+ } else if (size >= 16) {
+ // We need an alignment of at least 16 bytes to satisfy
+ // requirements for some SSE types.
+ alignment = 16;
+ }
+ CHECK_CONDITION(size < 16 || alignment >= 16);
+ CHECK_CONDITION((alignment & (alignment - 1)) == 0);
+ last_lg = lg;
+ }
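+    // Net effect of the schedule above: sizes step by kAlignment up to
+    // 16, by 16 up to 256, by size/8 up to 2048 (always a power of two,
+    // since alignment only changes when lg does: 32 at 256, doubling to
+    // 128 at 1024), and by 256 from 2048 on.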
+ CHECK_CONDITION((size % alignment) == 0);
+
+ // Allocate enough pages so leftover is less than 1/8 of total.
+ // This bounds wasted space to at most 12.5%.
+ size_t psize = kPageSize;
+ while ((psize % size) > (psize >> 3)) {
+ psize += kPageSize;
+ }
+ const size_t my_pages = psize >> kPageShift;
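+ // Example (assuming 4K pages): size 1792 gives 4096 % 1792 = 512,
+ // which is not above 4096 >> 3 = 512, so one page suffices: 2 objects
+ // with 512 bytes (12.5%) left over. Size 1664 fails that test at one
+ // and two pages, so it gets 3 pages: 7 objects, 640 bytes left over.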
+
+ if (sc > 1 && my_pages == class_to_pages_[sc-1]) {
+ // See if we can merge this into the previous class without
+ // increasing the fragmentation of the previous class.
+ const size_t my_objects = (my_pages << kPageShift) / size;
+ const size_t prev_objects = (class_to_pages_[sc-1] << kPageShift)
+ / class_to_size_[sc-1];
+ if (my_objects == prev_objects) {
+ // Adjust last class to include this size
+ class_to_size_[sc-1] = size;
+ continue;
+ }
+ }
+
+ // Add new class
+ class_to_pages_[sc] = my_pages;
+ class_to_size_[sc] = size;
+ sc++;
+ }
+ if (sc != kNumClasses) {
+ CRASH("wrong number of size classes: found %d instead of %d\n",
+ sc, int(kNumClasses));
+ }
+
+ // Initialize the mapping arrays
+ int next_size = 0;
+ for (int c = 1; c < kNumClasses; c++) {
+ const int max_size_in_class = class_to_size_[c];
+ for (int s = next_size; s <= max_size_in_class; s += kAlignment) {
+ class_array_[ClassIndex(s)] = c;
+ }
+ next_size = max_size_in_class + kAlignment;
+ }
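+
+ // class_array_ now maps ClassIndex(s) to the smallest class whose size
+ // is >= s, so SizeClass() is a single table lookup. For instance, if
+ // class_to_size_[1] == 16 and class_to_size_[2] == 32, a 17-byte
+ // request lands in class 2 and is rounded up to 32 bytes.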
+
+ // Double-check sizes just to be safe
+ for (size_t size = 0; size <= kMaxSize; size++) {
+ const int sc = SizeClass(size);
+ if (sc <= 0 || sc >= kNumClasses) {
+ CRASH("Bad size class %d for %" PRIuS "\n", sc, size);
+ }
+ if (sc > 1 && size <= class_to_size_[sc-1]) {
+ CRASH("Allocating unnecessarily large class %d for %" PRIuS
+ "\n", sc, size);
+ }
+ const size_t s = class_to_size_[sc];
+ if (size > s) {
+ CRASH("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
+ }
+ if (s == 0) {
+ CRASH("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
+ }
+ }
+
+ // Initialize the num_objects_to_move array.
+ for (size_t cl = 1; cl < kNumClasses; ++cl) {
+ num_objects_to_move_[cl] = NumMoveSize(ByteSizeForClass(cl));
+ }
+}
+
+void SizeMap::Dump() {
+ // Dump class sizes and maximum external wastage per size class
+ for (size_t cl = 1; cl < kNumClasses; ++cl) {
+ const int alloc_size = class_to_pages_[cl] << kPageShift;
+ const int alloc_objs = alloc_size / class_to_size_[cl];
+ const int min_used = (class_to_size_[cl-1] + 1) * alloc_objs;
+ const int max_waste = alloc_size - min_used;
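+ // min_used assumes every object holds the smallest request that maps
+ // to this class (one byte past the previous class's size), so
+ // max_waste is the worst case for this class: internal fragmentation
+ // plus the leftover tail of the allocation.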
+ MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
+ int(cl),
+ int(class_to_size_[cl-1] + 1),
+ int(class_to_size_[cl]),
+ int(class_to_pages_[cl] << kPageShift),
+ max_waste * 100.0 / alloc_size
+ );
+ }
+}
+
+// Metadata allocator -- keeps stats about how many bytes allocated.
+static uint64_t metadata_system_bytes_ = 0;
+void* MetaDataAlloc(size_t bytes) {
+ void* result = TCMalloc_SystemAlloc(bytes, NULL);
+ if (result != NULL) {
+ metadata_system_bytes_ += bytes;
+ }
+ return result;
+}
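+// Note: metadata_system_bytes_ is updated above without any locking, so
+// callers are presumably expected to serialize calls to MetaDataAlloc
+// (e.g., by holding the allocator's central lock).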
+
+uint64_t metadata_system_bytes() { return metadata_system_bytes_; }
+
+} // namespace tcmalloc