author      alkondratenko@gmail.com <alkondratenko@gmail.com@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2013-05-06 19:49:48 +0000
committer   alkondratenko@gmail.com <alkondratenko@gmail.com@6b5cf1ce-ec42-a296-1ba9-69fdba395a50>  2013-05-06 19:49:48 +0000
commit      4fd762cead660d4661359ad507083ac4f4967ee4 (patch)
tree        7ffbe5df1429ac095e04c75678cee34e260d8bb7
parent      99fe9944de32046fd954399e60415fba7d03eeb0 (diff)
download    gperftools-4fd762cead660d4661359ad507083ac4f4967ee4.tar.gz
issue-368: unmap free spans and retry before growing heap
Because unmapped spans are never coalesced with normal spans, it is possible that we do have a large enough free span, but we fail to see it because unmapped and normal spans are always considered separately. That behavior is more likely for larger spans. To protect programs that grow the heap frequently and in small increments from a much higher rate of minor page faults, this forced-unmap path is limited to running at most once per 128 megs of heap growth.

git-svn-id: http://gperftools.googlecode.com/svn/trunk@210 6b5cf1ce-ec42-a296-1ba9-69fdba395a50
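A minimal standalone sketch of the rate-limit arithmetic described above, assuming the same 128 meg interval as the patch; the helper CrossesCoalesceInterval and the main() driver are illustrative names, not part of gperftools:

#include <cstddef>
#include <cstdio>

// Same constant as the patch below: allow the forced unmap-and-retry
// path at most once per 128 megs of heap growth.
static const size_t kForcedCoalesceInterval = 128 * 1024 * 1024;

// Returns true when growing the heap by grow_bytes crosses a 128 meg
// boundary of total system bytes, i.e. the rate limit permits one
// forced coalesce per interval of heap growth.
static bool CrossesCoalesceInterval(size_t system_bytes, size_t grow_bytes) {
  return system_bytes / kForcedCoalesceInterval
      != (system_bytes + grow_bytes) / kForcedCoalesceInterval;
}

int main() {
  // Growing from 127 megs by 2 megs crosses the first boundary: path allowed.
  std::printf("%d\n", CrossesCoalesceInterval(127u << 20, 2u << 20));  // prints 1
  // Growing from 129 megs by 1 meg stays inside the same interval: path skipped.
  std::printf("%d\n", CrossesCoalesceInterval(129u << 20, 1u << 20));  // prints 0
  return 0;
}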
-rw-r--r--  src/page_heap.cc | 34
1 file changed, 34 insertions, 0 deletions
diff --git a/src/page_heap.cc b/src/page_heap.cc
index 8f0e967..fab6ff2 100644
--- a/src/page_heap.cc
+++ b/src/page_heap.cc
@@ -108,6 +108,8 @@ Span* PageHeap::SearchFreeAndLargeLists(Length n) {
return AllocLarge(n); // May be NULL
}
+static const size_t kForcedCoalesceInterval = 128*1024*1024;
+
Span* PageHeap::New(Length n) {
ASSERT(Check());
ASSERT(n > 0);
@@ -116,6 +118,38 @@ Span* PageHeap::New(Length n) {
if (result != NULL)
return result;
+ if (stats_.free_bytes != 0 && stats_.unmapped_bytes != 0
+ && stats_.free_bytes + stats_.unmapped_bytes >= stats_.system_bytes / 4
+ && (stats_.system_bytes / kForcedCoalesceInterval
+ != (stats_.system_bytes + (n << kPageShift)) / kForcedCoalesceInterval)) {
+ // We're about to grow the heap, but there are lots of free pages.
+ // tcmalloc's design decision to keep unmapped and normal free spans
+ // separate and never coalesce them means that a contiguous run of
+ // free pages of sufficient size may exist, yet the page heap search
+ // cannot find it because it consists of "segments" of different
+ // types. To avoid growing the heap and wasting memory in that case,
+ // we unmap all free pages so that all free spans end up maximally
+ // coalesced.
+ //
+ // We also limit the rate of taking this path to at most once per
+ // 128 megs of heap growth. Otherwise programs that grow the heap
+ // frequently (and therefore in small amounts) could be penalized
+ // with a higher count of minor page faults.
+ //
+ // See also large_heap_fragmentation_unittest.cc and
+ // https://code.google.com/p/gperftools/issues/detail?id=368
+ ReleaseAtLeastNPages(static_cast<Length>(0x7fffffff));
+
+ // Then try again. If we are forced to grow the heap because of
+ // fragmentation among large spans, and not because of the problem
+ // described above, then at the very least we have just returned the
+ // free but insufficiently large spans to the OS. So even under really
+ // unlucky memory fragmentation we will be consuming virtual address
+ // space, but not real memory.
+ result = SearchFreeAndLargeLists(n);
+ if (result != NULL) return result;
+ }
+
// Grow the heap and try again.
if (!GrowHeap(n)) {
ASSERT(Check());