author    Kostya Kortchinsky <kostyak@google.com>  2019-04-30 14:56:18 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2019-04-30 14:56:18 +0000
commit    8d158e4bfb63c4792b7135d0625fcc4592dcac7b (patch)
tree      04d46547b270323f9b84484cb9038e0e8105fee9
parent    d201961150fe163d41c4e0bf9c296ed69780d3b2 (diff)
download  compiler-rt-8d158e4bfb63c4792b7135d0625fcc4592dcac7b.tar.gz
[scudo][standalone] Add the memory reclaiming mechanism
Summary:
This CL implements the memory reclaiming function `releaseFreeMemoryToOS` and
its associated classes. Most of this code was originally written by Aleksey
for the Primary64 in sanitizer_common, and I made some changes to be able to
implement 32-bit reclaiming as well. The code has been restructured a bit to
accommodate a free list of batches instead of the free array used in the
current sanitizer_common code.

Reviewers: eugenis, vitalybuka, morehouse, hctim

Reviewed By: vitalybuka

Subscribers: srhines, mgorny, delcypher, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D61214

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@359567 91177308-0d34-0410-b5e6-96231b3b80d8
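For reference, the call pattern this API expects looks roughly like the sketch
below (illustrative only: `RegionBase`, `RegionPages`, `MapData`, `FreeList`
and `BlockSize` are hypothetical stand-ins for whatever state a Primary
tracks, not names introduced by this CL):

    // Sketch: release the pages of a region that are fully covered by free
    // blocks, then query how much memory was given back to the OS.
    scudo::ReleaseRecorder Recorder(RegionBase, &MapData);
    scudo::releaseFreeMemoryToOS(&FreeList, RegionBase, RegionPages, BlockSize,
                                 &Recorder);
    const scudo::uptr ReleasedBytes = Recorder.getReleasedBytes();
    const scudo::uptr ReleasedRanges = Recorder.getReleasedRangesCount();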
-rw-r--r--  lib/scudo/standalone/CMakeLists.txt        |   1
-rw-r--r--  lib/scudo/standalone/release.h             | 262
-rw-r--r--  lib/scudo/standalone/size_class_map.h      |   4
-rw-r--r--  lib/scudo/standalone/tests/CMakeLists.txt  |   1
-rw-r--r--  lib/scudo/standalone/tests/map_test.cc     |   2
-rw-r--r--  lib/scudo/standalone/tests/release_test.cc | 260
6 files changed, 527 insertions(+), 3 deletions(-)
diff --git a/lib/scudo/standalone/CMakeLists.txt b/lib/scudo/standalone/CMakeLists.txt
index c34910766..922f98692 100644
--- a/lib/scudo/standalone/CMakeLists.txt
+++ b/lib/scudo/standalone/CMakeLists.txt
@@ -69,6 +69,7 @@ set(SCUDO_HEADERS
list.h
mutex.h
platform.h
+ release.h
report.h
secondary.h
size_class_map.h
diff --git a/lib/scudo/standalone/release.h b/lib/scudo/standalone/release.h
new file mode 100644
index 000000000..4fe29fde4
--- /dev/null
+++ b/lib/scudo/standalone/release.h
@@ -0,0 +1,262 @@
+//===-- release.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_RELEASE_H_
+#define SCUDO_RELEASE_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+class ReleaseRecorder {
+public:
+ ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
+ : BaseAddress(BaseAddress), Data(Data) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ // Releases [From, To) range of pages back to OS.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ releasePagesToOS(BaseAddress, From, Size, Data);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ uptr BaseAddress = 0;
+ MapPlatformData *Data = nullptr;
+};
+
+// A packed array of Counters. Each counter occupies 2^N bits, enough to store
+// the counter's MaxValue. The constructor will try to allocate the required
+// Buffer via map(), and the caller is expected to check whether the
+// initialization was successful by checking the isAllocated() result. For
+// performance's sake, none of the accessors check the validity of the
+// arguments; it is assumed that Index is always in the [0, N) range and the
+// value is not incremented past MaxValue.
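+// As a purely illustrative example (64-bit word): with MaxValue == 7 each
+// counter needs 3 bits, which rounds up to 4 bits of storage, so CounterMask
+// is 0xf and 16 counters are packed into every uptr of the Buffer.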
+class PackedCounterArray {
+public:
+ PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
+ CHECK_GT(NumCounters, 0);
+ CHECK_GT(MaxValue, 0);
+ constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+    // Rounding the counter storage size up to a power of two allows us to use
+    // bit shifts to compute a particular counter's Index and offset.
+ const uptr CounterSizeBits =
+ roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ CHECK_LE(CounterSizeBits, MaxCounterBits);
+ CounterSizeBitsLog = getLog2(CounterSizeBits);
+ CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
+
+ const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
+ CHECK_GT(PackingRatio, 0);
+ PackingRatioLog = getLog2(PackingRatio);
+ BitOffsetMask = PackingRatio - 1;
+
+ BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
+ PackingRatioLog) *
+ sizeof(*Buffer);
+ Buffer = reinterpret_cast<uptr *>(
+ map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ }
+ ~PackedCounterArray() {
+ if (isAllocated())
+ unmap(reinterpret_cast<void *>(Buffer), BufferSize);
+ }
+
+ bool isAllocated() const { return !!Buffer; }
+
+ uptr getCount() const { return N; }
+
+ uptr get(uptr I) const {
+ DCHECK_LT(I, N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ return (Buffer[Index] >> BitOffset) & CounterMask;
+ }
+
+ void inc(uptr I) const {
+ DCHECK_LT(get(I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
+ }
+
+ void incRange(uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ for (uptr I = From; I <= To; I++)
+ inc(I);
+ }
+
+ uptr getBufferSize() const { return BufferSize; }
+
+private:
+ const uptr N;
+ uptr CounterSizeBitsLog;
+ uptr CounterMask;
+ uptr PackingRatioLog;
+ uptr BitOffsetMask;
+
+ uptr BufferSize;
+ uptr *Buffer;
+};
+
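+// Tracks the ranges of freed pages and reports them to the Recorder. As a
+// purely illustrative example: feeding processNextPage() the sequence {true,
+// true, false, true} and then calling finish() reports the page ranges
+// [0, 2) and [3, 4) to the Recorder.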
+template <class ReleaseRecorderT> class FreePagesRangeTracker {
+public:
+ explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+ : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
+
+ void processNextPage(bool Freed) {
+ if (Freed) {
+ if (!InRange) {
+ CurrentRangeStatePage = CurrentPage;
+ InRange = true;
+ }
+ } else {
+ closeOpenedRange();
+ }
+ CurrentPage++;
+ }
+
+ void finish() { closeOpenedRange(); }
+
+private:
+ void closeOpenedRange() {
+ if (InRange) {
+ Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
+ InRange = false;
+ }
+ }
+
+ ReleaseRecorderT *const Recorder;
+ const uptr PageSizeLog;
+ bool InRange = false;
+ uptr CurrentPage = 0;
+ uptr CurrentRangeStatePage = 0;
+};
+
+template <class TransferBatchT, class ReleaseRecorderT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+ uptr AllocatedPagesCount, uptr BlockSize,
+ ReleaseRecorderT *Recorder) {
+ const uptr PageSize = getPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+      // Same number of chunks per page, no chunk crosses a page boundary.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+      // Some chunks cross page boundaries, which means that the page
+      // contains one or two partial chunks, but all pages contain the same
+      // number of chunks.
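+      // (In this case the remainder PageSize % BlockSize divides BlockSize,
+      // so the offset of a page start within a chunk cycles through a fixed
+      // set of values and every page intersects exactly
+      // PageSize / BlockSize + 1 chunks.)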
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+      // Some chunks cross page boundaries, which means that the page
+      // contains one or two partial chunks, and the number of chunks per
+      // page can differ.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
+ } else {
+ if (BlockSize % PageSize == 0) {
+      // One chunk covers multiple pages, no chunk crosses a page boundary.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+      // One chunk covers multiple pages and some chunks cross page
+      // boundaries. Some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
+ }
+
+ PackedCounterArray Counters(AllocatedPagesCount, FullPagesBlockCountMax);
+ if (!Counters.isAllocated())
+ return;
+
+ const uptr PageSizeLog = getLog2(PageSize);
+ const uptr End = Base + AllocatedPagesCount * PageSize;
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.inc((P - Base) >> PageSizeLog);
+ }
+ }
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.incRange((P - Base) >> PageSizeLog,
+ (P - Base + BlockSize - 1) >> PageSizeLog);
+ }
+ }
+ }
+
+  // Iterate over the pages, detecting ranges of pages whose chunk counters
+  // are equal to the expected number of chunks for that page.
+ FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
+ if (SameBlockCountPerPage) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr I = 0; I < Counters.getCount(); I++)
+ RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
+ } else {
+    // Slow path: go through the pages, keeping count of how many chunks
+    // affect each page.
+ const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
+ const uptr Pnc = Pn * BlockSize;
+    // The idea is to increment the current page pointer by the first chunk
+    // size, then by the middle portion size (the portion of the page covered
+    // by chunks other than the first and the last one), and then by the last
+    // chunk size, adding up the number of chunks on the current page and
+    // checking at every step whether a page boundary was crossed.
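+    // Illustrative walk-through (numbers are examples, not allocator
+    // constants): with PageSize == 4096 and BlockSize == 1536, Pn == 2 and
+    // Pnc == 3072. Page 0 starts on a chunk boundary and holds 3 chunks (the
+    // third crosses into page 1); page 1 then starts mid-chunk and holds 4
+    // (a leading partial chunk, two full ones, and a trailing partial one).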
+ uptr PrevPageBoundary = 0;
+ uptr CurrentBoundary = 0;
+ for (uptr I = 0; I < Counters.getCount(); I++) {
+ const uptr PageBoundary = PrevPageBoundary + PageSize;
+ uptr BlocksPerPage = Pn;
+ if (CurrentBoundary < PageBoundary) {
+ if (CurrentBoundary > PrevPageBoundary)
+ BlocksPerPage++;
+ CurrentBoundary += Pnc;
+ if (CurrentBoundary < PageBoundary) {
+ BlocksPerPage++;
+ CurrentBoundary += BlockSize;
+ }
+ }
+ PrevPageBoundary = PageBoundary;
+
+ RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
+ }
+ }
+ RangeTracker.finish();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_RELEASE_H_
diff --git a/lib/scudo/standalone/size_class_map.h b/lib/scudo/standalone/size_class_map.h
index 50320701b..b7df54cf8 100644
--- a/lib/scudo/standalone/size_class_map.h
+++ b/lib/scudo/standalone/size_class_map.h
@@ -31,8 +31,8 @@ namespace scudo {
//
// This class also gives a hint to a thread-caching allocator about the amount
// of chunks that can be cached per-thread:
-// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
-// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
diff --git a/lib/scudo/standalone/tests/CMakeLists.txt b/lib/scudo/standalone/tests/CMakeLists.txt
index 5fcf67903..75c67263b 100644
--- a/lib/scudo/standalone/tests/CMakeLists.txt
+++ b/lib/scudo/standalone/tests/CMakeLists.txt
@@ -56,6 +56,7 @@ set(SCUDO_UNIT_TEST_SOURCES
list_test.cc
map_test.cc
mutex_test.cc
+ release_test.cc
report_test.cc
secondary_test.cc
size_class_map_test.cc
diff --git a/lib/scudo/standalone/tests/map_test.cc b/lib/scudo/standalone/tests/map_test.cc
index dbf67cb45..7c726e947 100644
--- a/lib/scudo/standalone/tests/map_test.cc
+++ b/lib/scudo/standalone/tests/map_test.cc
@@ -12,7 +12,7 @@
#include <string.h>
-const char *MappingName = "scudo:test";
+static const char *MappingName = "scudo:test";
TEST(ScudoMapTest, MapNoAccessUnmap) {
const scudo::uptr Size = 4 * scudo::getPageSizeCached();
diff --git a/lib/scudo/standalone/tests/release_test.cc b/lib/scudo/standalone/tests/release_test.cc
new file mode 100644
index 000000000..2279d5d15
--- /dev/null
+++ b/lib/scudo/standalone/tests/release_test.cc
@@ -0,0 +1,260 @@
+//===-- release_test.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "list.h"
+#include "release.h"
+#include "size_class_map.h"
+
+#include "gtest/gtest.h"
+
+#include <string.h>
+
+#include <algorithm>
+#include <random>
+#include <set>
+#include <string>
+#include <vector>
+
+TEST(ScudoReleaseTest, PackedCounterArray) {
+ for (scudo::uptr I = 0; I < SCUDO_WORDSIZE; I++) {
+    // Various valid counter max values packed into one word.
+ scudo::PackedCounterArray Counters2N(1, 1UL << I);
+ EXPECT_EQ(sizeof(scudo::uptr), Counters2N.getBufferSize());
+    // Check the "all bits set" values too.
+ scudo::PackedCounterArray Counters2N1_1(1, ~0UL >> I);
+ EXPECT_EQ(sizeof(scudo::uptr), Counters2N1_1.getBufferSize());
+    // Verify the packing ratio: a counter is expected to be packed into the
+    // closest power-of-2 number of bits.
+ scudo::PackedCounterArray Counters(SCUDO_WORDSIZE, 1UL << I);
+ EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
+ Counters.getBufferSize());
+ }
+
+  // Go through 1, 2, 4, 8, ... up to SCUDO_WORDSIZE bits per counter.
+ for (scudo::uptr I = 0; (SCUDO_WORDSIZE >> I) != 0; I++) {
+ // Make sure counters request one memory page for the buffer.
+ const scudo::uptr NumCounters =
+ (scudo::getPageSizeCached() / 8) * (SCUDO_WORDSIZE >> I);
+ scudo::PackedCounterArray Counters(NumCounters, 1UL << ((1UL << I) - 1));
+ Counters.inc(0);
+ for (scudo::uptr C = 1; C < NumCounters - 1; C++) {
+ EXPECT_EQ(0UL, Counters.get(C));
+ Counters.inc(C);
+ EXPECT_EQ(1UL, Counters.get(C - 1));
+ }
+ EXPECT_EQ(0UL, Counters.get(NumCounters - 1));
+ Counters.inc(NumCounters - 1);
+ if (I > 0) {
+ Counters.incRange(0, NumCounters - 1);
+ for (scudo::uptr C = 0; C < NumCounters; C++)
+ EXPECT_EQ(2UL, Counters.get(C));
+ }
+ }
+}
+
+class StringRangeRecorder {
+public:
+ std::string ReportedPages;
+
+ StringRangeRecorder()
+ : PageSizeScaledLog(scudo::getLog2(scudo::getPageSizeCached())) {}
+
+ void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
+ From >>= PageSizeScaledLog;
+ To >>= PageSizeScaledLog;
+ EXPECT_LT(From, To);
+ if (!ReportedPages.empty())
+ EXPECT_LT(LastPageReported, From);
+ ReportedPages.append(From - LastPageReported, '.');
+ ReportedPages.append(To - From, 'x');
+ LastPageReported = To;
+ }
+
+private:
+ const scudo::uptr PageSizeScaledLog;
+ scudo::uptr LastPageReported = 0;
+};
+
+TEST(ScudoReleaseTest, FreePagesRangeTracker) {
+ // 'x' denotes a page to be released, '.' denotes a page to be kept around.
+ const char *TestCases[] = {
+ "",
+ ".",
+ "x",
+ "........",
+ "xxxxxxxxxxx",
+ "..............xxxxx",
+ "xxxxxxxxxxxxxxxxxx.....",
+ "......xxxxxxxx........",
+ "xxx..........xxxxxxxxxxxxxxx",
+ "......xxxx....xxxx........",
+ "xxx..........xxxxxxxx....xxxxxxx",
+ "x.x.x.x.x.x.x.x.x.x.x.x.",
+ ".x.x.x.x.x.x.x.x.x.x.x.x",
+ ".x.x.x.x.x.x.x.x.x.x.x.x.",
+ "x.x.x.x.x.x.x.x.x.x.x.x.x",
+ };
+ typedef scudo::FreePagesRangeTracker<StringRangeRecorder> RangeTracker;
+
+ for (auto TestCase : TestCases) {
+ StringRangeRecorder Recorder;
+ RangeTracker Tracker(&Recorder);
+ for (scudo::uptr I = 0; TestCase[I] != 0; I++)
+ Tracker.processNextPage(TestCase[I] == 'x');
+ Tracker.finish();
+    // Strip the trailing '.'-pages before comparing the results, as they are
+    // not going to be reported to the recorder anyway.
+ const char *LastX = strrchr(TestCase, 'x');
+ std::string Expected(TestCase,
+ LastX == nullptr ? 0 : (LastX - TestCase + 1));
+ EXPECT_STREQ(Expected.c_str(), Recorder.ReportedPages.c_str());
+ }
+}
+
+class ReleasedPagesRecorder {
+public:
+ std::set<scudo::uptr> ReportedPages;
+
+ void releasePageRangeToOS(scudo::uptr From, scudo::uptr To) {
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ for (scudo::uptr I = From; I < To; I += PageSize)
+ ReportedPages.insert(I);
+ }
+};
+
+// Simplified version of a TransferBatch.
+template <class SizeClassMap> struct FreeBatch {
+ static const scudo::u32 MaxCount = SizeClassMap::MaxNumCachedHint;
+ void clear() { Count = 0; }
+ void add(scudo::uptr P) {
+ DCHECK_LT(Count, MaxCount);
+ Batch[Count++] = P;
+ }
+ scudo::u32 getCount() const { return Count; }
+ scudo::uptr get(scudo::u32 I) const {
+    DCHECK_LT(I, Count);
+ return Batch[I];
+ }
+ FreeBatch *Next;
+
+private:
+ scudo::u32 Count;
+ scudo::uptr Batch[MaxCount];
+};
+
+template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
+ typedef FreeBatch<SizeClassMap> Batch;
+ const scudo::uptr AllocatedPagesCount = 1024;
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ std::mt19937 R;
+ scudo::u32 RandState = 42;
+
+ for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
+ const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+ const scudo::uptr MaxBlocks = AllocatedPagesCount * PageSize / BlockSize;
+
+ // Generate the random free list.
+ std::vector<scudo::uptr> FreeArray;
+ bool InFreeRange = false;
+ scudo::uptr CurrentRangeEnd = 0;
+    for (scudo::uptr J = 0; J < MaxBlocks; J++) {
+      if (J == CurrentRangeEnd) {
+        InFreeRange = (scudo::getRandomU32(&RandState) & 1U) == 1;
+        CurrentRangeEnd += (scudo::getRandomU32(&RandState) & 0x7f) + 1;
+      }
+      if (InFreeRange)
+        FreeArray.push_back(J * BlockSize);
+    }
+ if (FreeArray.empty())
+ continue;
+ // Shuffle the array to ensure that the order is irrelevant.
+ std::shuffle(FreeArray.begin(), FreeArray.end(), R);
+
+ // Build the FreeList from the FreeArray.
+ scudo::IntrusiveList<Batch> FreeList;
+ FreeList.clear();
+ Batch *CurrentBatch = nullptr;
+ for (auto const &Block : FreeArray) {
+ if (!CurrentBatch) {
+ CurrentBatch = new Batch;
+ CurrentBatch->clear();
+ FreeList.push_back(CurrentBatch);
+ }
+ CurrentBatch->add(Block);
+ if (CurrentBatch->getCount() == Batch::MaxCount)
+ CurrentBatch = nullptr;
+ }
+
+ // Release the memory.
+ ReleasedPagesRecorder Recorder;
+ releaseFreeMemoryToOS(&FreeList, 0, AllocatedPagesCount, BlockSize,
+ &Recorder);
+
+    // Verify that no released page is touched by a used chunk, and that all
+    // ranges of free chunks large enough to contain entire memory pages had
+    // those pages released.
+ scudo::uptr VerifiedReleasedPages = 0;
+ std::set<scudo::uptr> FreeBlocks(FreeArray.begin(), FreeArray.end());
+
+ scudo::uptr CurrentBlock = 0;
+ InFreeRange = false;
+ scudo::uptr CurrentFreeRangeStart = 0;
+ for (scudo::uptr I = 0; I <= MaxBlocks; I++) {
+ const bool IsFreeBlock =
+ FreeBlocks.find(CurrentBlock) != FreeBlocks.end();
+ if (IsFreeBlock) {
+ if (!InFreeRange) {
+ InFreeRange = true;
+ CurrentFreeRangeStart = CurrentBlock;
+ }
+ } else {
+ // Verify that this used chunk does not touch any released page.
+ const scudo::uptr StartPage = CurrentBlock / PageSize;
+ const scudo::uptr EndPage = (CurrentBlock + BlockSize - 1) / PageSize;
+ for (scudo::uptr J = StartPage; J <= EndPage; J++) {
+ const bool PageReleased = Recorder.ReportedPages.find(J * PageSize) !=
+ Recorder.ReportedPages.end();
+ EXPECT_EQ(false, PageReleased);
+ }
+
+ if (InFreeRange) {
+ InFreeRange = false;
+          // Verify that every memory page fully covered by this range of
+          // free chunks was released.
+ scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
+ while (P + PageSize <= CurrentBlock) {
+ const bool PageReleased =
+ Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
+ EXPECT_EQ(true, PageReleased);
+ VerifiedReleasedPages++;
+ P += PageSize;
+ }
+ }
+ }
+
+ CurrentBlock += BlockSize;
+ }
+
+ EXPECT_EQ(Recorder.ReportedPages.size(), VerifiedReleasedPages);
+
+ while (!FreeList.empty()) {
+ CurrentBatch = FreeList.front();
+ FreeList.pop_front();
+ delete CurrentBatch;
+ }
+ }
+}
+
+TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSDefault) {
+ testReleaseFreeMemoryToOS<scudo::DefaultSizeClassMap>();
+}
+
+TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSAndroid) {
+ testReleaseFreeMemoryToOS<scudo::AndroidSizeClassMap>();
+}
+
+TEST(ScudoReleaseTest, ReleaseFreeMemoryToOSSvelte) {
+ testReleaseFreeMemoryToOS<scudo::SvelteSizeClassMap>();
+}