author    Kostya Kortchinsky <kostyak@google.com>  2019-06-11 19:50:12 +0000
committer Kostya Kortchinsky <kostyak@google.com>  2019-06-11 19:50:12 +0000
commit    7bc6ce1b225a77ebb2707922ae863a2ce223eabc
tree      2e93b31093ff722b647b1a2bc0d9a3db88655a29
parent    dd1051d9cd25f713c3b050c157bfe1ddf44ffb84
[scudo][standalone] Unmap memory in tests
Summary:
The more tests are added, the more we are limited by the size of the
address space on 32-bit. Implement `unmapTestOnly` all around (like it is
in sanitizer_common) to be able to free up some memory. This is not
intended to be a proper "destructor" for an allocator, but it lets the
tests avoid failing due to having no memory left.

Reviewers: morehouse, vitalybuka, eugenis, hctim

Reviewed By: morehouse

Subscribers: delcypher, jfb, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D63146

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@363095 91177308-0d34-0410-b5e6-96231b3b80d8
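For context, the test changes below all pair std::unique_ptr with a custom
deleter so that unmapTestOnly() returns the mapped pages before the object is
deleted. A minimal, self-contained sketch of that pattern, assuming a
hypothetical FakeAllocator type (FakeAllocator and its no-op unmapTestOnly()
are illustrative stand-ins, not part of this patch):

#include <memory>

// Hypothetical stand-in for the Scudo allocator classes touched below.
struct FakeAllocator {
  void init(int ReleaseToOsInterval) { (void)ReleaseToOsInterval; }
  void unmapTestOnly() { /* would unmap() the regions owned by this object */ }
};

int main() {
  // The deleter runs unmapTestOnly() before delete, so the address space
  // is released even though the class has no real destructor.
  auto Deleter = [](FakeAllocator *A) {
    A->unmapTestOnly();
    delete A;
  };
  std::unique_ptr<FakeAllocator, decltype(Deleter)> Allocator(
      new FakeAllocator, Deleter);
  Allocator->init(/*ReleaseToOsInterval=*/-1);
  return 0; // Deleter fires here, unmapping before the delete.
}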
Diffstat (limited to 'lib/scudo')
 lib/scudo/standalone/bytemap.h             |  8
 lib/scudo/standalone/primary32.h           | 11
 lib/scudo/standalone/primary64.h           |  6
 lib/scudo/standalone/tests/bytemap_test.cc |  5
 lib/scudo/standalone/tests/primary_test.cc | 29
 lib/scudo/standalone/tests/tsd_test.cc     | 22
 lib/scudo/standalone/tsd_exclusive.h       |  4
 lib/scudo/standalone/tsd_shared.h          |  5
 8 files changed, 74 insertions(+), 16 deletions(-)
diff --git a/lib/scudo/standalone/bytemap.h b/lib/scudo/standalone/bytemap.h
index 2c8ba1fd0..ab0091936 100644
--- a/lib/scudo/standalone/bytemap.h
+++ b/lib/scudo/standalone/bytemap.h
@@ -22,6 +22,8 @@ public:
}
void init() { initLinkerInitialized(); }
+ void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); }
+
void set(uptr Index, u8 Value) {
DCHECK_LT(Index, Size);
DCHECK_EQ(0U, Map[Index]);
@@ -57,6 +59,12 @@ public:
memset(Level1Map, 0, sizeof(atomic_uptr) * Level1Size);
}
+ void unmapTestOnly() {
+ reset();
+ unmap(reinterpret_cast<void *>(Level1Map),
+ sizeof(atomic_uptr) * Level1Size);
+ }
+
uptr size() const { return Level1Size * Level2Size; }
void set(uptr Index, u8 Value) {
diff --git a/lib/scudo/standalone/primary32.h b/lib/scudo/standalone/primary32.h
index 066e63749..e89409d10 100644
--- a/lib/scudo/standalone/primary32.h
+++ b/lib/scudo/standalone/primary32.h
@@ -83,6 +83,17 @@ public:
initLinkerInitialized(ReleaseToOsInterval);
}
+ void unmapTestOnly() {
+ while (NumberOfStashedRegions > 0)
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ // TODO(kostyak): unmap the TransferBatch regions as well.
+ for (uptr I = 0; I < NumRegions; I++)
+ if (PossibleRegions[I])
+ unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+ PossibleRegions.unmapTestOnly();
+ }
+
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
diff --git a/lib/scudo/standalone/primary64.h b/lib/scudo/standalone/primary64.h
index 44e9b9c47..9c50e78d3 100644
--- a/lib/scudo/standalone/primary64.h
+++ b/lib/scudo/standalone/primary64.h
@@ -91,6 +91,12 @@ public:
initLinkerInitialized(ReleaseToOsInterval);
}
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+ unmap(reinterpret_cast<void *>(RegionInfoArray),
+ sizeof(RegionInfo) * NumClasses);
+ }
+
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
diff --git a/lib/scudo/standalone/tests/bytemap_test.cc b/lib/scudo/standalone/tests/bytemap_test.cc
index 615b946c5..ea34dc0ea 100644
--- a/lib/scudo/standalone/tests/bytemap_test.cc
+++ b/lib/scudo/standalone/tests/bytemap_test.cc
@@ -28,13 +28,14 @@ TEST(ScudoByteMapTest, FlatByteMap) {
const scudo::uptr Size = 1U << 10;
scudo::FlatByteMap<Size> Map;
testMap(Map, Size);
+ Map.unmapTestOnly();
}
TEST(ScudoByteMapTest, TwoLevelByteMap) {
const scudo::uptr Size1 = 1U << 6, Size2 = 1U << 12;
scudo::TwoLevelByteMap<Size1, Size2> Map;
testMap(Map, Size1 * Size2);
- Map.reset();
+ Map.unmapTestOnly();
}
using TestByteMap = scudo::TwoLevelByteMap<1U << 12, 1U << 13>;
@@ -69,5 +70,5 @@ TEST(ScudoByteMapTest, ThreadedTwoLevelByteMap) {
}
for (scudo::uptr I = 0; I < NumberOfThreads; I++)
pthread_join(T[I], 0);
- Map.reset();
+ Map.unmapTestOnly();
}
diff --git a/lib/scudo/standalone/tests/primary_test.cc b/lib/scudo/standalone/tests/primary_test.cc
index 0b074f3d8..b4abbc208 100644
--- a/lib/scudo/standalone/tests/primary_test.cc
+++ b/lib/scudo/standalone/tests/primary_test.cc
@@ -22,7 +22,11 @@
template <typename Primary> static void testPrimary() {
const scudo::uptr NumberOfAllocations = 32U;
- std::unique_ptr<Primary> Allocator(new Primary);
+ auto Deleter = [](Primary *P) {
+ P->unmapTestOnly();
+ delete P;
+ };
+ std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
Allocator->init(/*ReleaseToOsInterval=*/-1);
typename Primary::CacheT Cache;
Cache.init(nullptr, Allocator.get());
@@ -84,10 +88,15 @@ TEST(ScudoPrimaryTest, Primary64OOM) {
Allocator.releaseToOS();
Allocator.printStats();
EXPECT_EQ(AllocationFailed, true);
+ Allocator.unmapTestOnly();
}
template <typename Primary> static void testIteratePrimary() {
- std::unique_ptr<Primary> Allocator(new Primary);
+ auto Deleter = [](Primary *P) {
+ P->unmapTestOnly();
+ delete P;
+ };
+ std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
Allocator->init(/*ReleaseToOsInterval=*/-1);
typename Primary::CacheT Cache;
Cache.init(nullptr, Allocator.get());
@@ -125,10 +134,6 @@ TEST(ScudoPrimaryTest, PrimaryIterate) {
testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
}
-// TODO(kostyak): reenable on 32-bit after implementing unmapTestOnly for the
-// primary: we are running out of addressable space without.
-#if SCUDO_WORDSIZE == 64U
-
static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready = false;
@@ -143,7 +148,7 @@ template <typename Primary> static void performAllocations(Primary *Allocator) {
Cv.wait(Lock);
}
for (scudo::uptr I = 0; I < 256U; I++) {
- const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize;
+ const scudo::uptr Size = std::rand() % Primary::SizeClassMap::MaxSize / 4;
const scudo::uptr ClassId = Primary::SizeClassMap::getClassIdBySize(Size);
void *P = Cache.allocate(ClassId);
if (P)
@@ -158,7 +163,11 @@ template <typename Primary> static void performAllocations(Primary *Allocator) {
}
template <typename Primary> static void testPrimaryThreaded() {
- std::unique_ptr<Primary> Allocator(new Primary);
+ auto Deleter = [](Primary *P) {
+ P->unmapTestOnly();
+ delete P;
+ };
+ std::unique_ptr<Primary, decltype(Deleter)> Allocator(new Primary, Deleter);
Allocator->init(/*ReleaseToOsInterval=*/-1);
std::thread Threads[32];
for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
@@ -177,7 +186,5 @@ template <typename Primary> static void testPrimaryThreaded() {
TEST(ScudoPrimaryTest, PrimaryThreaded) {
using SizeClassMap = scudo::SvelteSizeClassMap;
testPrimaryThreaded<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
- testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 28U>>();
+ testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
}
-
-#endif // SCUDO_WORDSIZE == 64U
diff --git a/lib/scudo/standalone/tests/tsd_test.cc b/lib/scudo/standalone/tests/tsd_test.cc
index b1badd2d7..9ab101016 100644
--- a/lib/scudo/standalone/tests/tsd_test.cc
+++ b/lib/scudo/standalone/tests/tsd_test.cc
@@ -32,6 +32,7 @@ public:
}
void reset() { memset(this, 0, sizeof(*this)); }
+ void unmapTestOnly() { TSDRegistry.unmapTestOnly(); }
void initCache(CacheT *Cache) { memset(Cache, 0, sizeof(*Cache)); }
void commitBack(scudo::TSD<MockAllocator> *TSD) {}
TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
@@ -60,7 +61,12 @@ struct ExclusiveCaches {
TEST(ScudoTSDTest, TSDRegistryInit) {
using AllocatorT = MockAllocator<OneCache>;
- std::unique_ptr<AllocatorT> Allocator(new AllocatorT);
+ auto Deleter = [](AllocatorT *A) {
+ A->unmapTestOnly();
+ delete A;
+ };
+ std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+ Deleter);
Allocator->reset();
EXPECT_FALSE(Allocator->isInitialized());
@@ -70,7 +76,12 @@ TEST(ScudoTSDTest, TSDRegistryInit) {
}
template <class AllocatorT> static void testRegistry() {
- std::unique_ptr<AllocatorT> Allocator(new AllocatorT);
+ auto Deleter = [](AllocatorT *A) {
+ A->unmapTestOnly();
+ delete A;
+ };
+ std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+ Deleter);
Allocator->reset();
EXPECT_FALSE(Allocator->isInitialized());
@@ -131,7 +142,12 @@ template <typename AllocatorT> static void stressCache(AllocatorT *Allocator) {
}
template <class AllocatorT> static void testRegistryThreaded() {
- std::unique_ptr<AllocatorT> Allocator(new AllocatorT);
+ auto Deleter = [](AllocatorT *A) {
+ A->unmapTestOnly();
+ delete A;
+ };
+ std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+ Deleter);
Allocator->reset();
std::thread Threads[32];
for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
diff --git a/lib/scudo/standalone/tsd_exclusive.h b/lib/scudo/standalone/tsd_exclusive.h
index 0634e0b3e..ce9affcb4 100644
--- a/lib/scudo/standalone/tsd_exclusive.h
+++ b/lib/scudo/standalone/tsd_exclusive.h
@@ -37,6 +37,10 @@ template <class Allocator> struct TSDRegistryExT {
initLinkerInitialized(Instance);
}
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
+ }
+
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
if (LIKELY(State != ThreadState::NotInitialized))
return;
diff --git a/lib/scudo/standalone/tsd_shared.h b/lib/scudo/standalone/tsd_shared.h
index 61305660f..126d74355 100644
--- a/lib/scudo/standalone/tsd_shared.h
+++ b/lib/scudo/standalone/tsd_shared.h
@@ -47,6 +47,11 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
initLinkerInitialized(Instance);
}
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(TSDs),
+ sizeof(TSD<Allocator>) * NumberOfTSDs);
+ }
+
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
UNUSED bool MinimalInit) {
if (LIKELY(getCurrentTSD()))