author    Luís Marques <luismarques@lowrisc.org>    2021-03-30 15:07:49 +0100
committer Luís Marques <luismarques@lowrisc.org>    2021-03-30 15:07:49 +0100
commit    f00a996acf21d2d253c568a6bf3a5b123c301f56 (patch)
tree      6cea8ff4a169f7354c1eb415bc08e2cfe09485a6
parent    ce03a862372a6f36d2fcf80dc80052aa155fcae8 (diff)
[Sanitizer][RISCV][AArch64][Android] Adjust allocator tests
On 64-bit systems with small VMAs (e.g. 39-bit) we can't use SizeClassAllocator64 parameterized with size class maps containing a large number of classes, as that will make the allocator region size too small (< 2^32). Several tests were already disabled for Android because of this.

This patch provides the correct allocator configuration for RISC-V (riscv64), generalizes the gating condition for tests that can't be enabled for small VMA systems, and tweaks the tests that can be made compatible with those systems to enable them.

I think the previous gating on Android should instead be AArch64+Android, so the patch reflects that.
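For context, a minimal sketch of the arithmetic behind that constraint: SizeClassAllocator64 splits its mapping into one region per size class, so a map with more classes yields smaller regions. The region-size formula and the class count below are illustrative assumptions, not values copied from the allocator headers.

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative numbers only: a 128 GiB allocator space (what fits under a
  // 39-bit VMA) divided among an assumed 64 size-class regions.
  const uint64_t kAllocatorSize = 0x2000000000ULL;  // 128 GiB
  const uint64_t kNumClassesRounded = 64;           // assumed class count
  const uint64_t kRegionSize = kAllocatorSize / kNumClassesRounded;
  // 128 GiB / 64 = 2 GiB, below the 2^32 minimum the message refers to,
  // which is why small-VMA targets need fewer (or denser) size classes.
  std::printf("region size = 0x%llx (%s 2^32)\n",
              (unsigned long long)kRegionSize,
              kRegionSize < (1ULL << 32) ? "<" : ">=");
  return 0;
}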
-rw-r--r--  compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp | 46
1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
index 38da7f0184c0..58f1ef404ba6 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
@@ -36,6 +36,17 @@ using namespace __sanitizer;
#define SKIP_ON_SOLARIS_SPARCV9(x) x
#endif
+// On 64-bit systems with small virtual address spaces (e.g. 39-bit) we can't
+// use size class maps with a large number of classes, as that will make the
+// SizeClassAllocator64 region size too small (< 2^32).
+#if SANITIZER_ANDROID && defined(__aarch64__)
+#define ALLOCATOR64_SMALL_SIZE 1
+#elif SANITIZER_RISCV64
+#define ALLOCATOR64_SMALL_SIZE 1
+#else
+#define ALLOCATOR64_SMALL_SIZE 0
+#endif
+
// Too slow for debug build
#if !SANITIZER_DEBUG
@@ -53,6 +64,11 @@ static const uptr kAllocatorSpace = 0x3000000000ULL;
static const uptr kAllocatorSize = 0x2000000000ULL;
static const u64 kAddressSpaceSize = 1ULL << 39;
typedef VeryCompactSizeClassMap SizeClassMap;
+#elif SANITIZER_RISCV64
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+static const u64 kAddressSpaceSize = 1ULL << 38;
+typedef VeryDenseSizeClassMap SizeClassMap;
#else
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
@@ -295,7 +311,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Dynamic) {
TestSizeClassAllocator<Allocator64Dynamic>();
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
// Android only has 39-bit address space, so mapping 2 * kAllocatorSize
// sometimes fails.
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
@@ -303,7 +319,6 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicPremapped) {
TestSizeClassAllocator<Allocator64Dynamic>(h.Addr());
}
-//FIXME(kostyak): find values so that those work on Android as well.
TEST(SanitizerCommon, SizeClassAllocator64Compact) {
TestSizeClassAllocator<Allocator64Compact>();
}
@@ -387,7 +402,7 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedMetadataStress) {
ScopedPremappedHeap h;
SizeClassAllocatorMetadataStress<Allocator64Dynamic>(h.Addr());
@@ -440,7 +455,7 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
1ULL << (SANITIZER_ANDROID ? 31 : 33));
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedGetBlockBegin) {
ScopedPremappedHeap h;
SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
@@ -557,7 +572,7 @@ TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
Allocator64 a;
a.Init(kReleaseToOSIntervalNever);
@@ -571,7 +586,8 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
uint32_t chunks[kNumChunks];
bool allocation_failed = false;
for (int i = 0; i < 1000000; i++) {
- if (!a.GetFromAllocator(&stats, 52, chunks, kNumChunks)) {
+ uptr class_id = a.kNumClasses - 1;
+ if (!a.GetFromAllocator(&stats, class_id, chunks, kNumChunks)) {
allocation_failed = true;
break;
}
@@ -735,7 +751,7 @@ TEST(SanitizerCommon, CombinedAllocator64Dynamic) {
TestCombinedAllocator<Allocator64Dynamic>();
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
#if !SANITIZER_WINDOWS
// Windows fails to map 1TB, so disable this test.
TEST(SanitizerCommon, CombinedAllocator64DynamicPremapped) {
@@ -804,7 +820,7 @@ TEST(SanitizerCommon, SizeClassAllocator64DynamicLocalCache) {
TestSizeClassAllocatorLocalCache<Allocator64Dynamic>();
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedLocalCache) {
ScopedPremappedHeap h;
TestSizeClassAllocatorLocalCache<Allocator64Dynamic>(h.Addr());
@@ -992,7 +1008,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
TEST(SanitizerCommon, SizeClassAllocator64DynamicIteration) {
TestSizeClassAllocatorIteration<Allocator64Dynamic>();
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64DynamicPremappedIteration) {
ScopedPremappedHeap h;
TestSizeClassAllocatorIteration<Allocator64Dynamic>(h.Addr());
@@ -1072,8 +1088,8 @@ TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
// Don't test OOM conditions on Win64 because it causes other tests on the same
// machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
-typedef __sanitizer::SizeClassMap<3, 4, 8, 38, 128, 16> SpecialSizeClassMap;
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+typedef __sanitizer::SizeClassMap<2, 22, 22, 34, 128, 16> SpecialSizeClassMap;
template <typename AddressSpaceViewTy = LocalAddressSpaceView>
struct AP64_SpecialSizeClassMap {
static const uptr kSpaceBeg = kAllocatorSpace;
@@ -1100,7 +1116,7 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
// ...one man is on a mission to overflow a region with a series of
// successive allocations.
- const uptr kClassID = 107;
+ const uptr kClassID = ALLOCATOR64_SMALL_SIZE ? 18 : 24;
const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
ASSERT_LT(2 * kAllocationSize, kRegionSize);
ASSERT_GT(3 * kAllocationSize, kRegionSize);
@@ -1108,7 +1124,7 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
EXPECT_NE(cache.Allocate(a, kClassID), nullptr);
EXPECT_EQ(cache.Allocate(a, kClassID), nullptr);
- const uptr Class2 = 100;
+ const uptr Class2 = ALLOCATOR64_SMALL_SIZE ? 15 : 21;
const uptr Size2 = SpecialSizeClassMap::Size(Class2);
ASSERT_EQ(Size2 * 8, kRegionSize);
char *p[7];
@@ -1394,7 +1410,7 @@ TEST(SanitizerCommon, SizeClassAllocator64ReleaseFreeMemoryToOS) {
TestReleaseFreeMemoryToOS<Allocator64>();
}
-#if !SANITIZER_ANDROID
+#if !ALLOCATOR64_SMALL_SIZE
TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
TestReleaseFreeMemoryToOS<Allocator64Compact>();
}
@@ -1402,7 +1418,7 @@ TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
TestReleaseFreeMemoryToOS<Allocator64VeryCompact>();
}
-#endif // !SANITIZER_ANDROID
+#endif // !ALLOCATOR64_SMALL_SIZE
#endif // SANITIZER_CAN_USE_ALLOCATOR64
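As a sanity check on the new SpecialSizeClassMap parameters and class IDs, here is a standalone sketch that recomputes the sizes the PopulateFreeListOOM test depends on. The Size() function is a transcription of my reading of SizeClassMap's geometric size scheme, and the region size assumes the RISC-V configuration above (128 GiB space, class count rounded to 32); treat both as assumptions rather than values read back from the headers.

#include <cassert>
#include <cstdint>

// Mirrors SizeClassMap<2, 22, 22, 34, 128, 16>: kNumBits = 2,
// kMinSizeLog = kMidSizeLog = 22 (4 MiB), kMaxSizeLog = 34 (16 GiB).
static const uint64_t kMinSizeLog = 22;
static const uint64_t kMidSize = 1ULL << 22;
static const uint64_t kMidClass = kMidSize >> kMinSizeLog;  // == 1
static const uint64_t S = 2 - 1;                            // kNumBits - 1
static const uint64_t M = (1ULL << S) - 1;

// Assumed transcription of SizeClassMap::Size() for regular class IDs.
static uint64_t Size(uint64_t class_id) {
  if (class_id <= kMidClass)
    return class_id << kMinSizeLog;
  class_id -= kMidClass;
  uint64_t t = kMidSize << (class_id >> S);
  return t + (t >> S) * (class_id & M);
}

int main() {
  // Assumed RISC-V region size: 128 GiB space over 32 rounded classes.
  const uint64_t kRegionSize = 0x2000000000ULL / 32;  // 4 GiB
  // kClassID = 18 -> 1.5 GiB: two allocations fit in a region, three don't.
  assert(2 * Size(18) < kRegionSize);
  assert(3 * Size(18) > kRegionSize);
  // Class2 = 15 -> 512 MiB: exactly eight chunks per region.
  assert(Size(15) * 8 == kRegionSize);
  return 0;
}

Under the same assumptions, the non-small-VMA IDs picked by the patch (24 and 21) satisfy the same relations against the default configuration's 1 TiB space split into 32 regions of 32 GiB: Size(24) = 12 GiB and Size(21) = 4 GiB.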