author    Dean Michael Berris <dberris@google.com>    2018-11-20 03:56:04 +0000
committer Dean Michael Berris <dberris@google.com>    2018-11-20 03:56:04 +0000
commit    57c94323fbb421fdef4592ad241302ad391da8f0 (patch)
tree      e07c3e255faf4b8ee56bc39410defce5ffa4dabe /lib/xray
parent    7ef733dc1cee2410d80ddb6a9b7ed7245b2ed4ab (diff)
download  compiler-rt-57c94323fbb421fdef4592ad241302ad391da8f0.tar.gz
[XRay] Add a test for allocator exhaustion
Use a more representative test of allocating small chunks for oddly-sized
(small) objects from an allocator that has a page's worth of memory.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@347286 91177308-0d34-0410-b5e6-96231b3b80d8
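The new AllocateBoundaries test in the diff below drives an allocator that has exactly one page of budget until it starts handing back null blocks. As a rough standalone sketch of that pattern (not the compiler-rt code): BumpAllocator, roundUpTo, and the fixed kPageSize/kCacheLineSize constants are illustrative stand-ins for XRay's Allocator<N>, RoundUpTo, and GetPageSizeCached(), assuming 4096-byte pages and 64-byte cache lines.

// Standalone illustration: a bump allocator that hands out blocks rounded up
// to a cache line from a fixed budget and returns nullptr once the budget is
// exhausted, which is the behaviour the new test drives to its limit.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

constexpr size_t kCacheLineSize = 64; // assumed; not queried from the target
constexpr size_t kPageSize = 4096;    // assumed page size

// Round S up to a multiple of Boundary; Boundary must be a power of two.
constexpr size_t roundUpTo(size_t S, size_t Boundary) {
  return (S + Boundary - 1) & ~(Boundary - 1);
}

template <size_t N> class BumpAllocator {
  std::vector<unsigned char> Store;
  size_t Next = 0;

public:
  explicit BumpAllocator(size_t MaxMemory)
      : Store(roundUpTo(MaxMemory, kCacheLineSize)) {}

  // Returns nullptr once another cache-line-rounded block no longer fits.
  void *allocate() {
    size_t BlockSize = roundUpTo(N, kCacheLineSize);
    if (Next + BlockSize > Store.size())
      return nullptr;
    void *Result = Store.data() + Next;
    Next += BlockSize;
    return Result;
  }
};

struct OddSizedData {
  int64_t A;
  int32_t B;
};

int main() {
  BumpAllocator<sizeof(OddSizedData)> A(kPageSize);
  size_t Count = 0;
  while (A.allocate() != nullptr)
    ++Count;
  // With a 4096-byte budget and 64-byte blocks this prints 64.
  std::printf("blocks before exhaustion: %zu\n", Count);
  return 0;
}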
Diffstat (limited to 'lib/xray')
-rw-r--r--  lib/xray/tests/unit/allocator_test.cc  20
-rw-r--r--  lib/xray/xray_allocator.h               15
2 files changed, 27 insertions, 8 deletions
diff --git a/lib/xray/tests/unit/allocator_test.cc b/lib/xray/tests/unit/allocator_test.cc
index be404160e..0177798b0 100644
--- a/lib/xray/tests/unit/allocator_test.cc
+++ b/lib/xray/tests/unit/allocator_test.cc
@@ -33,10 +33,28 @@ TEST(AllocatorTest, Allocate) {
 TEST(AllocatorTest, OverAllocate) {
   Allocator<sizeof(TestData)> A(sizeof(TestData));
   auto B1 = A.Allocate();
-  (void)B1;
+  ASSERT_NE(B1.Data, nullptr);
   auto B2 = A.Allocate();
   ASSERT_EQ(B2.Data, nullptr);
 }
 
+struct OddSizedData {
+  s64 A;
+  s32 B;
+};
+
+TEST(AllocatorTest, AllocateBoundaries) {
+  Allocator<sizeof(OddSizedData)> A(GetPageSizeCached());
+
+  // Keep allocating until we hit a nullptr block.
+  unsigned C = 0;
+  auto Expected =
+      GetPageSizeCached() / RoundUpTo(sizeof(OddSizedData), kCacheLineSize);
+  for (auto B = A.Allocate(); B.Data != nullptr; B = A.Allocate(), ++C)
+    ;
+
+  ASSERT_EQ(C, Expected);
+}
+
 } // namespace
 } // namespace __xray
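For concreteness, here is how the Expected value in AllocateBoundaries works out on a typical 64-bit target; the 4096-byte page, the 64-byte cache line, and the 16-byte struct size are assumptions of this illustration (the test itself queries GetPageSizeCached() at run time), and roundUpTo is a local stand-in for sanitizer_common's RoundUpTo.

#include <cstddef>
#include <cstdint>

// Stand-in for RoundUpTo; Boundary must be a power of two.
constexpr size_t roundUpTo(size_t S, size_t Boundary) {
  return (S + Boundary - 1) & ~(Boundary - 1);
}

// An s64 plus an s32: 12 bytes of data, padded to 16 for 8-byte alignment.
struct OddSizedData {
  int64_t A;
  int32_t B;
};

static_assert(sizeof(OddSizedData) == 16, "assumes typical 64-bit padding");
// One 4096-byte page yields 64 cache-line-sized blocks before exhaustion.
static_assert(4096 / roundUpTo(sizeof(OddSizedData), 64) == 64,
              "Expected == 64 under these assumptions");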
diff --git a/lib/xray/xray_allocator.h b/lib/xray/xray_allocator.h
index f77bccbd9..af63d9d37 100644
--- a/lib/xray/xray_allocator.h
+++ b/lib/xray/xray_allocator.h
@@ -53,7 +53,8 @@ template <class T> void deallocate(T *B) XRAY_NEVER_INSTRUMENT {
   internal_munmap(B, RoundedSize);
 }
 
-template <class T = uint8_t> T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
+template <class T = unsigned char>
+T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
   uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
   uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
@@ -111,8 +112,8 @@ template <size_t N> struct Allocator {
 
 private:
   const size_t MaxMemory{0};
-  uint8_t *BackingStore = nullptr;
-  uint8_t *AlignedNextBlock = nullptr;
+  unsigned char *BackingStore = nullptr;
+  unsigned char *AlignedNextBlock = nullptr;
   size_t AllocatedBlocks = 0;
   SpinMutex Mutex{};
 
@@ -141,7 +142,7 @@ private:
         return nullptr;
       }
 
-      AlignedNextBlock = reinterpret_cast<uint8_t *>(AlignedNextBlockNum);
+      AlignedNextBlock = reinterpret_cast<unsigned char *>(AlignedNextBlockNum);
 
       // Assert that AlignedNextBlock is cache-line aligned.
       DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
@@ -154,15 +155,15 @@ private:
     // Align the pointer we'd like to return to an appropriate alignment, then
     // advance the pointer from where to start allocations.
     void *Result = AlignedNextBlock;
-    AlignedNextBlock = reinterpret_cast<uint8_t *>(
-        reinterpret_cast<uint8_t *>(AlignedNextBlock) + N);
+    AlignedNextBlock = reinterpret_cast<unsigned char *>(
+        reinterpret_cast<unsigned char *>(AlignedNextBlock) + N);
     ++AllocatedBlocks;
     return Result;
   }
 
 public:
   explicit Allocator(size_t M) XRAY_NEVER_INSTRUMENT
-      : MaxMemory(nearest_boundary(M, kCacheLineSize)) {}
+      : MaxMemory(RoundUpTo(M, kCacheLineSize)) {}
 
   Block Allocate() XRAY_NEVER_INSTRUMENT { return {Alloc()}; }
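On the xray_allocator.h side, allocateBuffer rounds the requested size up to whole pages before mapping anonymous memory, and the constructor now rounds its budget up to a multiple of kCacheLineSize via sanitizer_common's RoundUpTo. As a rough Linux-style sketch of that page-rounding-plus-mmap pattern (the helper name and the reduced error handling are this sketch's own, not compiler-rt's):

#include <cstddef>
#include <cstdio>
#include <sys/mman.h>
#include <unistd.h>

// Round the request up to whole pages and map anonymous read/write memory,
// in the spirit of allocateBuffer(); returns nullptr on failure.
unsigned char *allocatePageRoundedBuffer(size_t Bytes) {
  size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t Rounded = ((Bytes + PageSize - 1) / PageSize) * PageSize;
  void *P = mmap(nullptr, Rounded, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return P == MAP_FAILED ? nullptr : static_cast<unsigned char *>(P);
}

int main() {
  // A 100-byte request still reserves one full page.
  size_t PageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  unsigned char *Buf = allocatePageRoundedBuffer(100);
  std::printf("%s\n", Buf ? "mapped one page" : "mmap failed");
  if (Buf)
    munmap(Buf, PageSize);
  return 0;
}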