author     Kostya Serebryany <kcc@google.com>    2013-11-25 11:33:41 +0000
committer  Kostya Serebryany <kcc@google.com>    2013-11-25 11:33:41 +0000
commit     dd83744f358a1374c826b27be0b367f6ebbab4f6 (patch)
tree       72753e52f7bbe8dbe58e0dcca52b9b23ff777699 /lib/sanitizer_common/sanitizer_allocator.h
parent     89836d20af4d09899a0d9fe49fa858fcbeb56318 (diff)
[sanitizer] Implement TwoLevelByteMap and use it for the internal allocator on 64-bit.
Summary:
Implement TwoLevelByteMap and use it for the internal allocator on 64-bit.
This reduces bss on 64-bit by ~8Mb because we don't use FlatByteMap on
64-bits any more. Dmitry, please check my understanding of atomics.

Reviewers: dvyukov

Reviewed By: dvyukov

CC: samsonov, llvm-commits

Differential Revision: http://llvm-reviews.chandlerc.com/D2259

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@195637 91177308-0d34-0410-b5e6-96231b3b80d8
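The lock-free read path in the patch below relies on an acquire/release publication pattern (double-checked locking), which is what the "check my understanding of atomics" request is about: the writer fully initializes a leaf array before publishing its pointer with a release store, so any reader whose acquire load sees a non-null pointer also sees initialized contents. A minimal standalone sketch of that pattern, using std::atomic and std::mutex rather than the sanitizer's internal atomic and mutex wrappers (all names here are illustrative, not part of the patch):

#include <atomic>
#include <cstring>
#include <mutex>

static std::atomic<unsigned char *> slot{nullptr};
static std::mutex init_mu;

unsigned char *GetOrCreateLeaf() {
  // Fast path: lock-free acquire load.
  unsigned char *p = slot.load(std::memory_order_acquire);
  if (!p) {
    std::lock_guard<std::mutex> lock(init_mu);
    // Re-check under the lock: another thread may have won the race.
    p = slot.load(std::memory_order_acquire);
    if (!p) {
      p = new unsigned char[4096];
      std::memset(p, 0, 4096);                   // initialize first...
      slot.store(p, std::memory_order_release);  // ...then publish
    }
  }
  return p;
}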
Diffstat (limited to 'lib/sanitizer_common/sanitizer_allocator.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_allocator.h | 64 ++++++++++++++-
1 file changed, 63 insertions(+), 1 deletion(-)
diff --git a/lib/sanitizer_common/sanitizer_allocator.h b/lib/sanitizer_common/sanitizer_allocator.h
index 5ea1edbc7..a8debd971 100644
--- a/lib/sanitizer_common/sanitizer_allocator.h
+++ b/lib/sanitizer_common/sanitizer_allocator.h
@@ -587,7 +587,69 @@ class FlatByteMap {
u8 map_[kSize];
};
-// FIXME: Also implement TwoLevelByteMap.
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ return map2[idx % kSize2];
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
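For context, a hypothetical instantiation sketch showing how the new class is used and where the bss saving in the commit message comes from; the template parameters below are illustrative, not the ones the internal allocator actually passes:

// 2^11 first-level pointers to 2^12-byte leaves: 2^23 (8M) slots total.
typedef TwoLevelByteMap<1 << 11, 1 << 12> ExampleByteMap;
static ExampleByteMap m;  // static storage, like the allocator's ByteMap

void Example() {
  m.TestOnlyInit();
  m.set(42, 1);   // mmaps the leaf covering index 42 on demand
  u8 v = m[42];   // lock-free read: acquire-load of the leaf pointer
  (void)v;
  m.TestOnlyUnmap();
}

// A FlatByteMap over the same range would reserve all 8 MB in .bss up
// front; the two-level map's static footprint is just map1_, i.e.
// (1 << 11) * sizeof(uptr) = 16 KB on 64-bit, with leaves mmaped lazily.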