author     Dmitry Vyukov <dvyukov@google.com>   2017-07-12 12:34:12 +0000
committer  Dmitry Vyukov <dvyukov@google.com>   2017-07-12 12:34:12 +0000
commit     bc135c12cd648ba828b723b7913e05440e5641e9 (patch)
tree       ae943552e394301d717d73478a271e58849c1a6b
parent     5de8108ffc5363bc87abe057ba65b4a3fb0c1801 (diff)
tsan: give debug names to dense allocators
Improves the crash message on dense alloc overflow and makes it possible to tell which allocator overflowed.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@307780 91177308-0d34-0410-b5e6-96231b3b80d8
-rw-r--r--   lib/tsan/rtl/tsan_dense_alloc.h   11
-rw-r--r--   lib/tsan/rtl/tsan_rtl.cc           3
-rw-r--r--   lib/tsan/rtl/tsan_sync.cc          4
3 files changed, 13 insertions, 5 deletions
diff --git a/lib/tsan/rtl/tsan_dense_alloc.h b/lib/tsan/rtl/tsan_dense_alloc.h
index e9815c90a..16dbdf391 100644
--- a/lib/tsan/rtl/tsan_dense_alloc.h
+++ b/lib/tsan/rtl/tsan_dense_alloc.h
@@ -39,7 +39,7 @@ class DenseSlabAlloc {
   typedef DenseSlabAllocCache Cache;
   typedef typename Cache::IndexT IndexT;
 
-  DenseSlabAlloc() {
+  explicit DenseSlabAlloc(const char *name) {
     // Check that kL1Size and kL2Size are sane.
     CHECK_EQ(kL1Size & (kL1Size - 1), 0);
     CHECK_EQ(kL2Size & (kL2Size - 1), 0);
@@ -49,6 +49,7 @@ class DenseSlabAlloc {
     internal_memset(map_, 0, sizeof(map_));
     freelist_ = 0;
     fillpos_ = 0;
+    name_ = name;
   }
 
   ~DenseSlabAlloc() {
@@ -96,15 +97,19 @@ class DenseSlabAlloc {
   SpinMutex mtx_;
   IndexT freelist_;
   uptr fillpos_;
+  const char *name_;
 
   void Refill(Cache *c) {
     SpinMutexLock lock(&mtx_);
     if (freelist_ == 0) {
       if (fillpos_ == kL1Size) {
-        Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
+        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
+            name_, kL1Size, kL2Size);
         Die();
       }
-      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
+      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
+          name_, fillpos_, kL1Size, kL2Size);
+      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
       // Reserve 0 as invalid index.
       IndexT start = fillpos_ == 0 ? 1 : 0;
       for (IndexT i = start; i < kL2Size; i++) {
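
For context, the sketch below is a minimal, hypothetical illustration of the pattern the hunks above introduce; it is not the tsan implementation. A two-level slab allocator stores a caller-supplied debug name and mentions it, together with its kL1*kL2 geometry, both when it grows and when it overflows. NamedSlabAlloc, GrowAndGetSlab, and the use of calloc/fprintf/abort are stand-ins for DenseSlabAlloc::Refill(), MmapOrDie(), Printf()/VPrintf(), and Die().

// Minimal, hypothetical sketch (not the tsan code): a named two-level slab
// allocator that reports its debug name when it grows and when it overflows.
#include <cstdio>
#include <cstdlib>
#include <cstring>

template <typename T, unsigned long kL1, unsigned long kL2>
class NamedSlabAlloc {
 public:
  explicit NamedSlabAlloc(const char *name) : name_(name), fillpos_(0) {
    std::memset(map_, 0, sizeof(map_));
  }

  // Maps one more slab of kL2 objects, or dies with the allocator's name
  // once all kL1 first-level slots are in use.
  T *GrowAndGetSlab() {
    if (fillpos_ == kL1) {
      std::fprintf(stderr, "%s overflow (%lu*%lu). Dying.\n", name_, kL1, kL2);
      std::abort();
    }
    std::fprintf(stderr, "growing %s: %lu out of %lu*%lu\n",
                 name_, fillpos_, kL1, kL2);
    map_[fillpos_] = static_cast<T *>(std::calloc(kL2, sizeof(T)));
    return map_[fillpos_++];
  }

 private:
  const char *name_;       // debug name reported in diagnostics
  unsigned long fillpos_;  // number of first-level slots filled so far
  T *map_[kL1];            // first-level table of slab pointers
};

int main() {
  NamedSlabAlloc<long, 4, 1024> alloc("sync allocator");  // name mirrors tsan's call sites
  alloc.GrowAndGetSlab();  // logs: growing sync allocator: 0 out of 4*1024
}

The payoff is that a fatal overflow message now names a specific allocator and its capacity rather than printing the same generic "DenseSlabAllocator" text for every instance.
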
diff --git a/lib/tsan/rtl/tsan_rtl.cc b/lib/tsan/rtl/tsan_rtl.cc
index fa60f3247..a01525302 100644
--- a/lib/tsan/rtl/tsan_rtl.cc
+++ b/lib/tsan/rtl/tsan_rtl.cc
@@ -104,7 +104,8 @@ Context::Context()
   , racy_stacks(MBlockRacyStacks)
   , racy_addresses(MBlockRacyAddresses)
   , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
-  , fired_suppressions(8) {
+  , fired_suppressions(8)
+  , clock_alloc("clock allocator") {
 }
 
 // The objects are allocated in TLS, so one may rely on zero-initialization.
diff --git a/lib/tsan/rtl/tsan_sync.cc b/lib/tsan/rtl/tsan_sync.cc
index 4cc3cb89c..44ae558fa 100644
--- a/lib/tsan/rtl/tsan_sync.cc
+++ b/lib/tsan/rtl/tsan_sync.cc
@@ -53,7 +53,9 @@ void SyncVar::Reset(Processor *proc) {
   }
 }
 
-MetaMap::MetaMap() {
+MetaMap::MetaMap()
+    : block_alloc_("heap block allocator")
+    , sync_alloc_("sync allocator") {
   atomic_store(&uid_gen_, 0, memory_order_relaxed);
 }
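
As a usage note, the call-site side of the change (the Context and MetaMap hunks above) is simply to pass a distinct string per allocator in the owner's constructor initializer list. Below is a self-contained, hypothetical sketch of that pattern; NamedAlloc and Owner are illustrative stand-ins for DenseSlabAlloc and its owners.

// Hypothetical sketch of the call-site pattern: each owner names its
// allocators in its constructor initializer list, so a later overflow or
// growth report can be traced to a specific allocator.
struct NamedAlloc {
  explicit NamedAlloc(const char *name) : name_(name) {}
  const char *name_;  // reported in diagnostics
};

struct Owner {
  NamedAlloc block_alloc_;
  NamedAlloc sync_alloc_;
  Owner()
      : block_alloc_("heap block allocator")  // names mirror MetaMap::MetaMap()
      , sync_alloc_("sync allocator") {}
};

int main() {
  Owner owner;
  (void)owner;
}
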