author     Kostya Kortchinsky <kostyak@google.com>    2019-07-11 15:32:26 +0000
committer  Kostya Kortchinsky <kostyak@google.com>    2019-07-11 15:32:26 +0000
commit     1a513e395ea23df10e58c807ce264fbe5df596f7 (patch)
tree       24b49f4ee284bd3cb5b421b51066cc7b59449530
parent     2dff76d9e1a6b7ab9f871624e3572b56815b3050 (diff)
download   compiler-rt-1a513e395ea23df10e58c807ce264fbe5df596f7.tar.gz
[scudo][standalone] Merge Spin & Blocking mutex into a Hybrid one
Summary:
We ran into a problem on Fuchsia where yielding threads would never be deboosted, ultimately resulting in several threads spinning on the same TSD with no possibility for another thread to be scheduled, deadlocking the process. While this was fixed in Zircon, it led to discussions about whether spinning without a break condition was a good decision, and we settled on a new hybrid model that spins for a while, then blocks. Currently we use a number of spinning iterations that is mostly arbitrary (based on sanitizer_common values), but this can be tuned in the future.

Since we are touching `common.h`, we also use this change as a vehicle for an Android optimization: the page size is fixed in Bionic, so use a fixed value too.

Reviewers: morehouse, hctim, eugenis, dvyukov, vitalybuka

Reviewed By: hctim

Subscribers: srhines, delcypher, jfb, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D64358

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@365790 91177308-0d34-0410-b5e6-96231b3b80d8
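For illustration only, a minimal standalone sketch of the spin-then-block idea adopted here, using std::atomic and std::this_thread as stand-ins for the platform primitives (futex / sync_mutex_t) that the diff below actually blocks on; the class name and iteration count are assumptions of the example:

    #include <atomic>
    #include <thread>

    // Spin a bounded number of times, then fall back to a blocking path.
    // The real HybridMutex blocks in lockSlow() instead of yielding.
    class SpinThenBlockLock {
    public:
      bool tryLock() { return !Locked.exchange(true, std::memory_order_acquire); }
      void lock() {
        for (int I = 0; I < 10; I++)      // bounded spin phase
          if (tryLock())
            return;
        while (!tryLock())                // blocking fallback (modeled with yield)
          std::this_thread::yield();
      }
      void unlock() { Locked.store(false, std::memory_order_release); }
    private:
      std::atomic<bool> Locked{false};
    };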
-rw-r--r--  lib/scudo/standalone/atomic_helpers.h       8
-rw-r--r--  lib/scudo/standalone/bytemap.h              6
-rw-r--r--  lib/scudo/standalone/common.h               5
-rw-r--r--  lib/scudo/standalone/fuchsia.cc            21
-rw-r--r--  lib/scudo/standalone/linux.cc              53
-rw-r--r--  lib/scudo/standalone/mutex.h               92
-rw-r--r--  lib/scudo/standalone/primary32.h           14
-rw-r--r--  lib/scudo/standalone/primary64.h            8
-rw-r--r--  lib/scudo/standalone/quarantine.h          10
-rw-r--r--  lib/scudo/standalone/secondary.cc           4
-rw-r--r--  lib/scudo/standalone/secondary.h            2
-rw-r--r--  lib/scudo/standalone/stats.h                8
-rw-r--r--  lib/scudo/standalone/tests/map_test.cc      6
-rw-r--r--  lib/scudo/standalone/tests/mutex_test.cc   55
-rw-r--r--  lib/scudo/standalone/tsd.h                  2
-rw-r--r--  lib/scudo/standalone/tsd_exclusive.h        4
-rw-r--r--  lib/scudo/standalone/tsd_shared.h           4
17 files changed, 144 insertions, 158 deletions
diff --git a/lib/scudo/standalone/atomic_helpers.h b/lib/scudo/standalone/atomic_helpers.h
index 35d7369c1..47037d764 100644
--- a/lib/scudo/standalone/atomic_helpers.h
+++ b/lib/scudo/standalone/atomic_helpers.h
@@ -126,6 +126,14 @@ INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
atomic_store(A, V, memory_order_relaxed);
}
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+ typename T::Type Cmp,
+ typename T::Type Xchg) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+ return Cmp;
+}
+
} // namespace scudo
#endif // SCUDO_ATOMIC_H_
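The new helper returns the value observed before the exchange, so callers can branch on it directly (the CAS succeeded iff the returned value equals Cmp). For reference, the same semantics can be mimicked with std::atomic; a minimal sketch, with std::atomic as an assumption of the example rather than what the in-tree code uses:

    #include <atomic>
    #include <cstdint>

    // Returns the value held before the exchange attempt, like
    // scudo::atomic_compare_exchange: on failure compare_exchange_strong
    // writes the observed value back into Cmp, on success Cmp is unchanged.
    static uint32_t compareExchange(std::atomic<uint32_t> &A, uint32_t Cmp,
                                    uint32_t Xchg) {
      A.compare_exchange_strong(Cmp, Xchg, std::memory_order_acquire);
      return Cmp;
    }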
diff --git a/lib/scudo/standalone/bytemap.h b/lib/scudo/standalone/bytemap.h
index ab0091936..caeeb2fac 100644
--- a/lib/scudo/standalone/bytemap.h
+++ b/lib/scudo/standalone/bytemap.h
@@ -45,8 +45,8 @@ public:
map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
}
void init() {
- initLinkerInitialized();
Mutex.init();
+ initLinkerInitialized();
}
void reset() {
@@ -92,7 +92,7 @@ private:
u8 *getOrCreate(uptr Index) {
u8 *Res = get(Index);
if (!Res) {
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
if (!(Res = get(Index))) {
Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
@@ -103,7 +103,7 @@ private:
}
atomic_uptr *Level1Map;
- StaticSpinMutex Mutex;
+ HybridMutex Mutex;
};
} // namespace scudo
diff --git a/lib/scudo/standalone/common.h b/lib/scudo/standalone/common.h
index 313f89c9c..c015d1ca5 100644
--- a/lib/scudo/standalone/common.h
+++ b/lib/scudo/standalone/common.h
@@ -115,11 +115,12 @@ INLINE void yieldProcessor(u8 Count) {
// Platform specific functions.
-void yieldPlatform();
-
extern uptr PageSizeCached;
uptr getPageSizeSlow();
INLINE uptr getPageSizeCached() {
+ // Bionic uses a hardcoded value.
+ if (SCUDO_ANDROID)
+ return 4096U;
if (LIKELY(PageSizeCached))
return PageSizeCached;
return getPageSizeSlow();
diff --git a/lib/scudo/standalone/fuchsia.cc b/lib/scudo/standalone/fuchsia.cc
index cf0323819..896d346e7 100644
--- a/lib/scudo/standalone/fuchsia.cc
+++ b/lib/scudo/standalone/fuchsia.cc
@@ -23,11 +23,6 @@
namespace scudo {
-void yieldPlatform() {
- const zx_status_t Status = _zx_nanosleep(0);
- CHECK_EQ(Status, ZX_OK);
-}
-
uptr getPageSize() { return PAGE_SIZE; }
void NORETURN die() { __builtin_trap(); }
@@ -155,18 +150,20 @@ const char *getEnv(const char *Name) { return getenv(Name); }
// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
// because the Fuchsia implementation of sync_mutex_t has clang thread safety
// annotations. Were we to apply proper capability annotations to the top level
-// BlockingMutex class itself, they would not be needed. As it stands, the
+// HybridMutex class itself, they would not be needed. As it stands, the
// thread analysis thinks that we are locking the mutex and accidentally leaving
// it locked on the way out.
-void BlockingMutex::lock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
// Size and alignment must be compatible between both types.
- COMPILER_CHECK(sizeof(sync_mutex_t) <= sizeof(OpaqueStorage));
- COMPILER_CHECK(!(alignof(decltype(OpaqueStorage)) % alignof(sync_mutex_t)));
- sync_mutex_lock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+ return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_lock(&M);
}
-void BlockingMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
- sync_mutex_unlock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_unlock(&M);
}
u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
diff --git a/lib/scudo/standalone/linux.cc b/lib/scudo/standalone/linux.cc
index bfda03239..049477bba 100644
--- a/lib/scudo/standalone/linux.cc
+++ b/lib/scudo/standalone/linux.cc
@@ -37,8 +37,6 @@
namespace scudo {
-void yieldPlatform() { sched_yield(); }
-
uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
void NORETURN die() { abort(); }
@@ -46,15 +44,18 @@ void NORETURN die() { abort(); }
void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
UNUSED MapPlatformData *Data) {
int MmapFlags = MAP_PRIVATE | MAP_ANON;
- if (Flags & MAP_NOACCESS)
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
if (Addr) {
// Currently no scenario for a noaccess mapping with a fixed address.
DCHECK_EQ(Flags & MAP_NOACCESS, 0);
MmapFlags |= MAP_FIXED;
}
- const int MmapProt =
- (Flags & MAP_NOACCESS) ? PROT_NONE : PROT_READ | PROT_WRITE;
void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
if (P == MAP_FAILED) {
if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
@@ -84,22 +85,34 @@ void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
// Calling getenv should be fine (c)(tm) at any time.
const char *getEnv(const char *Name) { return getenv(Name); }
-void BlockingMutex::lock() {
- atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
- if (atomic_exchange(M, MtxLocked, memory_order_acquire) == MtxUnlocked)
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+ return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+ u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+ if (V == Unlocked)
return;
- while (atomic_exchange(M, MtxSleeping, memory_order_acquire) != MtxUnlocked)
- syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
- FUTEX_WAIT_PRIVATE, MtxSleeping, nullptr, nullptr, 0);
+ if (V != Sleeping)
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ while (V != Unlocked) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+ nullptr, nullptr, 0);
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ }
}
-void BlockingMutex::unlock() {
- atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
- const u32 V = atomic_exchange(M, MtxUnlocked, memory_order_release);
- DCHECK_NE(V, MtxUnlocked);
- if (V == MtxSleeping)
- syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
- FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
+void HybridMutex::unlock() {
+ if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+ atomic_store(&M, Unlocked, memory_order_release);
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+ nullptr, nullptr, 0);
+ }
}
u64 getMonotonicTime() {
@@ -141,8 +154,8 @@ bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
}
void outputRaw(const char *Buffer) {
- static StaticSpinMutex Mutex;
- SpinMutexLock L(&Mutex);
+ static HybridMutex Mutex;
+ ScopedLock L(Mutex);
write(2, Buffer, strlen(Buffer));
}
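The Linux path follows the three-state futex mutex from the Drepper paper cited above: 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters, so unlock only issues a FUTEX_WAKE when the previous state was 2. A standalone sketch of that state machine, paraphrasing the patch with std::atomic and a raw futex syscall (the helper names are assumptions of the example):

    #include <atomic>
    #include <cstdint>
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static std::atomic<uint32_t> Word{0};  // 0 unlocked, 1 locked, 2 locked+waiters

    static long futex(void *Addr, int Op, uint32_t Val) {
      return syscall(SYS_futex, Addr, Op, Val, nullptr, nullptr, 0);
    }

    void lock() {
      uint32_t C = 0;
      if (Word.compare_exchange_strong(C, 1, std::memory_order_acquire))
        return;                            // fast path: lock was unlocked
      if (C != 2)
        C = Word.exchange(2, std::memory_order_acquire);
      while (C != 0) {                     // sleep until the holder releases it
        futex(&Word, FUTEX_WAIT_PRIVATE, 2);
        C = Word.exchange(2, std::memory_order_acquire);
      }
    }

    void unlock() {
      if (Word.fetch_sub(1, std::memory_order_release) != 1) {  // waiters existed
        Word.store(0, std::memory_order_release);
        futex(&Word, FUTEX_WAKE_PRIVATE, 1);
      }
    }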
diff --git a/lib/scudo/standalone/mutex.h b/lib/scudo/standalone/mutex.h
index 58bc15898..b6dc9188d 100644
--- a/lib/scudo/standalone/mutex.h
+++ b/lib/scudo/standalone/mutex.h
@@ -12,82 +12,62 @@
#include "atomic_helpers.h"
#include "common.h"
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
namespace scudo {
-class StaticSpinMutex {
+class HybridMutex {
public:
- void init() { atomic_store_relaxed(&State, 0); }
-
- void lock() {
+ void init() { memset(this, 0, sizeof(*this)); }
+ bool tryLock();
+ NOINLINE void lock() {
if (tryLock())
return;
- lockSlow();
- }
-
- bool tryLock() {
- return atomic_exchange(&State, 1, memory_order_acquire) == 0;
- }
-
- void unlock() { atomic_store(&State, 0, memory_order_release); }
-
- void checkLocked() { CHECK_EQ(atomic_load_relaxed(&State), 1); }
-
-private:
- atomic_u8 State;
-
- void NOINLINE lockSlow() {
- for (u32 I = 0;; I++) {
- if (I < 10)
- yieldProcessor(10);
- else
- yieldPlatform();
- if (atomic_load_relaxed(&State) == 0 &&
- atomic_exchange(&State, 1, memory_order_acquire) == 0)
+ // The compiler may try to fully unroll the loop, ending up in a
+ // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+ // is large, ugly and unneeded, a compact loop is better for our purpose
+ // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+ for (u8 I = 0U; I < NumberOfTries; I++) {
+ yieldProcessor(NumberOfYields);
+ if (tryLock())
return;
}
+ lockSlow();
}
-};
-
-class SpinMutex : public StaticSpinMutex {
-public:
- SpinMutex() { init(); }
+ void unlock();
private:
- SpinMutex(const SpinMutex &) = delete;
- void operator=(const SpinMutex &) = delete;
-};
+ static constexpr u8 NumberOfTries = 10U;
+ static constexpr u8 NumberOfYields = 10U;
-class BlockingMutex {
-public:
- explicit constexpr BlockingMutex(LinkerInitialized) : OpaqueStorage{} {}
- BlockingMutex() { memset(this, 0, sizeof(*this)); }
- void lock();
- void unlock();
- void checkLocked() {
- atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
- CHECK_NE(MtxUnlocked, atomic_load_relaxed(M));
- }
+#if SCUDO_LINUX
+ atomic_u32 M;
+#elif SCUDO_FUCHSIA
+ sync_mutex_t M;
+#endif
-private:
- enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
- uptr OpaqueStorage[1];
+ void lockSlow();
};
-template <typename MutexType> class GenericScopedLock {
+class ScopedLock {
public:
- explicit GenericScopedLock(MutexType *M) : Mutex(M) { Mutex->lock(); }
- ~GenericScopedLock() { Mutex->unlock(); }
+ explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() { Mutex.unlock(); }
private:
- MutexType *Mutex;
+ HybridMutex &Mutex;
- GenericScopedLock(const GenericScopedLock &) = delete;
- void operator=(const GenericScopedLock &) = delete;
+ ScopedLock(const ScopedLock &) = delete;
+ void operator=(const ScopedLock &) = delete;
};
-typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-
} // namespace scudo
#endif // SCUDO_MUTEX_H_
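With the templated GenericScopedLock gone, call sites now take the mutex by reference through the single ScopedLock RAII type. A minimal usage sketch of the new interface, assuming the scudo headers are available (the Counter type is an assumption of the example):

    #include "common.h"
    #include "mutex.h"

    struct Counter {
      Counter() { Mutex.init(); }      // HybridMutex has no constructor; zero it first
      void inc() {
        scudo::ScopedLock L(Mutex);    // lock() spins briefly, then falls back to lockSlow()
        Value++;
      }                                // unlock() runs in ~ScopedLock
      scudo::HybridMutex Mutex;
      scudo::uptr Value = 0;
    };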
diff --git a/lib/scudo/standalone/primary32.h b/lib/scudo/standalone/primary32.h
index e89409d10..eade88a45 100644
--- a/lib/scudo/standalone/primary32.h
+++ b/lib/scudo/standalone/primary32.h
@@ -97,7 +97,7 @@ public:
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- BlockingMutexLock L(&Sci->Mutex);
+ ScopedLock L(Sci->Mutex);
TransferBatch *B = Sci->FreeList.front();
if (B)
Sci->FreeList.pop_front();
@@ -115,7 +115,7 @@ public:
DCHECK_LT(ClassId, NumClasses);
DCHECK_GT(B->getCount(), 0);
SizeClassInfo *Sci = getSizeClassInfo(ClassId);
- BlockingMutexLock L(&Sci->Mutex);
+ ScopedLock L(Sci->Mutex);
Sci->FreeList.push_front(B);
Sci->Stats.PushedBlocks += B->getCount();
if (Sci->CanRelease)
@@ -164,7 +164,7 @@ public:
void releaseToOS() {
for (uptr I = 1; I < NumClasses; I++) {
SizeClassInfo *Sci = getSizeClassInfo(I);
- BlockingMutexLock L(&Sci->Mutex);
+ ScopedLock L(Sci->Mutex);
releaseToOSMaybe(Sci, I, /*Force=*/true);
}
}
@@ -192,7 +192,7 @@ private:
};
struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
- BlockingMutex Mutex;
+ HybridMutex Mutex;
IntrusiveList<TransferBatch> FreeList;
SizeClassStats Stats;
bool CanRelease;
@@ -217,7 +217,7 @@ private:
const uptr MapEnd = MapBase + MapSize;
uptr Region = MapBase;
if (isAligned(Region, RegionSize)) {
- SpinMutexLock L(&RegionsStashMutex);
+ ScopedLock L(RegionsStashMutex);
if (NumberOfStashedRegions < MaxStashedRegions)
RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
else
@@ -237,7 +237,7 @@ private:
DCHECK_LT(ClassId, NumClasses);
uptr Region = 0;
{
- SpinMutexLock L(&RegionsStashMutex);
+ ScopedLock L(RegionsStashMutex);
if (NumberOfStashedRegions > 0)
Region = RegionsStash[--NumberOfStashedRegions];
}
@@ -389,7 +389,7 @@ private:
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
- StaticSpinMutex RegionsStashMutex;
+ HybridMutex RegionsStashMutex;
uptr NumberOfStashedRegions;
uptr RegionsStash[MaxStashedRegions];
};
diff --git a/lib/scudo/standalone/primary64.h b/lib/scudo/standalone/primary64.h
index 9c50e78d3..89a43cce3 100644
--- a/lib/scudo/standalone/primary64.h
+++ b/lib/scudo/standalone/primary64.h
@@ -100,7 +100,7 @@ public:
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
RegionInfo *Region = getRegionInfo(ClassId);
- BlockingMutexLock L(&Region->Mutex);
+ ScopedLock L(Region->Mutex);
TransferBatch *B = Region->FreeList.front();
if (B)
Region->FreeList.pop_front();
@@ -117,7 +117,7 @@ public:
void pushBatch(uptr ClassId, TransferBatch *B) {
DCHECK_GT(B->getCount(), 0);
RegionInfo *Region = getRegionInfo(ClassId);
- BlockingMutexLock L(&Region->Mutex);
+ ScopedLock L(Region->Mutex);
Region->FreeList.push_front(B);
Region->Stats.PushedBlocks += B->getCount();
if (Region->CanRelease)
@@ -168,7 +168,7 @@ public:
void releaseToOS() {
for (uptr I = 1; I < NumClasses; I++) {
RegionInfo *Region = getRegionInfo(I);
- BlockingMutexLock L(&Region->Mutex);
+ ScopedLock L(Region->Mutex);
releaseToOSMaybe(Region, I, /*Force=*/true);
}
}
@@ -194,7 +194,7 @@ private:
};
struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
- BlockingMutex Mutex;
+ HybridMutex Mutex;
IntrusiveList<TransferBatch> FreeList;
RegionStats Stats;
bool CanRelease;
diff --git a/lib/scudo/standalone/quarantine.h b/lib/scudo/standalone/quarantine.h
index ec985da76..bac36e01c 100644
--- a/lib/scudo/standalone/quarantine.h
+++ b/lib/scudo/standalone/quarantine.h
@@ -202,7 +202,7 @@ public:
void NOINLINE drain(CacheT *C, Callback Cb) {
{
- SpinMutexLock L(&CacheMutex);
+ ScopedLock L(CacheMutex);
Cache.transfer(C);
}
if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
@@ -211,7 +211,7 @@ public:
void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
{
- SpinMutexLock L(&CacheMutex);
+ ScopedLock L(CacheMutex);
Cache.transfer(C);
}
RecyleMutex.lock();
@@ -227,9 +227,9 @@ public:
private:
// Read-only data.
- alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex CacheMutex;
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
CacheT Cache;
- alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex RecyleMutex;
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
atomic_uptr MinSize;
atomic_uptr MaxSize;
alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
@@ -238,7 +238,7 @@ private:
CacheT Tmp;
Tmp.init();
{
- SpinMutexLock L(&CacheMutex);
+ ScopedLock L(CacheMutex);
// Go over the batches and merge partially filled ones to
// save some memory, otherwise batches themselves (since the memory used
// by them is counted against quarantine limit) can overcome the actual
diff --git a/lib/scudo/standalone/secondary.cc b/lib/scudo/standalone/secondary.cc
index c0de268be..75f9171f1 100644
--- a/lib/scudo/standalone/secondary.cc
+++ b/lib/scudo/standalone/secondary.cc
@@ -72,7 +72,7 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
H->BlockEnd = CommitBase + CommitSize;
H->Data = Data;
{
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
if (!Tail) {
Tail = H;
} else {
@@ -95,7 +95,7 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
void MapAllocator::deallocate(void *Ptr) {
LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
{
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
LargeBlock::Header *Prev = H->Prev;
LargeBlock::Header *Next = H->Next;
if (Prev) {
diff --git a/lib/scudo/standalone/secondary.h b/lib/scudo/standalone/secondary.h
index 016928cc6..9124e2a41 100644
--- a/lib/scudo/standalone/secondary.h
+++ b/lib/scudo/standalone/secondary.h
@@ -82,7 +82,7 @@ public:
}
private:
- StaticSpinMutex Mutex;
+ HybridMutex Mutex;
LargeBlock::Header *Tail;
uptr AllocatedBytes;
uptr FreedBytes;
diff --git a/lib/scudo/standalone/stats.h b/lib/scudo/standalone/stats.h
index 7fb9c9ed6..124367562 100644
--- a/lib/scudo/standalone/stats.h
+++ b/lib/scudo/standalone/stats.h
@@ -65,7 +65,7 @@ public:
}
void link(LocalStats *S) {
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
S->Next = Next;
S->Prev = this;
Next->Prev = S;
@@ -73,7 +73,7 @@ public:
}
void unlink(LocalStats *S) {
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
S->Prev->Next = S->Next;
S->Next->Prev = S->Prev;
for (uptr I = 0; I < StatCount; I++)
@@ -82,7 +82,7 @@ public:
void get(uptr *S) const {
memset(S, 0, StatCount * sizeof(uptr));
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
const LocalStats *Stats = this;
for (;;) {
for (uptr I = 0; I < StatCount; I++)
@@ -97,7 +97,7 @@ public:
}
private:
- mutable StaticSpinMutex Mutex;
+ mutable HybridMutex Mutex;
};
} // namespace scudo
diff --git a/lib/scudo/standalone/tests/map_test.cc b/lib/scudo/standalone/tests/map_test.cc
index 7c726e947..a645e2365 100644
--- a/lib/scudo/standalone/tests/map_test.cc
+++ b/lib/scudo/standalone/tests/map_test.cc
@@ -11,9 +11,15 @@
#include "gtest/gtest.h"
#include <string.h>
+#include <unistd.h>
static const char *MappingName = "scudo:test";
+TEST(ScudoMapTest, PageSize) {
+ EXPECT_EQ(scudo::getPageSizeCached(),
+ static_cast<scudo::uptr>(getpagesize()));
+}
+
TEST(ScudoMapTest, MapNoAccessUnmap) {
const scudo::uptr Size = 4 * scudo::getPageSizeCached();
scudo::MapPlatformData Data = {};
diff --git a/lib/scudo/standalone/tests/mutex_test.cc b/lib/scudo/standalone/tests/mutex_test.cc
index ce33db58b..930838c5e 100644
--- a/lib/scudo/standalone/tests/mutex_test.cc
+++ b/lib/scudo/standalone/tests/mutex_test.cc
@@ -12,15 +12,15 @@
#include <string.h>
-template <typename MutexType> class TestData {
+class TestData {
public:
- explicit TestData(MutexType *M) : Mutex(M) {
+ explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
for (scudo::u32 I = 0; I < Size; I++)
Data[I] = 0;
}
void write() {
- Lock L(Mutex);
+ scudo::ScopedLock L(Mutex);
T V0 = Data[0];
for (scudo::u32 I = 0; I < Size; I++) {
EXPECT_EQ(Data[I], V0);
@@ -29,14 +29,14 @@ public:
}
void tryWrite() {
- if (!Mutex->tryLock())
+ if (!Mutex.tryLock())
return;
T V0 = Data[0];
for (scudo::u32 I = 0; I < Size; I++) {
EXPECT_EQ(Data[I], V0);
Data[I]++;
}
- Mutex->unlock();
+ Mutex.unlock();
}
void backoff() {
@@ -48,10 +48,9 @@ public:
}
private:
- typedef scudo::GenericScopedLock<MutexType> Lock;
static const scudo::u32 Size = 64U;
typedef scudo::u64 T;
- MutexType *Mutex;
+ scudo::HybridMutex &Mutex;
ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
};
@@ -62,8 +61,8 @@ const scudo::u32 NumberOfIterations = 4 * 1024;
const scudo::u32 NumberOfIterations = 16 * 1024;
#endif
-template <typename MutexType> static void *lockThread(void *Param) {
- TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *lockThread(void *Param) {
+ TestData *Data = reinterpret_cast<TestData *>(Param);
for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
Data->write();
Data->backoff();
@@ -71,8 +70,8 @@ template <typename MutexType> static void *lockThread(void *Param) {
return 0;
}
-template <typename MutexType> static void *tryThread(void *Param) {
- TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *tryThread(void *Param) {
+ TestData *Data = reinterpret_cast<TestData *>(Param);
for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
Data->tryWrite();
Data->backoff();
@@ -80,42 +79,24 @@ template <typename MutexType> static void *tryThread(void *Param) {
return 0;
}
-template <typename MutexType> static void checkLocked(MutexType *M) {
- scudo::GenericScopedLock<MutexType> L(M);
- M->checkLocked();
-}
-
-TEST(ScudoMutexTest, SpinMutex) {
- scudo::SpinMutex M;
+TEST(ScudoMutexTest, Mutex) {
+ scudo::HybridMutex M;
M.init();
- TestData<scudo::SpinMutex> Data(&M);
+ TestData Data(M);
pthread_t Threads[NumberOfThreads];
for (scudo::u32 I = 0; I < NumberOfThreads; I++)
- pthread_create(&Threads[I], 0, lockThread<scudo::SpinMutex>, &Data);
+ pthread_create(&Threads[I], 0, lockThread, &Data);
for (scudo::u32 I = 0; I < NumberOfThreads; I++)
pthread_join(Threads[I], 0);
}
-TEST(ScudoMutexTest, SpinMutexTry) {
- scudo::SpinMutex M;
+TEST(ScudoMutexTest, MutexTry) {
+ scudo::HybridMutex M;
M.init();
- TestData<scudo::SpinMutex> Data(&M);
- pthread_t Threads[NumberOfThreads];
- for (scudo::u32 I = 0; I < NumberOfThreads; I++)
- pthread_create(&Threads[I], 0, tryThread<scudo::SpinMutex>, &Data);
- for (scudo::u32 I = 0; I < NumberOfThreads; I++)
- pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, BlockingMutex) {
- scudo::u64 MutexMemory[1024] = {};
- scudo::BlockingMutex *M =
- new (MutexMemory) scudo::BlockingMutex(scudo::LINKER_INITIALIZED);
- TestData<scudo::BlockingMutex> Data(M);
+ TestData Data(M);
pthread_t Threads[NumberOfThreads];
for (scudo::u32 I = 0; I < NumberOfThreads; I++)
- pthread_create(&Threads[I], 0, lockThread<scudo::BlockingMutex>, &Data);
+ pthread_create(&Threads[I], 0, tryThread, &Data);
for (scudo::u32 I = 0; I < NumberOfThreads; I++)
pthread_join(Threads[I], 0);
- checkLocked(M);
}
diff --git a/lib/scudo/standalone/tsd.h b/lib/scudo/standalone/tsd.h
index 10cb83f94..f24ff0196 100644
--- a/lib/scudo/standalone/tsd.h
+++ b/lib/scudo/standalone/tsd.h
@@ -57,7 +57,7 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
private:
- StaticSpinMutex Mutex;
+ HybridMutex Mutex;
atomic_uptr Precedence;
};
diff --git a/lib/scudo/standalone/tsd_exclusive.h b/lib/scudo/standalone/tsd_exclusive.h
index ce9affcb4..18cce1c56 100644
--- a/lib/scudo/standalone/tsd_exclusive.h
+++ b/lib/scudo/standalone/tsd_exclusive.h
@@ -60,7 +60,7 @@ template <class Allocator> struct TSDRegistryExT {
private:
void initOnceMaybe(Allocator *Instance) {
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
if (Initialized)
return;
initLinkerInitialized(Instance); // Sets Initialized.
@@ -82,7 +82,7 @@ private:
pthread_key_t PThreadKey;
bool Initialized;
TSD<Allocator> *FallbackTSD;
- StaticSpinMutex Mutex;
+ HybridMutex Mutex;
static THREADLOCAL ThreadState State;
static THREADLOCAL TSD<Allocator> ThreadTSD;
diff --git a/lib/scudo/standalone/tsd_shared.h b/lib/scudo/standalone/tsd_shared.h
index 48747f69f..0f0a83a3e 100644
--- a/lib/scudo/standalone/tsd_shared.h
+++ b/lib/scudo/standalone/tsd_shared.h
@@ -94,7 +94,7 @@ private:
}
void initOnceMaybe(Allocator *Instance) {
- SpinMutexLock L(&Mutex);
+ ScopedLock L(Mutex);
if (Initialized)
return;
initLinkerInitialized(Instance); // Sets Initialized.
@@ -152,7 +152,7 @@ private:
u32 NumberOfCoPrimes;
u32 CoPrimes[MaxTSDCount];
bool Initialized;
- StaticSpinMutex Mutex;
+ HybridMutex Mutex;
#if SCUDO_LINUX && !SCUDO_ANDROID
static THREADLOCAL TSD<Allocator> *ThreadTSD;
#endif