From 66507f1f9d246d83f61c34750c05c31cc377dfcb Mon Sep 17 00:00:00 2001
From: Russell Gallop
Date: Wed, 16 Sep 2020 09:56:12 +0100
Subject: Comment out all of atomic_helpers.h to see what needs implementing on Windows.

---
 compiler-rt/lib/scudo/standalone/atomic_helpers.h | 253 +++++++++++-----------
 1 file changed, 126 insertions(+), 127 deletions(-)

diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index d88f5d7be642..c373950b063c 100644
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -13,133 +13,132 @@
 
 namespace scudo {
 
-enum memory_order {
-  memory_order_relaxed = 0,
-  memory_order_consume = 1,
-  memory_order_acquire = 2,
-  memory_order_release = 3,
-  memory_order_acq_rel = 4,
-  memory_order_seq_cst = 5
-};
-static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
-static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
-static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
-static_assert(memory_order_release == __ATOMIC_RELEASE, "");
-static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
-static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
-
-struct atomic_u8 {
-  typedef u8 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_u16 {
-  typedef u16 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_s32 {
-  typedef s32 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_u32 {
-  typedef u32 Type;
-  volatile Type ValDoNotUse;
-};
-
-struct atomic_u64 {
-  typedef u64 Type;
-  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
-  alignas(8) volatile Type ValDoNotUse;
-};
-
-struct atomic_uptr {
-  typedef uptr Type;
-  volatile Type ValDoNotUse;
-};
-
-template <typename T>
-inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  typename T::Type V;
-  __atomic_load(&A->ValDoNotUse, &V, MO);
-  return V;
-}
-
-template <typename T>
-inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  __atomic_store(&A->ValDoNotUse, &V, MO);
-}
-
-inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
-
-template <typename T>
-inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
-                                         memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
-                                         memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
-                                         memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
-                                        memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
-                                        memory_order MO) {
-  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
-  typename T::Type R;
-  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
-  return R;
-}
-
-template <typename T>
-inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
-                                           typename T::Type Xchg,
-                                           memory_order MO) {
-  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
-                                   __ATOMIC_RELAXED);
-}
-
-// Clutter-reducing helpers.
-
-template <typename T>
-inline typename T::Type atomic_load_relaxed(const volatile T *A) {
-  return atomic_load(A, memory_order_relaxed);
-}
-
-template <typename T>
-inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
-  atomic_store(A, V, memory_order_relaxed);
-}
-
-template <typename T>
-inline typename T::Type atomic_compare_exchange(volatile T *A,
-                                                typename T::Type Cmp,
-                                                typename T::Type Xchg) {
-  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
-  return Cmp;
-}
-
+//enum memory_order {
+//  memory_order_relaxed = 0,
+//  memory_order_consume = 1,
+//  memory_order_acquire = 2,
+//  memory_order_release = 3,
+//  memory_order_acq_rel = 4,
+//  memory_order_seq_cst = 5
+//};
+//static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+//static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+//static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+//static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+//static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+//static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
+//
+//struct atomic_u8 {
+//  typedef u8 Type;
+//  volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_u16 {
+//  typedef u16 Type;
+//  volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_s32 {
+//  typedef s32 Type;
+//  volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_u32 {
+//  typedef u32 Type;
+//  volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_u64 {
+//  typedef u64 Type;
+//  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+//  alignas(8) volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_uptr {
+//  typedef uptr Type;
+//  volatile Type ValDoNotUse;
+//};
+//
+//template <typename T>
+//inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  typename T::Type V;
+//  __atomic_load(&A->ValDoNotUse, &V, MO);
+//  return V;
+//}
+//
+//template <typename T>
+//inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  __atomic_store(&A->ValDoNotUse, &V, MO);
+//}
+//
+//inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+//                                         memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+//                                         memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+//                                         memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+//                                        memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+//                                        memory_order MO) {
+//  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+//  typename T::Type R;
+//  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+//  return R;
+//}
+//
+//template <typename T>
+//inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+//                                           typename T::Type Xchg,
+//                                           memory_order MO) {
+//  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+//                                   __ATOMIC_RELAXED);
+//}
+//
+//// Clutter-reducing helpers.
+//
+//template <typename T>
+//inline typename T::Type atomic_load_relaxed(const volatile T *A) {
+//  return atomic_load(A, memory_order_relaxed);
+//}
+//
+//template <typename T>
+//inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+//  atomic_store(A, V, memory_order_relaxed);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_compare_exchange(volatile T *A,
+//                                                typename T::Type Cmp,
+//                                                typename T::Type Xchg) {
+//  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+//  return Cmp;
+//}
 } // namespace scudo
 
 #endif // SCUDO_ATOMIC_H_
--
cgit v1.2.1
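
The helpers commented out above are all thin wrappers over the Clang/GCC __atomic_* builtins
(plus __sync_synchronize for the fence), which the MSVC compiler does not provide, so each of
them needs a Windows-side counterpart. As a purely hypothetical sketch, not part of this patch,
the 32-bit strong compare-exchange could be backed by the <intrin.h> interlocked intrinsics
along these lines; _InterlockedCompareExchange is a full barrier, so the memory_order argument
is simply dropped here:

// Hypothetical sketch only; the local u32 typedef stands in for scudo's u32
// from internal_defs.h.
#include <intrin.h>

typedef unsigned int u32;

struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};

inline bool atomic_compare_exchange_strong(volatile atomic_u32 *A,
                                           atomic_u32::Type *Cmp,
                                           atomic_u32::Type Xchg) {
  // _InterlockedCompareExchange returns the value previously stored at the
  // destination; the exchange happened iff that value equals *Cmp.
  const long Prev = _InterlockedCompareExchange(
      reinterpret_cast<volatile long *>(&A->ValDoNotUse),
      static_cast<long>(Xchg), static_cast<long>(*Cmp));
  if (static_cast<u32>(Prev) == *Cmp)
    return true;
  *Cmp = static_cast<u32>(Prev); // Report the observed value, as the builtin does.
  return false;
}

Equivalent wrappers would be needed for the other widths, e.g.
_InterlockedCompareExchange64 for atomic_u64.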