author    Russell Gallop <russell.gallop@sony.com>  2020-09-16 09:56:12 +0100
committer Russell Gallop <russell.gallop@sony.com>  2021-01-28 11:14:29 +0000
commit    66507f1f9d246d83f61c34750c05c31cc377dfcb (patch)
tree      2e21d9923e7580320e13a799e7ce929da916692b
parent    18b989790c8359f80aa63944478cf8f72e2bc696 (diff)
download  llvm-66507f1f9d246d83f61c34750c05c31cc377dfcb.tar.gz
Comment out all of atomic_helpers.h to see what needs implementing on Windows.
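For reference, a minimal sketch (not part of this commit) of how one of these helpers might be backed by an MSVC intrinsic when the __atomic_* builtins are unavailable on Windows. The choice of _InterlockedExchangeAdd and the exact signature here are assumptions for illustration, not the eventual implementation:

    // Hypothetical Windows fallback for atomic_fetch_add on atomic_u32.
    // _InterlockedExchangeAdd operates on 32-bit longs, returns the value
    // held *before* the addition, and implies a full barrier, which is at
    // least as strong as any memory_order accepted by this interface.
    #include <intrin.h>

    inline u32 atomic_fetch_add(volatile atomic_u32 *A, u32 V, memory_order) {
      return static_cast<u32>(_InterlockedExchangeAdd(
          reinterpret_cast<volatile long *>(&A->ValDoNotUse),
          static_cast<long>(V)));
    }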
-rw-r--r--  compiler-rt/lib/scudo/standalone/atomic_helpers.h | 253
1 file changed, 126 insertions(+), 127 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index d88f5d7be642..c373950b063c 100644
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -13,133 +13,132 @@
namespace scudo {
-enum memory_order {
- memory_order_relaxed = 0,
- memory_order_consume = 1,
- memory_order_acquire = 2,
- memory_order_release = 3,
- memory_order_acq_rel = 4,
- memory_order_seq_cst = 5
-};
-static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
-static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
-static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
-static_assert(memory_order_release == __ATOMIC_RELEASE, "");
-static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
-static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
-
-struct atomic_u8 {
- typedef u8 Type;
- volatile Type ValDoNotUse;
-};
-
-struct atomic_u16 {
- typedef u16 Type;
- volatile Type ValDoNotUse;
-};
-
-struct atomic_s32 {
- typedef s32 Type;
- volatile Type ValDoNotUse;
-};
-
-struct atomic_u32 {
- typedef u32 Type;
- volatile Type ValDoNotUse;
-};
-
-struct atomic_u64 {
- typedef u64 Type;
- // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
- alignas(8) volatile Type ValDoNotUse;
-};
-
-struct atomic_uptr {
- typedef uptr Type;
- volatile Type ValDoNotUse;
-};
-
-template <typename T>
-inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- typename T::Type V;
- __atomic_load(&A->ValDoNotUse, &V, MO);
- return V;
-}
-
-template <typename T>
-inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- __atomic_store(&A->ValDoNotUse, &V, MO);
-}
-
-inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
-
-template <typename T>
-inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
- memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
- memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
- memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
- memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
-}
-
-template <typename T>
-inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
- memory_order MO) {
- DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
- typename T::Type R;
- __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
- return R;
-}
-
-template <typename T>
-inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
- typename T::Type Xchg,
- memory_order MO) {
- return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
- __ATOMIC_RELAXED);
-}
-
-// Clutter-reducing helpers.
-
-template <typename T>
-inline typename T::Type atomic_load_relaxed(const volatile T *A) {
- return atomic_load(A, memory_order_relaxed);
-}
-
-template <typename T>
-inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
- atomic_store(A, V, memory_order_relaxed);
-}
-
-template <typename T>
-inline typename T::Type atomic_compare_exchange(volatile T *A,
- typename T::Type Cmp,
- typename T::Type Xchg) {
- atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
- return Cmp;
-}
-
+//enum memory_order {
+// memory_order_relaxed = 0,
+// memory_order_consume = 1,
+// memory_order_acquire = 2,
+// memory_order_release = 3,
+// memory_order_acq_rel = 4,
+// memory_order_seq_cst = 5
+//};
+//static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
+//static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
+//static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
+//static_assert(memory_order_release == __ATOMIC_RELEASE, "");
+//static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
+//static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
+//
+//struct atomic_u8 {
+// typedef u8 Type;
+// volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_u16 {
+// typedef u16 Type;
+// volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_s32 {
+// typedef s32 Type;
+// volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_u32 {
+// typedef u32 Type;
+// volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_u64 {
+// typedef u64 Type;
+// // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+// alignas(8) volatile Type ValDoNotUse;
+//};
+//
+//struct atomic_uptr {
+// typedef uptr Type;
+// volatile Type ValDoNotUse;
+//};
+//
+//template <typename T>
+//inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// typename T::Type V;
+// __atomic_load(&A->ValDoNotUse, &V, MO);
+// return V;
+//}
+//
+//template <typename T>
+//inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// __atomic_store(&A->ValDoNotUse, &V, MO);
+//}
+//
+//inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+// memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+// memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+// memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+// memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+// memory_order MO) {
+// DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+// typename T::Type R;
+// __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+// return R;
+//}
+//
+//template <typename T>
+//inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+// typename T::Type Xchg,
+// memory_order MO) {
+// return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+// __ATOMIC_RELAXED);
+//}
+//
+//// Clutter-reducing helpers.
+//
+//template <typename T>
+//inline typename T::Type atomic_load_relaxed(const volatile T *A) {
+// return atomic_load(A, memory_order_relaxed);
+//}
+//
+//template <typename T>
+//inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+// atomic_store(A, V, memory_order_relaxed);
+//}
+//
+//template <typename T>
+//inline typename T::Type atomic_compare_exchange(volatile T *A,
+// typename T::Type Cmp,
+// typename T::Type Xchg) {
+// atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+// return Cmp;
+//}
} // namespace scudo
#endif // SCUDO_ATOMIC_H_
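A hedged usage sketch of the interface being commented out above, showing the call pattern any platform port of atomic_helpers.h would need to keep working (illustrative only, not taken from the commit):

    // A relaxed counter bumped from multiple threads: relaxed ordering is
    // enough here because only the count matters, not inter-thread ordering.
    scudo::atomic_u32 Counter = {};

    void incrementCounter() {
      scudo::atomic_fetch_add(&Counter, 1U, scudo::memory_order_relaxed);
    }

    scudo::u32 readCounter() { return scudo::atomic_load_relaxed(&Counter); }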