author    Dmitry Vyukov <dvyukov@google.com>  2013-09-19 06:02:56 +0000
committer Dmitry Vyukov <dvyukov@google.com>  2013-09-19 06:02:56 +0000
commit    6fa061978b138b39d29d98a6d28c67684a23eef0 (patch)
tree      6fde0e3a06ea20da8ead0122afdfb13c16191b57 /lib/sanitizer_common/sanitizer_atomic_clang.h
parent    9d4956eca0894f74be09ba7e93cf722e8b400d00 (diff)
download  compiler-rt-6fa061978b138b39d29d98a6d28c67684a23eef0.tar.gz
asan/tsan/msan: use builtin atomic operations when available

This should fix episodic crashes on ARM/PPC; x86_32 is still broken.

git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@190991 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/sanitizer_common/sanitizer_atomic_clang.h')
-rw-r--r--  lib/sanitizer_common/sanitizer_atomic_clang.h  33
1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/lib/sanitizer_common/sanitizer_atomic_clang.h b/lib/sanitizer_common/sanitizer_atomic_clang.h
index 30158b496..38286b239 100644
--- a/lib/sanitizer_common/sanitizer_atomic_clang.h
+++ b/lib/sanitizer_common/sanitizer_atomic_clang.h
@@ -15,6 +15,18 @@
#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H
+#ifndef __has_builtin
+# define __has_builtin(x) 0
+#endif
+
+#define ATOMIC_ORDER(mo) \
+  ((mo) == memory_order_relaxed ? __ATOMIC_RELAXED : \
+   (mo) == memory_order_consume ? __ATOMIC_CONSUME : \
+   (mo) == memory_order_acquire ? __ATOMIC_ACQUIRE : \
+   (mo) == memory_order_release ? __ATOMIC_RELEASE : \
+   (mo) == memory_order_acq_rel ? __ATOMIC_ACQ_REL : \
+   __ATOMIC_SEQ_CST)
+
namespace __sanitizer {
INLINE void atomic_signal_fence(memory_order) {
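For reference, the ATOMIC_ORDER macro added in this hunk only translates the sanitizer's own memory_order values into the __ATOMIC_* constants that the __atomic builtins expect. A minimal standalone sketch of the mapping follows; the power-of-two enum values mirror sanitizer_atomic.h but are an assumption here, since that header is not part of this diff:

// Sketch: the sanitizer's memory_order values are distinct bits, which is
// why the DCHECKs in this file can test several permitted orders with one
// bitwise AND. Exact values assumed, not quoted from sanitizer_atomic.h.
enum memory_order {
  memory_order_relaxed = 1 << 0,
  memory_order_consume = 1 << 1,
  memory_order_acquire = 1 << 2,
  memory_order_release = 1 << 3,
  memory_order_acq_rel = 1 << 4,
  memory_order_seq_cst = 1 << 5
};

#define ATOMIC_ORDER(mo) \
  ((mo) == memory_order_relaxed ? __ATOMIC_RELAXED : \
   (mo) == memory_order_consume ? __ATOMIC_CONSUME : \
   (mo) == memory_order_acquire ? __ATOMIC_ACQUIRE : \
   (mo) == memory_order_release ? __ATOMIC_RELEASE : \
   (mo) == memory_order_acq_rel ? __ATOMIC_ACQ_REL : \
   __ATOMIC_SEQ_CST)

// For constant arguments the ternary chain folds to a compile-time
// constant, so the mapping can be verified statically (C++11):
static_assert(ATOMIC_ORDER(memory_order_acquire) == __ATOMIC_ACQUIRE, "");
static_assert(ATOMIC_ORDER(memory_order_seq_cst) == __ATOMIC_SEQ_CST, "");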
@@ -41,7 +53,16 @@ INLINE typename T::Type atomic_load(
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;
-  // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
+// Use builtin atomic operations if available.
+// But not on x86_64, because they lead to vastly inefficient code generation
+// (http://llvm.org/bugs/show_bug.cgi?id=17281).
+// And not on x86_32, because they are not implemented
+// (http://llvm.org/bugs/show_bug.cgi?id=15034).
+// We have to use them on ARM/PPC/etc., because our own implementation lacks
+// the necessary memory fences.
+#if __has_builtin(__atomic_load_n) && !defined(__x86_64__) && !defined(__i386__)
+  v = __atomic_load_n(&a->val_dont_use, ATOMIC_ORDER(mo));
+#else
  if (mo == memory_order_relaxed) {
    v = a->val_dont_use;
  } else {
@@ -49,6 +70,7 @@ INLINE typename T::Type atomic_load(
    v = a->val_dont_use;
    atomic_signal_fence(memory_order_seq_cst);
  }
+#endif
  return v;
}
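To see how the builtin path and the fallback relate, here is a standalone sketch of a 32-bit acquire load under both strategies. The function name is illustrative, not the sanitizer's API; the fallback mirrors the hunk above, where atomic_signal_fence compiles to a pure compiler barrier and the plain aligned load is already atomic on x86:

#include <stdint.h>

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

inline uint32_t sketch_load_acquire(const volatile uint32_t *p) {
#if __has_builtin(__atomic_load_n) && !defined(__x86_64__) && !defined(__i386__)
  // Builtin path: the compiler emits whatever hardware barriers the
  // target's memory model requires (e.g. on ARM/PPC).
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
#else
  // x86 fallback: an aligned 32-bit load is atomic, and the strong
  // hardware model already gives acquire ordering; the empty asm
  // statements only stop the *compiler* from reordering around the load.
  __asm__ __volatile__("" ::: "memory");
  uint32_t v = *p;
  __asm__ __volatile__("" ::: "memory");
  return v;
#endif
}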
@@ -57,7 +79,11 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
-  // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
+// See the comment in atomic_load.
+#if __has_builtin(__atomic_store_n) && !defined(__x86_64__) \
+    && !defined(__i386__)
+  __atomic_store_n(&a->val_dont_use, v, ATOMIC_ORDER(mo));
+#else
  if (mo == memory_order_relaxed) {
    a->val_dont_use = v;
  } else {
@@ -67,6 +93,7 @@ INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  }
  if (mo == memory_order_seq_cst)
    atomic_thread_fence(memory_order_seq_cst);
+#endif
}
template<typename T>
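The store side is symmetric; a sketch with an illustrative name follows. Note that the original additionally issues atomic_thread_fence (a full hardware fence, e.g. mfence on x86) after a seq_cst store, which this release-only sketch does not need:

#include <stdint.h>

#ifndef __has_builtin
# define __has_builtin(x) 0
#endif

inline void sketch_store_release(volatile uint32_t *p, uint32_t v) {
#if __has_builtin(__atomic_store_n) && !defined(__x86_64__) && !defined(__i386__)
  // Builtin path: the ordering is encoded in the builtin's third argument.
  __atomic_store_n(p, v, __ATOMIC_RELEASE);
#else
  // x86 fallback: compiler barriers around a plain aligned store, matching
  // the fence/store/fence sequence in the hunk above.
  __asm__ __volatile__("" ::: "memory");
  *p = v;
  __asm__ __volatile__("" ::: "memory");
#endif
}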
@@ -121,4 +148,6 @@ INLINE bool atomic_compare_exchange_weak(volatile T *a,
} // namespace __sanitizer
+#undef ATOMIC_ORDER
+
#endif // SANITIZER_ATOMIC_CLANG_H
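Callers elsewhere in the sanitizer runtimes are unaffected by this change: they keep going through atomic_load/atomic_store, and only the implementation underneath switches between the builtin and fallback paths. A schematic usage sketch (not a quote from the tree; atomic_uint32_t wraps its payload in val_dont_use so all access funnels through these helpers):

#include "sanitizer_atomic.h"

namespace __sanitizer {

// Hypothetical helper: publish a one-shot flag with release/acquire pairing.
void example_set_flag_once(atomic_uint32_t *flag) {
  if (atomic_load(flag, memory_order_acquire) == 0)
    atomic_store(flag, 1, memory_order_release);
}

}  // namespace __sanitizer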