From fd6cba40e198d145bc53502e258db560936f2226 Mon Sep 17 00:00:00 2001
From: Jakub Jelinek <jakub@redhat.com>
Date: Wed, 22 Mar 2017 19:46:54 +0100
Subject: re PR sanitizer/78158 (Strange data race detection with thread
 sanitizer)

	PR sanitizer/78158
	* tsan/tsan_interface_atomic.cc: Cherry-pick upstream r298378.

From-SVN: r246402
---
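With -fsanitize=thread, GCC lowers __atomic/__sync builtins to
__tsan_atomic* calls and passes the memory-order argument through
verbatim, so orders carrying the extra flag bits listed in the new
comment (MEMMODEL_SYNC, __ATOMIC_HLE_ACQUIRE/RELEASE) reach tsan with
bits above mo_seq_cst set.  Before this fix tsan then failed to
recognize the acquire/release semantics and could report false races.
A minimal sketch of affected code (illustrative only, not the PR's
testcase; a second note after the diff sanity-checks the mask):

  /* HLE spinlock sketch; compile with -fsanitize=thread on x86.  */
  #include <pthread.h>

  static int lock;  /* elided lock word */
  static int data;  /* protected by lock */

  static void *worker (void *arg)
  {
    while (__atomic_exchange_n (&lock, 1,
                                __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE))
      ;  /* spin; the mo seen by tsan is 2 | (1 << 16) */
    ++data;  /* critical section */
    __atomic_store_n (&lock, 0, __ATOMIC_RELEASE | __ATOMIC_HLE_RELEASE);
    return arg;
  }

  int main ()
  {
    pthread_t t;
    pthread_create (&t, 0, worker, 0);
    worker (0);
    pthread_join (t, 0);
    return data != 2;
  }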
 libsanitizer/tsan/tsan_interface_atomic.cc | 19 ++++++++++++++++++-
 1 file changed, 18 insertions(+), 1 deletion(-)

diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 5c5c34f3b87..deb4206a624 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -448,10 +448,27 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
 
 // C/C++
 
+static morder convert_morder(morder mo) {
+  if (flags()->force_seq_cst_atomics)
+    return (morder)mo_seq_cst;
+
+  // Filter out additional memory order flags:
+  // MEMMODEL_SYNC        = 1 << 15
+  // __ATOMIC_HLE_ACQUIRE = 1 << 16
+  // __ATOMIC_HLE_RELEASE = 1 << 17
+  //
+  // HLE is an optimization, and we pretend that elision always fails.
+  // MEMMODEL_SYNC is used when lowering __sync_ atomics;
+  // since we use __sync_ atomics for the actual atomic operations,
+  // we can safely ignore it as well. It also subtly affects
+  // semantics, but we don't model the difference.
+  return (morder)(mo & 0x7fff);
+}
+
 #define SCOPED_ATOMIC(func, ...) \
     const uptr callpc = (uptr)__builtin_return_address(0); \
     uptr pc = StackTrace::GetCurrentPc(); \
-    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
+    mo = convert_morder(mo); \
     ThreadState *const thr = cur_thread(); \
     if (thr->ignore_interceptors) \
       return NoTsanAtomic##func(__VA_ARGS__); \
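
The new comment in convert_morder gives the flag-bit values; as a
quick sanity check of the 0x7fff mask (our own sketch, using tsan's
morder values mo_acquire == 2 and mo_seq_cst == 5):

  /* Mask sanity check; constants mirror the comment above.  */
  #include <assert.h>

  enum { mo_acquire = 2, mo_seq_cst = 5 };   /* tsan morder values */
  enum { MEMMODEL_SYNC = 1 << 15,            /* __sync_* lowering */
         HLE_ACQUIRE   = 1 << 16 };          /* __ATOMIC_HLE_ACQUIRE */

  int main ()
  {
    /* __sync_ lowering tags the order with MEMMODEL_SYNC ...  */
    assert (((mo_seq_cst | MEMMODEL_SYNC) & 0x7fff) == mo_seq_cst);
    /* ... and HLE adds bits above 0x7fff; both are stripped.  */
    assert (((mo_acquire | HLE_ACQUIRE) & 0x7fff) == mo_acquire);
    return 0;
  }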
-- 
cgit v1.2.1