author     kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-11-27 14:01:46 +0000
committer  kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2012-11-27 14:01:46 +0000
commit     2862f959a43c6d8652cfee7fb932828961cfb674 (patch)
tree       689d358a17fc5081b93d1dae5d2cd4e1ee168c01 /libsanitizer/tsan
parent     645c25972d625cb0cfc2af2c4cdd76fbf96a826a (diff)
download   gcc-2862f959a43c6d8652cfee7fb932828961cfb674.tar.gz
[libsanitizer] merge from upstream r168699
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@193849 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/tsan')
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.cc      |    6
-rw-r--r--  libsanitizer/tsan/tsan_interface.h          |    3
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc  |  307
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.h   |   85
-rw-r--r--  libsanitizer/tsan/tsan_interface_inl.h      |    8
-rw-r--r--  libsanitizer/tsan/tsan_platform.h           |    2
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h                |    1
-rw-r--r--  libsanitizer/tsan/tsan_stat.cc              |    6
-rw-r--r--  libsanitizer/tsan/tsan_stat.h               |    2
9 files changed, 322 insertions, 98 deletions
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index 191dea7387a..7ceb153c42b 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -564,13 +564,13 @@ TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(valloc, sz);
- return user_alloc(thr, pc, sz, kPageSize);
+ return user_alloc(thr, pc, sz, GetPageSizeCached());
}
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
SCOPED_TSAN_INTERCEPTOR(pvalloc, sz);
- sz = RoundUp(sz, kPageSize);
- return user_alloc(thr, pc, sz, kPageSize);
+ sz = RoundUp(sz, GetPageSizeCached());
+ return user_alloc(thr, pc, sz, GetPageSizeCached());
}
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
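Note on the hunk above: valloc and pvalloc stop using the compile-time kPageSize constant and call GetPageSizeCached() instead, since the page size varies across targets and must be queried at runtime. A minimal sketch of the query-once-and-cache idiom, assuming a POSIX sysconf() backend (the function name below is illustrative, not the sanitizer_common implementation):

    #include <unistd.h>

    // Query the page size once; every later call is a plain load.
    static unsigned long cached_page_size = 0;

    unsigned long GetPageSizeCachedSketch() {
      if (cached_page_size == 0)
        cached_page_size = (unsigned long)sysconf(_SC_PAGESIZE);
      return cached_page_size;
    }
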
diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h
index dede9be5f06..e3c89714d09 100644
--- a/libsanitizer/tsan/tsan_interface.h
+++ b/libsanitizer/tsan/tsan_interface.h
@@ -42,6 +42,9 @@ void __tsan_vptr_update(void **vptr_p, void *new_val);
void __tsan_func_entry(void *call_pc);
void __tsan_func_exit();
+void __tsan_read_range(void *addr, unsigned long size); // NOLINT
+void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+
#ifdef __cplusplus
} // extern "C"
#endif
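Note: the two new entry points let instrumented code report a whole memory range to the race detector in a single call rather than one call per access. A hypothetical manual annotation, assuming only the declarations above:

    extern "C" void __tsan_read_range(void *addr, unsigned long size);   // NOLINT
    extern "C" void __tsan_write_range(void *addr, unsigned long size);  // NOLINT

    // Bulk copy: report one read range and one write range to TSan.
    void CopyBuffer(char *dst, const char *src, unsigned long n) {
      __tsan_read_range(const_cast<char *>(src), n);
      __tsan_write_range(dst, n);
      for (unsigned long i = 0; i < n; i++)
        dst[i] = src[i];
    }
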
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 037a080da67..7193e7f52f4 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -9,6 +9,14 @@
//
//===----------------------------------------------------------------------===//
+// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
+// For background, see the C++11 standard.  A slightly older, publicly
+// available draft (not entirely up-to-date, but close enough for casual
+// browsing) can be found here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_atomic.h"
#include "tsan_flags.h"
@@ -37,6 +45,7 @@ typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
+typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
@@ -50,7 +59,8 @@ static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
StatInc(thr, size == 1 ? StatAtomic1
: size == 2 ? StatAtomic2
: size == 4 ? StatAtomic4
- : StatAtomic8);
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
: mo == mo_consume ? StatAtomicConsume
: mo == mo_acquire ? StatAtomicAcquire
@@ -77,6 +87,10 @@ static bool IsAcquireOrder(morder mo) {
|| mo == mo_acq_rel || mo == mo_seq_cst;
}
+static bool IsAcqRelOrder(morder mo) {
+ return mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
static morder ConvertOrder(morder mo) {
if (mo > (morder)100500) {
mo = morder(mo - 100500);
@@ -98,6 +112,34 @@ static morder ConvertOrder(morder mo) {
return mo;
}
+template<typename T> T func_xchg(T v, T op) {
+ return op;
+}
+
+template<typename T> T func_add(T v, T op) {
+ return v + op;
+}
+
+template<typename T> T func_sub(T v, T op) {
+ return v - op;
+}
+
+template<typename T> T func_and(T v, T op) {
+ return v & op;
+}
+
+template<typename T> T func_or(T v, T op) {
+ return v | op;
+}
+
+template<typename T> T func_xor(T v, T op) {
+ return v ^ op;
+}
+
+template<typename T> T func_nand(T v, T op) {
+  return ~(v & op);  // fetch_nand stores ~(old & op), not (~old & op)
+}
+
#define SCOPED_ATOMIC(func, ...) \
mo = ConvertOrder(mo); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
@@ -113,9 +155,15 @@ template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
morder mo) {
CHECK(IsLoadOrder(mo));
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a))
+ return *a;
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, false);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.acquire(&s->clock);
T v = *a;
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
+ s->mtx.ReadUnlock();
return v;
}
@@ -123,100 +171,112 @@ template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
CHECK(IsStoreOrder(mo));
- if (IsReleaseOrder(mo))
- ReleaseStore(thr, pc, (uptr)a);
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+  // Strictly speaking, even a relaxed store cuts off the release sequence,
+  // so we must reset the clock.
+ if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
+ *a = v;
+ return;
+ }
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ thr->clock.ReleaseStore(&s->clock);
*a = v;
+ s->mtx.Unlock();
+}
+
+template<typename T, T (*F)(T v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ if (IsAcqRelOrder(mo))
+ thr->clock.acq_rel(&s->clock);
+ else if (IsReleaseOrder(mo))
+ thr->clock.release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.acquire(&s->clock);
+ T c = *a;
+ *a = F(c, v);
+ s->mtx.Unlock();
+ return c;
}
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- v = __sync_lock_test_and_set(a, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- return v;
+ return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}
template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- v = __sync_fetch_and_add(a, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- return v;
+ return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}
template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- v = __sync_fetch_and_sub(a, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- return v;
+ return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}
template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- v = __sync_fetch_and_and(a, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- return v;
+ return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}
template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- v = __sync_fetch_and_or(a, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- return v;
+ return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}
template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- v = __sync_fetch_and_xor(a, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- return v;
+ return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
- volatile T *a, T *c, T v, morder mo) {
- if (IsReleaseOrder(mo))
- Release(thr, pc, (uptr)a);
- T cc = *c;
- T pr = __sync_val_compare_and_swap(a, cc, v);
- if (IsAcquireOrder(mo))
- Acquire(thr, pc, (uptr)a);
- if (pr == cc)
- return true;
- *c = pr;
- return false;
+ volatile T *a, T *c, T v, morder mo, morder fmo) {
+  (void)fmo;  // Unused because LLVM does not pass it yet.
+ SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
+ thr->clock.set(thr->tid, thr->fast_state.epoch());
+ if (IsAcqRelOrder(mo))
+ thr->clock.acq_rel(&s->clock);
+ else if (IsReleaseOrder(mo))
+ thr->clock.release(&s->clock);
+ else if (IsAcquireOrder(mo))
+ thr->clock.acquire(&s->clock);
+ T cur = *a;
+ bool res = false;
+ if (cur == *c) {
+ *a = v;
+ res = true;
+ } else {
+ *c = cur;
+ }
+ s->mtx.Unlock();
+ return res;
}
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
- volatile T *a, T c, T v, morder mo) {
- AtomicCAS(thr, pc, a, &c, v, mo);
+ volatile T *a, T c, T v, morder mo, morder fmo) {
+ AtomicCAS(thr, pc, a, &c, v, mo, fmo);
return c;
}
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
+ // FIXME(dvyukov): not implemented.
__sync_synchronize();
}
@@ -236,6 +296,12 @@ a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
SCOPED_ATOMIC(Load, a, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+#endif
+
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
@@ -252,6 +318,12 @@ void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Store, a, v, mo);
}
+#if __TSAN_HAS_INT128
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+#endif
+
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
@@ -268,6 +340,12 @@ a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(Exchange, a, v, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+#endif
+
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
@@ -284,6 +362,12 @@ a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+#endif
+
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
@@ -300,6 +384,12 @@ a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchSub, a, v, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+#endif
+
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
@@ -316,6 +406,12 @@ a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+#endif
+
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
@@ -332,6 +428,12 @@ a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchOr, a, v, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+#endif
+
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
@@ -348,64 +450,113 @@ a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
SCOPED_ATOMIC(FetchXor, a, v, mo);
}
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+#endif
+
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+#endif
+
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+#endif
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
- morder mo) {
- SCOPED_ATOMIC(CAS, a, c, v, mo);
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
+#endif
void __tsan_atomic_thread_fence(morder mo) {
char* a;
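Note on the rewrite above: every fetch-and-op entry point now funnels into the single AtomicRMW template, which takes the combining function as a template parameter, so the vector-clock synchronization logic lives in exactly one place. The dispatch pattern in isolation, as a self-contained sketch (names are illustrative and the locking is elided):

    #include <cstdio>

    // Generic read-modify-write: the operation F is a compile-time
    // function parameter, mirroring AtomicRMW<T, func_add> above.
    template<typename T, T (*F)(T, T)>
    T GenericRMW(volatile T *a, T v) {
      T old = *a;      // a real implementation synchronizes here
      *a = F(old, v);
      return old;
    }

    template<typename T> T FuncAdd(T v, T op) { return v + op; }

    int main() {
      volatile int x = 40;
      int prev = GenericRMW<int, FuncAdd>(&x, 2);
      printf("prev=%d now=%d\n", prev, (int)x);  // prev=40 now=42
      return 0;
    }
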
diff --git a/libsanitizer/tsan/tsan_interface_atomic.h b/libsanitizer/tsan/tsan_interface_atomic.h
index fea97b69deb..15a0cc6594f 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.h
+++ b/libsanitizer/tsan/tsan_interface_atomic.h
@@ -15,10 +15,19 @@
extern "C" {
#endif
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16; // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64; // NOLINT
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+typedef __int128 __tsan_atomic128;
+#define __TSAN_HAS_INT128 1
+#else
+typedef char __tsan_atomic128;
+#define __TSAN_HAS_INT128 0
+#endif
// Part of ABI, do not change.
// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
@@ -39,6 +48,8 @@ __tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
__tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
__tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+ __tsan_memory_order mo);
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
__tsan_memory_order mo);
@@ -48,6 +59,8 @@ void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
__tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
__tsan_memory_order mo);
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+ __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
@@ -57,6 +70,8 @@ __tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
@@ -66,6 +81,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
@@ -75,6 +92,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
@@ -84,6 +103,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
@@ -93,6 +114,8 @@ __tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
__tsan_atomic8 v, __tsan_memory_order mo);
@@ -102,37 +125,67 @@ __tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
__tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
__tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
- __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
- __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
- __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
- __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
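Note: every 128-bit declaration above is only meaningful when __TSAN_HAS_INT128 is 1, which the header sets when the compiler defines __SIZEOF_INT128__ (or is clang >= 3.2); otherwise __tsan_atomic128 degrades to char. A minimal guard on the caller side, assuming this header is on the include path:

    #include "tsan_interface_atomic.h"

    void StoreOne128(volatile __tsan_atomic128 *p) {
    #if __TSAN_HAS_INT128
      __tsan_atomic128_store(p, (__tsan_atomic128)1,
                             __tsan_memory_order_release);
    #else
      (void)p;  // no 128-bit atomics on this compiler
    #endif
    }
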
diff --git a/libsanitizer/tsan/tsan_interface_inl.h b/libsanitizer/tsan/tsan_interface_inl.h
index 493e3f7778e..133348a942c 100644
--- a/libsanitizer/tsan/tsan_interface_inl.h
+++ b/libsanitizer/tsan/tsan_interface_inl.h
@@ -61,3 +61,11 @@ void __tsan_func_entry(void *pc) {
void __tsan_func_exit() {
FuncExit(cur_thread());
}
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index f849182503c..67af1b25dd8 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -50,7 +50,7 @@ static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
static const uptr kLinuxShadowBeg = MemToShadow(kLinuxAppMemBeg);
static const uptr kLinuxShadowEnd =
- MemToShadow(kLinuxAppMemEnd) | (kPageSize - 1);
+ MemToShadow(kLinuxAppMemEnd) | 0xff;
static inline bool IsAppMem(uptr mem) {
return mem >= kLinuxAppMemBeg && mem <= kLinuxAppMemEnd;
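Note: kLinuxShadowEnd previously rounded the last shadow address up with (kPageSize - 1); with kPageSize no longer a compile-time constant after this merge, the initializer hard-codes the 0xff low-bit mask. The rounding idiom itself, as a small worked example:

    typedef unsigned long uptr;

    // Or-ing in a low-bit mask rounds an address up to the last byte of
    // its 256-byte-aligned granule (2^8 bytes, hence 0xff).
    uptr RoundUpToGranuleEnd(uptr addr) { return addr | 0xff; }

    // RoundUpToGranuleEnd(0x7d0000001234) == 0x7d0000001234 | 0xff
    //                                     == 0x7d00000012ff
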
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index de357a5d5a0..ee60f532e4c 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -521,6 +521,7 @@ void AfterSleep(ThreadState *thr, uptr pc);
#define HACKY_CALL(f) \
__asm__ __volatile__("sub $1024, %%rsp;" \
"/*.cfi_adjust_cfa_offset 1024;*/" \
+ ".hidden " #f "_thunk;" \
"call " #f "_thunk;" \
"add $1024, %%rsp;" \
"/*.cfi_adjust_cfa_offset -1024;*/" \
diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc
index 68acae81e66..8c6c9511581 100644
--- a/libsanitizer/tsan/tsan_stat.cc
+++ b/libsanitizer/tsan/tsan_stat.cc
@@ -75,6 +75,11 @@ void StatOutput(u64 *stat) {
name[StatAtomicStore] = " store ";
name[StatAtomicExchange] = " exchange ";
name[StatAtomicFetchAdd] = " fetch_add ";
+ name[StatAtomicFetchSub] = " fetch_sub ";
+ name[StatAtomicFetchAnd] = " fetch_and ";
+ name[StatAtomicFetchOr] = " fetch_or ";
+ name[StatAtomicFetchXor] = " fetch_xor ";
+ name[StatAtomicFetchNand] = " fetch_nand ";
name[StatAtomicCAS] = " compare_exchange ";
name[StatAtomicFence] = " fence ";
name[StatAtomicRelaxed] = " Including relaxed ";
@@ -87,6 +92,7 @@ void StatOutput(u64 *stat) {
name[StatAtomic2] = " size 2 ";
name[StatAtomic4] = " size 4 ";
name[StatAtomic8] = " size 8 ";
+ name[StatAtomic16] = " size 16 ";
name[StatInterceptor] = "Interceptors ";
name[StatInt_longjmp] = " longjmp ";
diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h
index 25bff8b7080..73d8c046e9a 100644
--- a/libsanitizer/tsan/tsan_stat.h
+++ b/libsanitizer/tsan/tsan_stat.h
@@ -75,6 +75,7 @@ enum StatType {
StatAtomicFetchAnd,
StatAtomicFetchOr,
StatAtomicFetchXor,
+ StatAtomicFetchNand,
StatAtomicCAS,
StatAtomicFence,
StatAtomicRelaxed,
@@ -87,6 +88,7 @@ enum StatType {
StatAtomic2,
StatAtomic4,
StatAtomic8,
+ StatAtomic16,
// Interceptors.
StatInterceptor,