Diffstat (limited to 'libsanitizer/tsan')
-rw-r--r--  libsanitizer/tsan/tsan_defs.h              |  12
-rw-r--r--  libsanitizer/tsan/tsan_flags.cc            |  14
-rw-r--r--  libsanitizer/tsan/tsan_flags.h             |  15
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.cc     |  84
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.h      |  52
-rw-r--r--  libsanitizer/tsan/tsan_interface.h         |  47
-rw-r--r--  libsanitizer/tsan/tsan_interface_ann.cc    | 100
-rw-r--r--  libsanitizer/tsan/tsan_interface_ann.h     |   6
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc | 119
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.h  | 130
-rw-r--r--  libsanitizer/tsan/tsan_platform.h          |  58
-rw-r--r--  libsanitizer/tsan/tsan_platform_linux.cc   |  50
-rw-r--r--  libsanitizer/tsan/tsan_report.cc           |  12
-rw-r--r--  libsanitizer/tsan/tsan_report.h            |   2
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cc              |  36
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h               |  73
-rw-r--r--  libsanitizer/tsan/tsan_rtl_mutex.cc        |  12
-rw-r--r--  libsanitizer/tsan/tsan_rtl_report.cc       |  58
-rw-r--r--  libsanitizer/tsan/tsan_rtl_thread.cc       |  40
-rw-r--r--  libsanitizer/tsan/tsan_stat.cc             |   1
-rw-r--r--  libsanitizer/tsan/tsan_stat.h              |   1
-rw-r--r--  libsanitizer/tsan/tsan_suppressions.cc     |   2
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.cc        |   8
-rw-r--r--  libsanitizer/tsan/tsan_trace.h             |  10
24 files changed, 663 insertions(+), 279 deletions(-)
diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h
index 6f3fd21e246..c14a6d19623 100644
--- a/libsanitizer/tsan/tsan_defs.h
+++ b/libsanitizer/tsan/tsan_defs.h
@@ -23,8 +23,12 @@
namespace __tsan {
#ifdef TSAN_GO
+const bool kGoMode = true;
+const bool kCppMode = false;
const char *const kTsanOptionsEnv = "GORACE";
#else
+const bool kGoMode = false;
+const bool kCppMode = true;
const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
#endif
@@ -122,11 +126,17 @@ T max(T a, T b) {
}
template<typename T>
-T RoundUp(T p, int align) {
+T RoundUp(T p, u64 align) {
DCHECK_EQ(align & (align - 1), 0);
return (T)(((u64)p + align - 1) & ~(align - 1));
}
+template<typename T>
+T RoundDown(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)((u64)p & ~(align - 1));
+}
+
struct MD5Hash {
u64 hash[2];
bool operator==(const MD5Hash &other) const;
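The new RoundDown mirrors the existing RoundUp, and widening the align parameter from int to u64 avoids truncation for alignments above 2^31. A standalone restatement with standard types (the runtime's u64/DCHECK machinery replaced by assert) shows the intended behavior:

    #include <cassert>
    #include <cstdint>

    // Same logic as the RoundUp/RoundDown templates in tsan_defs.h above.
    template<typename T> T RoundUp(T p, uint64_t align) {
      assert((align & (align - 1)) == 0);  // power-of-two alignments only
      return (T)(((uint64_t)p + align - 1) & ~(align - 1));
    }
    template<typename T> T RoundDown(T p, uint64_t align) {
      assert((align & (align - 1)) == 0);
      return (T)((uint64_t)p & ~(align - 1));
    }

    int main() {
      assert(RoundUp(13u, 8) == 16u);
      assert(RoundDown(13u, 8) == 8u);
      assert(RoundUp(16u, 8) == 16u);  // identity on aligned values
    }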
diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc
index a69c6a6b545..1b726e6c8de 100644
--- a/libsanitizer/tsan/tsan_flags.cc
+++ b/libsanitizer/tsan/tsan_flags.cc
@@ -38,6 +38,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->enable_annotations = true;
f->suppress_equal_stacks = true;
f->suppress_equal_addresses = true;
+ f->suppress_java = false;
f->report_bugs = true;
f->report_thread_leaks = true;
f->report_destroy_locked = true;
@@ -46,7 +47,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->strip_path_prefix = "";
f->suppressions = "";
f->exitcode = 66;
- f->log_fileno = kStderrFd;
+ f->log_path = "stderr";
f->atexit_sleep_ms = 1000;
f->verbosity = 0;
f->profile_memory = "";
@@ -54,6 +55,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->stop_on_start = false;
f->running_on_valgrind = false;
f->external_symbolizer_path = "";
+ f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go.
// Let a frontend override.
OverrideFlags(f);
@@ -62,6 +64,7 @@ void InitializeFlags(Flags *f, const char *env) {
ParseFlag(env, &f->enable_annotations, "enable_annotations");
ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks");
ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses");
+ ParseFlag(env, &f->suppress_java, "suppress_java");
ParseFlag(env, &f->report_bugs, "report_bugs");
ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks");
ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked");
@@ -70,19 +73,26 @@ void InitializeFlags(Flags *f, const char *env) {
ParseFlag(env, &f->strip_path_prefix, "strip_path_prefix");
ParseFlag(env, &f->suppressions, "suppressions");
ParseFlag(env, &f->exitcode, "exitcode");
- ParseFlag(env, &f->log_fileno, "log_fileno");
+ ParseFlag(env, &f->log_path, "log_path");
ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms");
ParseFlag(env, &f->verbosity, "verbosity");
ParseFlag(env, &f->profile_memory, "profile_memory");
ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms");
ParseFlag(env, &f->stop_on_start, "stop_on_start");
ParseFlag(env, &f->external_symbolizer_path, "external_symbolizer_path");
+ ParseFlag(env, &f->history_size, "history_size");
if (!f->report_bugs) {
f->report_thread_leaks = false;
f->report_destroy_locked = false;
f->report_signal_unsafe = false;
}
+
+ if (f->history_size < 0 || f->history_size > 7) {
+ Printf("ThreadSanitizer: incorrect value for history_size"
+ " (must be [0..7])\n");
+ Die();
+ }
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_flags.h b/libsanitizer/tsan/tsan_flags.h
index 451d3684729..6af96ec83be 100644
--- a/libsanitizer/tsan/tsan_flags.h
+++ b/libsanitizer/tsan/tsan_flags.h
@@ -29,6 +29,9 @@ struct Flags {
// Suppress a race report if we've already output another race report
// on the same address.
bool suppress_equal_addresses;
+ // Suppress weird race reports that can be seen if a JVM is embedded
+ // into the process.
+ bool suppress_java;
// Turns off bug reporting entirely (useful for benchmarking).
bool report_bugs;
// Report thread leaks at exit?
@@ -47,8 +50,10 @@ struct Flags {
const char *suppressions;
// Override exit status if something was reported.
int exitcode;
- // Log fileno (1 - stdout, 2 - stderr).
- int log_fileno;
+ // Write logs to "log_path.pid".
+ // The special values are "stdout" and "stderr".
+ // The default is "stderr".
+ const char *log_path;
// Sleep in main thread before exiting for that many ms
// (useful to catch "at exit" races).
int atexit_sleep_ms;
@@ -64,6 +69,12 @@ struct Flags {
bool running_on_valgrind;
// Path to external symbolizer.
const char *external_symbolizer_path;
+ // Per-thread history size, controls how many previous memory accesses
+ // are remembered per thread. Possible values are [0..7].
+ // history_size=0 amounts to 32K memory accesses. Each next value doubles
+ // the amount of memory accesses, up to history_size=7 that amounts to
+ // 4M memory accesses. The default value is 2 (128K memory accesses).
+ int history_size;
};
Flags *flags();
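The history_size comment pins down the trace geometry. A quick standalone check of the arithmetic; kTracePartSizeBits == 14 (16K events per trace part) is an assumption derived from the 32K figure above, not shown in this diff:

    #include <cstdio>

    int main() {
      const int kTracePartSizeBits = 14;  // assumed: 16K events per part
      for (int hs = 0; hs <= 7; hs++) {
        // Mirrors TraceSize() in tsan_rtl.cc below.
        unsigned long long events = 1ull << (kTracePartSizeBits + hs + 1);
        printf("history_size=%d -> %lluK memory accesses\n", hs, events >> 10);
      }
      // history_size=0 -> 32K, doubling each step up to history_size=7 -> 4096K,
      // matching the comment in tsan_flags.h.
    }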
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index 7ceb153c42b..0463fbd4f28 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -13,7 +13,7 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_interceptors.h"
+#include "interception/interception.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
@@ -135,6 +135,15 @@ static SignalContext *SigCtx(ThreadState *thr) {
static unsigned g_thread_finalize_key;
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+ ~ScopedInterceptor();
+ private:
+ ThreadState *const thr_;
+ const int in_rtl_;
+};
+
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
@@ -158,6 +167,30 @@ ScopedInterceptor::~ScopedInterceptor() {
CHECK_EQ(in_rtl_, thr_->in_rtl);
}
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ ThreadState *thr = cur_thread(); \
+ StatInc(thr, StatInterceptor); \
+ StatInc(thr, StatInt_##func); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ ScopedInterceptor si(thr, #func, caller_pc); \
+ /* Subtract one from pc as we need current instruction address */ \
+ const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
+ (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ if (REAL(func) == 0) { \
+ Printf("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Die(); \
+ } \
+ if (thr->in_rtl > 1) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+
#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
struct BlockingCall {
@@ -259,7 +292,6 @@ static void finalize(void *arg) {
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
SCOPED_TSAN_INTERCEPTOR(atexit, f);
return atexit_ctx->atexit(thr, pc, f);
- return 0;
}
TSAN_INTERCEPTOR(void, longjmp, void *env, int val) {
@@ -308,6 +340,11 @@ TSAN_INTERCEPTOR(void*, malloc, uptr size) {
return p;
}
+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+ return user_alloc(thr, pc, sz, align);
+}
+
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
void *p = 0;
{
@@ -1347,6 +1384,35 @@ TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
return REAL(gettimeofday)(tv, tz);
}
+// The Linux kernel has a bug that leads to a kernel deadlock if a process
+// maps TBs of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ Printf("INFO: ThreadSanitizer ignores mlock/mlockall/munlock/munlockall\n");
+}
+
+TSAN_INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+TSAN_INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
namespace __tsan {
void ProcessPendingSignals(ThreadState *thr) {
@@ -1396,6 +1462,11 @@ void ProcessPendingSignals(ThreadState *thr) {
thr->in_signal_handler = false;
}
+static void unreachable() {
+ Printf("FATAL: ThreadSanitizer: unreachable called\n");
+ Die();
+}
+
void InitializeInterceptors() {
CHECK_GT(cur_thread()->in_rtl, 0);
@@ -1408,6 +1479,7 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(siglongjmp);
TSAN_INTERCEPT(malloc);
+ TSAN_INTERCEPT(__libc_memalign);
TSAN_INTERCEPT(calloc);
TSAN_INTERCEPT(realloc);
TSAN_INTERCEPT(free);
@@ -1524,6 +1596,14 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(nanosleep);
TSAN_INTERCEPT(gettimeofday);
+ TSAN_INTERCEPT(mlock);
+ TSAN_INTERCEPT(munlock);
+ TSAN_INTERCEPT(mlockall);
+ TSAN_INTERCEPT(munlockall);
+
+ // Need to set it up, because interceptors check that the function is resolved.
+ // But atexit is emitted directly into the module, so it can't be resolved.
+ REAL(atexit) = (int(*)(void(*)()))unreachable;
atexit_ctx = new(internal_alloc(MBlockAtExit, sizeof(AtExitContext)))
AtExitContext();
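Taken together, the macros moved in from tsan_interceptors.h compose as in the hypothetical interceptor below (usleep is not touched by this patch; it is chosen only to show where SCOPED_TSAN_INTERCEPTOR and BLOCK_REAL fit):

    // Hypothetical example written against the macros above, not part of
    // this change:
    TSAN_INTERCEPTOR(int, usleep, unsigned usec) {
      SCOPED_TSAN_INTERCEPTOR(usleep, usec);  // stats, ScopedInterceptor, pc;
                                              // falls through to REAL() when
                                              // already deep inside the runtime
      return BLOCK_REAL(usleep)(usec);        // BlockingCall scope around REAL()
    }

    // ...and it would be registered in InitializeInterceptors():
    //   TSAN_INTERCEPT(usleep);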
diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h
deleted file mode 100644
index 2e8c553172c..00000000000
--- a/libsanitizer/tsan/tsan_interceptors.h
+++ /dev/null
@@ -1,52 +0,0 @@
-//===-- tsan_interceptors.h -------------------------------------*- C++ -*-===//
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file is a part of ThreadSanitizer (TSan), a race detector.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef TSAN_INTERCEPTORS_H
-#define TSAN_INTERCEPTORS_H
-
-#include "interception/interception.h"
-#include "sanitizer_common/sanitizer_stacktrace.h"
-#include "tsan_rtl.h"
-
-namespace __tsan {
-
-class ScopedInterceptor {
- public:
- ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
- ~ScopedInterceptor();
- private:
- ThreadState *const thr_;
- const int in_rtl_;
-};
-
-#define SCOPED_INTERCEPTOR_RAW(func, ...) \
- ThreadState *thr = cur_thread(); \
- StatInc(thr, StatInterceptor); \
- StatInc(thr, StatInt_##func); \
- const uptr caller_pc = GET_CALLER_PC(); \
- ScopedInterceptor si(thr, #func, caller_pc); \
- /* Subtract one from pc as we need current instruction address */ \
- const uptr pc = __sanitizer::StackTrace::GetCurrentPc() - 1; \
- (void)pc; \
-/**/
-
-#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
- SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
- if (thr->in_rtl > 1) \
- return REAL(func)(__VA_ARGS__); \
-/**/
-
-#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
-#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
-
-} // namespace __tsan
-
-#endif // TSAN_INTERCEPTORS_H
diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h
index e3c89714d09..8f265ca27b5 100644
--- a/libsanitizer/tsan/tsan_interface.h
+++ b/libsanitizer/tsan/tsan_interface.h
@@ -14,6 +14,8 @@
#ifndef TSAN_INTERFACE_H
#define TSAN_INTERFACE_H
+#include <sanitizer/common_interface_defs.h>
+
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
@@ -23,27 +25,30 @@ extern "C" {
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
-void __tsan_init();
-
-void __tsan_read1(void *addr);
-void __tsan_read2(void *addr);
-void __tsan_read4(void *addr);
-void __tsan_read8(void *addr);
-void __tsan_read16(void *addr);
-
-void __tsan_write1(void *addr);
-void __tsan_write2(void *addr);
-void __tsan_write4(void *addr);
-void __tsan_write8(void *addr);
-void __tsan_write16(void *addr);
-
-void __tsan_vptr_update(void **vptr_p, void *new_val);
-
-void __tsan_func_entry(void *call_pc);
-void __tsan_func_exit();
-
-void __tsan_read_range(void *addr, unsigned long size); // NOLINT
-void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+void __tsan_init() SANITIZER_INTERFACE_ATTRIBUTE;
+
+void __tsan_read1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_read16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+
+void __tsan_write1(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write2(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write4(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write8(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write16(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+
+void __tsan_vptr_update(void **vptr_p, void *new_val)
+ SANITIZER_INTERFACE_ATTRIBUTE;
+
+void __tsan_func_entry(void *call_pc) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_func_exit() SANITIZER_INTERFACE_ATTRIBUTE;
+
+void __tsan_read_range(void *addr, unsigned long size) // NOLINT
+ SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_write_range(void *addr, unsigned long size) // NOLINT
+ SANITIZER_INTERFACE_ATTRIBUTE;
#ifdef __cplusplus
} // extern "C"
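These entry points are emitted by the compiler instrumentation pass; for a 4-byte store, the instrumented code is conceptually equivalent to the hand-written sketch below (an illustration only; the real calls are generated by the compiler, not written by users):

    extern "C" void __tsan_func_entry(void *call_pc);
    extern "C" void __tsan_func_exit();
    extern "C" void __tsan_write4(void *addr);

    int g;

    void set_g(int v) {
      __tsan_func_entry(__builtin_return_address(0));  // maintain shadow stack
      __tsan_write4(&g);  // race check for the 4-byte store that follows
      g = v;
      __tsan_func_exit();
    }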
diff --git a/libsanitizer/tsan/tsan_interface_ann.cc b/libsanitizer/tsan/tsan_interface_ann.cc
index b9e084b7327..7e49fb8b059 100644
--- a/libsanitizer/tsan/tsan_interface_ann.cc
+++ b/libsanitizer/tsan/tsan_interface_ann.cc
@@ -9,6 +9,7 @@
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_interface_ann.h"
#include "tsan_mutex.h"
@@ -157,48 +158,50 @@ bool IsExpectedReport(uptr addr, uptr size) {
using namespace __tsan; // NOLINT
extern "C" {
-void AnnotateHappensBefore(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
SCOPED_ANNOTATION(AnnotateHappensBefore);
Release(cur_thread(), CALLERPC, addr);
}
-void AnnotateHappensAfter(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
SCOPED_ANNOTATION(AnnotateHappensAfter);
Acquire(cur_thread(), CALLERPC, addr);
}
-void AnnotateCondVarSignal(char *f, int l, uptr cv) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
SCOPED_ANNOTATION(AnnotateCondVarSignal);
}
-void AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
}
-void AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
}
-void AnnotateCondVarWait(char *f, int l, uptr cv, uptr lock) {
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+ uptr lock) {
SCOPED_ANNOTATION(AnnotateCondVarWait);
}
-void AnnotateRWLockCreate(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockCreate);
MutexCreate(thr, pc, m, true, true, false);
}
-void AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
MutexCreate(thr, pc, m, true, true, true);
}
-void AnnotateRWLockDestroy(char *f, int l, uptr m) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
SCOPED_ANNOTATION(AnnotateRWLockDestroy);
MutexDestroy(thr, pc, m);
}
-void AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+ uptr is_w) {
SCOPED_ANNOTATION(AnnotateRWLockAcquired);
if (is_w)
MutexLock(thr, pc, m);
@@ -206,7 +209,8 @@ void AnnotateRWLockAcquired(char *f, int l, uptr m, uptr is_w) {
MutexReadLock(thr, pc, m);
}
-void AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) {
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+ uptr is_w) {
SCOPED_ANNOTATION(AnnotateRWLockReleased);
if (is_w)
MutexUnlock(thr, pc, m);
@@ -214,19 +218,20 @@ void AnnotateRWLockReleased(char *f, int l, uptr m, uptr is_w) {
MutexReadUnlock(thr, pc, m);
}
-void AnnotateTraceMemory(char *f, int l, uptr mem) {
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
SCOPED_ANNOTATION(AnnotateTraceMemory);
}
-void AnnotateFlushState(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
SCOPED_ANNOTATION(AnnotateFlushState);
}
-void AnnotateNewMemory(char *f, int l, uptr mem, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+ uptr size) {
SCOPED_ANNOTATION(AnnotateNewMemory);
}
-void AnnotateNoOp(char *f, int l, uptr mem) {
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
SCOPED_ANNOTATION(AnnotateNoOp);
}
@@ -238,7 +243,7 @@ static void ReportMissedExpectedRace(ExpectRace *race) {
Printf("==================\n");
}
-void AnnotateFlushExpectedRaces(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
Lock lock(&dyn_ann_ctx->mtx);
while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
@@ -253,32 +258,39 @@ void AnnotateFlushExpectedRaces(char *f, int l) {
}
}
-void AnnotateEnableRaceDetection(char *f, int l, int enable) {
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
+ char *f, int l, int enable) {
SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
// FIXME: Reconsider this functionality later. It may be irrelevant.
}
-void AnnotateMutexIsUsedAsCondVar(char *f, int l, uptr mu) {
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
+ char *f, int l, uptr mu) {
SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
}
-void AnnotatePCQGet(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(
+ char *f, int l, uptr pcq) {
SCOPED_ANNOTATION(AnnotatePCQGet);
}
-void AnnotatePCQPut(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(
+ char *f, int l, uptr pcq) {
SCOPED_ANNOTATION(AnnotatePCQPut);
}
-void AnnotatePCQDestroy(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
+ char *f, int l, uptr pcq) {
SCOPED_ANNOTATION(AnnotatePCQDestroy);
}
-void AnnotatePCQCreate(char *f, int l, uptr pcq) {
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
+ char *f, int l, uptr pcq) {
SCOPED_ANNOTATION(AnnotatePCQCreate);
}
-void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(
+ char *f, int l, uptr mem, char *desc) {
SCOPED_ANNOTATION(AnnotateExpectRace);
Lock lock(&dyn_ann_ctx->mtx);
AddExpectRace(&dyn_ann_ctx->expect,
@@ -286,7 +298,8 @@ void AnnotateExpectRace(char *f, int l, uptr mem, char *desc) {
DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
}
-static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
+static void BenignRaceImpl(
+ char *f, int l, uptr mem, uptr size, char *desc) {
Lock lock(&dyn_ann_ctx->mtx);
AddExpectRace(&dyn_ann_ctx->benign,
f, l, mem, size, desc);
@@ -294,69 +307,76 @@ static void BenignRaceImpl(char *f, int l, uptr mem, uptr size, char *desc) {
}
// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
-void AnnotateBenignRaceSized(char *f, int l, uptr mem, uptr size, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr size, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRaceSized);
BenignRaceImpl(f, l, mem, size, desc);
}
-void AnnotateBenignRace(char *f, int l, uptr mem, char *desc) {
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(
+ char *f, int l, uptr mem, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRace);
BenignRaceImpl(f, l, mem, 1, desc);
}
-void AnnotateIgnoreReadsBegin(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
IgnoreCtl(cur_thread(), false, true);
}
-void AnnotateIgnoreReadsEnd(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
IgnoreCtl(cur_thread(), false, false);
}
-void AnnotateIgnoreWritesBegin(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
IgnoreCtl(cur_thread(), true, true);
}
-void AnnotateIgnoreWritesEnd(char *f, int l) {
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
- IgnoreCtl(cur_thread(), true, false);
+ IgnoreCtl(thr, true, false);
}
-void AnnotatePublishMemoryRange(char *f, int l, uptr addr, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
}
-void AnnotateUnpublishMemoryRange(char *f, int l, uptr addr, uptr size) {
+void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
}
-void AnnotateThreadName(char *f, int l, char *name) {
+void INTERFACE_ATTRIBUTE AnnotateThreadName(
+ char *f, int l, char *name) {
SCOPED_ANNOTATION(AnnotateThreadName);
+ ThreadSetName(thr, name);
}
-void WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
SCOPED_ANNOTATION(AnnotateHappensBefore);
}
-void WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
SCOPED_ANNOTATION(AnnotateHappensAfter);
}
-void WTFAnnotateBenignRaceSized(char *f, int l, uptr mem, uptr sz, char *desc) {
+void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr sz, char *desc) {
SCOPED_ANNOTATION(AnnotateBenignRaceSized);
}
-int RunningOnValgrind() {
+int INTERFACE_ATTRIBUTE RunningOnValgrind() {
return flags()->running_on_valgrind;
}
-double __attribute__((weak)) ValgrindSlowdown(void) {
+double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
return 10.0;
}
-const char *ThreadSanitizerQuery(const char *query) {
+const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
if (internal_strcmp(query, "pure_happens_before") == 0)
return "1";
else
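For context on how these exported annotations are reached: each takes a file/line pair that wrapper macros normally supply via __FILE__/__LINE__. A minimal hand-off sketch calling the two happens-before annotations directly, with uptr spelled as unsigned long (an assumption that matches the LP64 targets this runtime supports):

    extern "C" void AnnotateHappensBefore(char *f, int l, unsigned long addr);
    extern "C" void AnnotateHappensAfter(char *f, int l, unsigned long addr);

    int data;

    void producer() {  // runs in thread 1
      data = 42;
      AnnotateHappensBefore((char *)__FILE__, __LINE__, (unsigned long)&data);
      // ... hand &data to the consumer through some lock-free channel ...
    }

    void consumer() {  // runs in thread 2, after receiving the hand-off
      AnnotateHappensAfter((char *)__FILE__, __LINE__, (unsigned long)&data);
      int v = data;  // ordered by the annotation pair, so no race report
      (void)v;
    }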
diff --git a/libsanitizer/tsan/tsan_interface_ann.h b/libsanitizer/tsan/tsan_interface_ann.h
index 1dafe6152a8..b10264cdd6f 100644
--- a/libsanitizer/tsan/tsan_interface_ann.h
+++ b/libsanitizer/tsan/tsan_interface_ann.h
@@ -12,6 +12,8 @@
#ifndef TSAN_INTERFACE_ANN_H
#define TSAN_INTERFACE_ANN_H
+#include <sanitizer/common_interface_defs.h>
+
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
@@ -19,8 +21,8 @@
extern "C" {
#endif
-void __tsan_acquire(void *addr);
-void __tsan_release(void *addr);
+void __tsan_acquire(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
+void __tsan_release(void *addr) SANITIZER_INTERFACE_ATTRIBUTE;
#ifdef __cplusplus
} // extern "C"
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 7193e7f52f4..25f171ed07a 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -112,34 +112,101 @@ static morder ConvertOrder(morder mo) {
return mo;
}
-template<typename T> T func_xchg(T v, T op) {
- return op;
+template<typename T> T func_xchg(volatile T *v, T op) {
+ return __sync_lock_test_and_set(v, op);
}
-template<typename T> T func_add(T v, T op) {
- return v + op;
+template<typename T> T func_add(volatile T *v, T op) {
+ return __sync_fetch_and_add(v, op);
}
-template<typename T> T func_sub(T v, T op) {
- return v - op;
+template<typename T> T func_sub(volatile T *v, T op) {
+ return __sync_fetch_and_sub(v, op);
}
-template<typename T> T func_and(T v, T op) {
- return v & op;
+template<typename T> T func_and(volatile T *v, T op) {
+ return __sync_fetch_and_and(v, op);
}
-template<typename T> T func_or(T v, T op) {
- return v | op;
+template<typename T> T func_or(volatile T *v, T op) {
+ return __sync_fetch_and_or(v, op);
}
-template<typename T> T func_xor(T v, T op) {
- return v ^ op;
+template<typename T> T func_xor(volatile T *v, T op) {
+ return __sync_fetch_and_xor(v, op);
}
-template<typename T> T func_nand(T v, T op) {
- return ~v & op;
+template<typename T> T func_nand(volatile T *v, T op) {
+ // clang does not support __sync_fetch_and_nand.
+ T cmp = *v;
+ for (;;) {
+ T newv = ~(cmp & op);
+ T cur = __sync_val_compare_and_swap(v, cmp, newv);
+ if (cmp == cur)
+ return cmp;
+ cmp = cur;
+ }
+}
+
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+ return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// clang does not support 128-bit atomic ops.
+// Atomic ops are executed under the tsan internal mutex,
+// so here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+a128 func_xchg(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = op;
+ return cmp;
+}
+
+a128 func_add(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp + op;
+ return cmp;
+}
+
+a128 func_sub(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp - op;
+ return cmp;
+}
+
+a128 func_and(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp & op;
+ return cmp;
+}
+
+a128 func_or(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp | op;
+ return cmp;
}
+a128 func_xor(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = cmp ^ op;
+ return cmp;
+}
+
+a128 func_nand(volatile a128 *v, a128 op) {
+ a128 cmp = *v;
+ *v = ~(cmp & op);
+ return cmp;
+}
+
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ a128 cur = *v;
+ if (cur == cmp)
+ *v = xch;
+ return cur;
+}
+#endif
+
#define SCOPED_ATOMIC(func, ...) \
mo = ConvertOrder(mo); \
mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
@@ -164,6 +231,7 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
thr->clock.acquire(&s->clock);
T v = *a;
s->mtx.ReadUnlock();
+ __sync_synchronize();
return v;
}
@@ -179,6 +247,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
*a = v;
return;
}
+ __sync_synchronize();
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->clock.ReleaseStore(&s->clock);
@@ -186,7 +255,7 @@ static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
s->mtx.Unlock();
}
-template<typename T, T (*F)(T v, T op)>
+template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, (uptr)a, true);
thr->clock.set(thr->tid, thr->fast_state.epoch());
@@ -196,10 +265,9 @@ static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
thr->clock.release(&s->clock);
else if (IsAcquireOrder(mo))
thr->clock.acquire(&s->clock);
- T c = *a;
- *a = F(c, v);
+ v = F(a, v);
s->mtx.Unlock();
- return c;
+ return v;
}
template<typename T>
@@ -256,16 +324,13 @@ static bool AtomicCAS(ThreadState *thr, uptr pc,
thr->clock.release(&s->clock);
else if (IsAcquireOrder(mo))
thr->clock.acquire(&s->clock);
- T cur = *a;
- bool res = false;
- if (cur == *c) {
- *a = v;
- res = true;
- } else {
- *c = cur;
- }
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
s->mtx.Unlock();
- return res;
+ if (pr == cc)
+ return true;
+ *c = pr;
+ return false;
}
template<typename T>
diff --git a/libsanitizer/tsan/tsan_interface_atomic.h b/libsanitizer/tsan/tsan_interface_atomic.h
index 15a0cc6594f..9db31da0638 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.h
+++ b/libsanitizer/tsan/tsan_interface_atomic.h
@@ -11,6 +11,10 @@
#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -41,157 +45,159 @@ typedef enum {
} __tsan_memory_order;
__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
- __tsan_memory_order mo);
+ __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
- __tsan_atomic8 v, __tsan_memory_order mo);
+ __tsan_atomic8 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
- __tsan_atomic16 v, __tsan_memory_order mo);
+ __tsan_atomic16 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
- __tsan_atomic32 v, __tsan_memory_order mo);
+ __tsan_atomic32 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
- __tsan_atomic64 v, __tsan_memory_order mo);
+ __tsan_atomic64 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
- __tsan_atomic128 v, __tsan_memory_order mo);
+ __tsan_atomic128 v, __tsan_memory_order mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
__tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
__tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
__tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
__tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
__tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
- __tsan_memory_order fail_mo);
+ __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
- __tsan_memory_order mo, __tsan_memory_order fail_mo);
+ __tsan_memory_order mo, __tsan_memory_order fail_mo) INTERFACE_ATTRIBUTE;
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+void __tsan_atomic_thread_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
+void __tsan_atomic_signal_fence(__tsan_memory_order mo) INTERFACE_ATTRIBUTE;
#ifdef __cplusplus
} // extern "C"
#endif
+#undef INTERFACE_ATTRIBUTE
+
#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
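The compare-exchange declarations above follow C11 semantics, which the AtomicCAS rewrite in tsan_interface_atomic.cc now implements exactly: on failure, the expected value c is overwritten with the value actually observed. A usage sketch, assuming this header is on the include path:

    #include "tsan_interface_atomic.h"

    volatile __tsan_atomic32 g;

    void set_once() {
      __tsan_atomic32 expected = 0;
      if (!__tsan_atomic32_compare_exchange_strong(
              &g, &expected, 1,
              __tsan_memory_order_seq_cst, __tsan_memory_order_seq_cst)) {
        // CAS failed: 'expected' now holds the value observed in 'g',
        // so a retry loop can continue without an extra load.
      }
    }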
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index 67af1b25dd8..5c776f14e8a 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -10,10 +10,53 @@
// Platform-specific code.
//===----------------------------------------------------------------------===//
+/*
+C++ linux memory layout:
+0000 0000 0000 - 03c0 0000 0000: protected
+03c0 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 6000 0000 0000: protected
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7fff ffff ffff: modules and main thread stack
+
+C++ COMPAT linux memory layout:
+0000 0000 0000 - 0400 0000 0000: protected
+0400 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 2900 0000 0000: protected
+2900 0000 0000 - 2c00 0000 0000: modules
+2c00 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7f00 0000 0000: -
+7f00 0000 0000 - 7fff ffff ffff: main thread stack
+
+Go linux and darwin memory layout:
+0000 0000 0000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00f8 0000 0000 - 0118 0000 0000: heap
+0118 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 1460 0000 0000: shadow
+1460 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7fff ffff ffff: -
+
+Go windows memory layout:
+0000 0000 0000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00f8 0000 0000 - 0118 0000 0000: heap
+0118 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0560 0000 0000: shadow
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07ff ffff ffff: -
+*/
+
#ifndef TSAN_PLATFORM_H
#define TSAN_PLATFORM_H
-#include "tsan_rtl.h"
+#include "tsan_defs.h"
+#include "tsan_trace.h"
#if defined(__LP64__) || defined(_WIN64)
namespace __tsan {
@@ -39,6 +82,13 @@ static const uptr kLinuxAppMemEnd = 0x7fffffffffffULL;
static const uptr kLinuxAppMemMsk = 0x7c0000000000ULL;
+#if defined(_WIN32)
+const uptr kTraceMemBegin = 0x056000000000ULL;
+#else
+const uptr kTraceMemBegin = 0x600000000000ULL;
+#endif
+const uptr kTraceMemSize = 0x020000000000ULL;
+
// This has to be a macro to allow constant initialization of constants below.
#ifndef TSAN_GO
#define MemToShadow(addr) \
@@ -85,6 +135,12 @@ void FlushShadowMemory();
const char *InitializePlatform();
void FinalizePlatform();
+void MapThreadTrace(uptr addr, uptr size);
+uptr ALWAYS_INLINE INLINE GetThreadTrace(int tid) {
+ uptr p = kTraceMemBegin + (uptr)tid * kTraceSize * sizeof(Event);
+ DCHECK_LT(p, kTraceMemBegin + kTraceMemSize);
+ return p;
+}
void internal_start_thread(void(*func)(void*), void *arg);
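A back-of-envelope check of the trace region this carves out. The per-thread slot size is an assumption: kTraceSize and Event live in tsan_trace.h, which is not shown here, but 4M events of 8 bytes each is the maximum implied by history_size=7:

    #include <cstdio>
    #include <cstdint>

    int main() {
      const uint64_t kTraceMemSize = 0x020000000000ull;  // 2 TB, from above
      const uint64_t kSlot = (4ull << 20) * 8;           // assumed 32 MB/thread
      printf("per-thread trace slot: %llu MB\n",
             (unsigned long long)(kSlot >> 20));
      printf("thread slots that fit: %llu\n",
             (unsigned long long)(kTraceMemSize / kSlot));  // 65536
      // GetThreadTrace(tid) is then kTraceMemBegin + tid * kSlot, and the
      // DCHECK keeps the result below kTraceMemBegin + kTraceMemSize.
    }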
diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc
index b26065543fe..34221af16c9 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cc
+++ b/libsanitizer/tsan/tsan_platform_linux.cc
@@ -101,7 +101,7 @@ void InitializeShadowMemory() {
const uptr kClosedLowBeg = 0x200000;
const uptr kClosedLowEnd = kLinuxShadowBeg - 1;
const uptr kClosedMidBeg = kLinuxShadowEnd + 1;
- const uptr kClosedMidEnd = kLinuxAppMemBeg - 1;
+ const uptr kClosedMidEnd = min(kLinuxAppMemBeg, kTraceMemBegin);
ProtectRange(kClosedLowBeg, kClosedLowEnd);
ProtectRange(kClosedMidBeg, kClosedMidEnd);
DPrintf("kClosedLow %zx-%zx (%zuGB)\n",
@@ -118,6 +118,16 @@ void InitializeShadowMemory() {
}
#endif
+void MapThreadTrace(uptr addr, uptr size) {
+ DPrintf("Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+ CHECK_GE(addr, kTraceMemBegin);
+ CHECK_LE(addr + size, kTraceMemBegin + kTraceMemSize);
+ if (addr != (uptr)MmapFixedNoReserve(addr, size)) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace\n");
+ Die();
+ }
+}
+
static uptr g_data_start;
static uptr g_data_end;
@@ -190,28 +200,50 @@ static int InitTlsSize() {
}
#endif // #ifndef TSAN_GO
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ // The following magic is to prevent clang from replacing it with memset.
+ volatile rlimit rlim;
+ rlim.rlim_cur = lim;
+ rlim.rlim_max = lim;
+ setrlimit(res, (rlimit*)&rlim);
+}
+
const char *InitializePlatform() {
void *p = 0;
if (sizeof(p) == 8) {
// Disable core dumps, dumping of 16TB usually takes a bit long.
- // The following magic is to prevent clang from replacing it with memset.
- volatile rlimit lim;
- lim.rlim_cur = 0;
- lim.rlim_max = 0;
- setrlimit(RLIMIT_CORE, (rlimit*)&lim);
+ setlim(RLIMIT_CORE, 0);
}
+ bool reexec = false;
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
// we re-exec the program with limited stack size as a best effort.
- if (StackSizeIsUnlimited()) {
- const uptr kMaxStackSize = 32 * 1024 * 1024; // 32 Mb
+ if (getlim(RLIMIT_STACK) == (rlim_t)-1) {
+ const uptr kMaxStackSize = 32 * 1024 * 1024;
Report("WARNING: Program is run with unlimited stack size, which "
"wouldn't work with ThreadSanitizer.\n");
Report("Re-execing with stack size limited to %zd bytes.\n", kMaxStackSize);
SetStackSizeLimitInBytes(kMaxStackSize);
- ReExec();
+ reexec = true;
}
+ if (getlim(RLIMIT_AS) != (rlim_t)-1) {
+ Report("WARNING: Program is run with limited virtual address space, which "
+ "wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with unlimited virtual address space.\n");
+ setlim(RLIMIT_AS, -1);
+ reexec = true;
+ }
+
+ if (reexec)
+ ReExec();
+
#ifndef TSAN_GO
CheckPIE();
g_tls_size = (uptr)InitTlsSize();
diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc
index 62e0b0c3314..18870a76eb7 100644
--- a/libsanitizer/tsan/tsan_report.cc
+++ b/libsanitizer/tsan/tsan_report.cc
@@ -24,6 +24,7 @@ ReportDesc::ReportDesc()
}
ReportDesc::~ReportDesc() {
+ // FIXME(dvyukov): it must be leaking a lot of memory.
}
#ifndef TSAN_GO
@@ -78,8 +79,9 @@ static void PrintMop(const ReportMop *mop, bool first) {
static void PrintLocation(const ReportLocation *loc) {
if (loc->type == ReportLocationGlobal) {
- Printf(" Location is global '%s' of size %zu at %zx %s:%d\n",
- loc->name, loc->size, loc->addr, loc->file, loc->line);
+ Printf(" Location is global '%s' of size %zu at %zx %s:%d (%s+%p)\n\n",
+ loc->name, loc->size, loc->addr, loc->file, loc->line,
+ loc->module, loc->offset);
} else if (loc->type == ReportLocationHeap) {
Printf(" Location is heap block of size %zu at %p allocated",
loc->size, loc->addr);
@@ -89,7 +91,7 @@ static void PrintLocation(const ReportLocation *loc) {
Printf(" by thread %d:\n", loc->tid);
PrintStack(loc->stack);
} else if (loc->type == ReportLocationStack) {
- Printf(" Location is stack of thread %d:\n", loc->tid);
+ Printf(" Location is stack of thread %d:\n\n", loc->tid);
}
}
@@ -149,6 +151,10 @@ void PrintReport(const ReportDesc *rep) {
#else
void PrintStack(const ReportStack *ent) {
+ if (ent == 0) {
+ Printf(" [failed to restore the stack]\n\n");
+ return;
+ }
for (int i = 0; ent; ent = ent->next, i++) {
Printf(" %s()\n %s:%d +0x%zx\n",
ent->func, ent->file, ent->line, (void*)ent->offset);
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index 99728291317..6776f1e078c 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -56,6 +56,8 @@ struct ReportLocation {
ReportLocationType type;
uptr addr;
uptr size;
+ char *module;
+ uptr offset;
int tid;
char *name;
char *file;
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index a3e82710d90..2778ac3e490 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -82,7 +82,8 @@ ThreadContext::ThreadContext(int tid)
, epoch0()
, epoch1()
, dead_info()
- , dead_next() {
+ , dead_next()
+ , name() {
}
static void WriteMemoryProfile(char *buf, uptr buf_size, int num) {
@@ -189,7 +190,12 @@ void Initialize(ThreadState *thr) {
ctx->dead_list_tail = 0;
InitializeFlags(&ctx->flags, env);
// Setup correct file descriptor for error reports.
- __sanitizer_set_report_fd(flags()->log_fileno);
+ if (internal_strcmp(flags()->log_path, "stdout") == 0)
+ __sanitizer_set_report_fd(kStdoutFd);
+ else if (internal_strcmp(flags()->log_path, "stderr") == 0)
+ __sanitizer_set_report_fd(kStderrFd);
+ else
+ __sanitizer_set_report_path(flags()->log_path);
InitializeSuppressions();
#ifndef TSAN_GO
// Initialize external symbolizer before internal threads are started.
@@ -279,13 +285,27 @@ void TraceSwitch(ThreadState *thr) {
thr->nomalloc++;
ScopedInRtl in_rtl;
Lock l(&thr->trace.mtx);
- unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % kTraceParts;
+ unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
TraceHeader *hdr = &thr->trace.headers[trace];
hdr->epoch0 = thr->fast_state.epoch();
hdr->stack0.ObtainCurrent(thr, 0);
thr->nomalloc--;
}
+uptr TraceTopPC(ThreadState *thr) {
+ Event *events = (Event*)GetThreadTrace(thr->tid);
+ uptr pc = events[thr->fast_state.GetTracePos()];
+ return pc;
+}
+
+uptr TraceSize() {
+ return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
+uptr TraceParts() {
+ return TraceSize() / kTracePartSize;
+}
+
#ifndef TSAN_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
@@ -342,7 +362,7 @@ static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
}
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
- return thr->clock.get(old.tid()) >= old.epoch();
+ return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}
ALWAYS_INLINE
@@ -451,7 +471,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
// We must not store to the trace if we do not store to the shadow.
// That is, this call must be moved somewhere below.
- TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
shadow_mem, cur);
@@ -502,6 +522,7 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
MemoryAccessRange(thr, pc, addr, size, true);
Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
s.MarkAsFreed();
s.SetWrite(true);
s.SetAddr0AndSizeLog(0, 3);
@@ -510,6 +531,7 @@ void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
s.SetWrite(true);
s.SetAddr0AndSizeLog(0, 3);
MemoryRangeSet(thr, pc, addr, size, s.raw());
@@ -521,7 +543,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
StatInc(thr, StatFuncEnter);
DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncEnter, pc);
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
@@ -551,7 +573,7 @@ void FuncExit(ThreadState *thr) {
StatInc(thr, StatFuncExit);
DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeFuncExit, 0);
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
DCHECK_GT(thr->shadow_stack_pos, &thr->shadow_stack[0]);
#ifndef TSAN_GO
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index ee60f532e4c..56fcad1412f 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -25,7 +25,7 @@
#define TSAN_RTL_H
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_allocator64.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
@@ -33,6 +33,11 @@
#include "tsan_trace.h"
#include "tsan_vector.h"
#include "tsan_report.h"
+#include "tsan_platform.h"
+
+#if SANITIZER_WORDSIZE != 64
+# error "ThreadSanitizer is supported only on 64-bit platforms"
+#endif
namespace __tsan {
@@ -55,8 +60,7 @@ const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(MBlock),
DefaultSizeClassMap> PrimaryAllocator;
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
- PrimaryAllocator> AllocatorCache;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
SecondaryAllocator> Allocator;
@@ -67,18 +71,19 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
// FastState (from most significant bit):
-// unused : 1
+// ignore : 1
// tid : kTidBits
// epoch : kClkBits
// unused : -
-// ignore_bit : 1
+// history_size : 3
class FastState {
public:
FastState(u64 tid, u64 epoch) {
x_ = tid << kTidShift;
x_ |= epoch << kClkShift;
- DCHECK(tid == this->tid());
- DCHECK(epoch == this->epoch());
+ DCHECK_EQ(tid, this->tid());
+ DCHECK_EQ(epoch, this->epoch());
+ DCHECK_EQ(GetIgnoreBit(), false);
}
explicit FastState(u64 x)
@@ -90,6 +95,11 @@ class FastState {
}
u64 tid() const {
+ u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+ return res;
+ }
+
+ u64 TidWithIgnore() const {
u64 res = x_ >> kTidShift;
return res;
}
@@ -108,13 +118,34 @@ class FastState {
void SetIgnoreBit() { x_ |= kIgnoreBit; }
void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
- bool GetIgnoreBit() const { return x_ & kIgnoreBit; }
+ bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+ void SetHistorySize(int hs) {
+ CHECK_GE(hs, 0);
+ CHECK_LE(hs, 7);
+ x_ = (x_ & ~7) | hs;
+ }
+
+ int GetHistorySize() const {
+ return (int)(x_ & 7);
+ }
+
+ void ClearHistorySize() {
+ x_ &= ~7;
+ }
+
+ u64 GetTracePos() const {
+ const int hs = GetHistorySize();
+ // When hs == 0, the trace consists of 2 parts.
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
private:
friend class Shadow;
static const int kTidShift = 64 - kTidBits - 1;
static const int kClkShift = kTidShift - kClkBits;
- static const u64 kIgnoreBit = 1ull;
+ static const u64 kIgnoreBit = 1ull << 63;
static const u64 kFreedBit = 1ull << 63;
u64 x_;
};
@@ -128,9 +159,14 @@ class FastState {
// addr0 : 3
class Shadow : public FastState {
public:
- explicit Shadow(u64 x) : FastState(x) { }
+ explicit Shadow(u64 x)
+ : FastState(x) {
+ }
- explicit Shadow(const FastState &s) : FastState(s.x_) { }
+ explicit Shadow(const FastState &s)
+ : FastState(s.x_) {
+ ClearHistorySize();
+ }
void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
DCHECK_EQ(x_ & 31, 0);
@@ -152,7 +188,7 @@ class Shadow : public FastState {
static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
- DCHECK_EQ(shifted_xor == 0, s1.tid() == s2.tid());
+ DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
return shifted_xor == 0;
}
@@ -335,6 +371,7 @@ struct ThreadContext {
StackTrace creation_stack;
ThreadDeadInfo *dead_info;
ThreadContext *dead_next; // In dead thread list.
+ char *name; // As annotated by user.
explicit ThreadContext(int tid);
};
@@ -491,6 +528,7 @@ int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
+void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
@@ -531,19 +569,24 @@ void AfterSleep(ThreadState *thr, uptr pc);
#endif
void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
extern "C" void __tsan_trace_switch();
-void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, u64 epoch,
+void ALWAYS_INLINE INLINE TraceAddEvent(ThreadState *thr, FastState fs,
EventType typ, uptr addr) {
StatInc(thr, StatEvents);
- if (UNLIKELY((epoch % kTracePartSize) == 0)) {
+ u64 pos = fs.GetTracePos();
+ if (UNLIKELY((pos % kTracePartSize) == 0)) {
#ifndef TSAN_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
#endif
}
- Event *evp = &thr->trace.events[epoch % kTraceSize];
+ Event *trace = (Event*)GetThreadTrace(fs.tid());
+ Event *evp = &trace[pos];
Event ev = (u64)addr | ((u64)typ << 61);
*evp = ev;
}
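
A worked example of the position arithmetic above, as a sketch assuming kTracePartSizeBits == 14 (set in tsan_trace.h below): GetTracePos() masks the epoch down to a ring of 2^(14 + hs + 1) events, so each increment of history_size doubles how far back a thread's trace reaches, and a pos landing on a part boundary is what triggers the trace switch:

  #include <cstdint>
  #include <cstdio>

  static const int kTracePartSizeBits = 14;
  static const uint64_t kTracePartSize = 1ull << kTracePartSizeBits;

  // Mirrors FastState::GetTracePos().
  static uint64_t TracePos(uint64_t epoch, int hs) {
    const uint64_t mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch & mask;
  }

  int main() {
    // hs == 0: the ring is 2 parts (32K events); epoch 32768 wraps to 0
    // and, being a part boundary, would invoke __tsan_trace_switch.
    printf("%llu\n", (unsigned long long)TracePos(32768, 0));  // prints 0
    // hs == 7: 2^22 events (256 parts); the same epoch does not wrap yet.
    printf("%llu\n", (unsigned long long)TracePos(32768, 7));  // 32768
    return 0;
  }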
diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc
index 9ace430bc58..e5b43be6a49 100644
--- a/libsanitizer/tsan/tsan_rtl_mutex.cc
+++ b/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -73,7 +73,7 @@ void MutexLock(ThreadState *thr, uptr pc, uptr addr) {
if (IsAppMem(addr))
MemoryRead1Byte(thr, pc, addr);
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeLock, addr);
+ TraceAddEvent(thr, thr->fast_state, EventTypeLock, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
if (s->owner_tid == SyncVar::kInvalidTid) {
CHECK_EQ(s->recursion, 0);
@@ -105,7 +105,7 @@ void MutexUnlock(ThreadState *thr, uptr pc, uptr addr) {
if (IsAppMem(addr))
MemoryRead1Byte(thr, pc, addr);
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
if (s->recursion == 0) {
if (!s->is_broken) {
@@ -142,7 +142,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr) {
if (IsAppMem(addr))
MemoryRead1Byte(thr, pc, addr);
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRLock, addr);
+ TraceAddEvent(thr, thr->fast_state, EventTypeRLock, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, false);
if (s->owner_tid != SyncVar::kInvalidTid) {
Printf("ThreadSanitizer WARNING: read lock of a write locked mutex\n");
@@ -162,7 +162,7 @@ void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
if (IsAppMem(addr))
MemoryRead1Byte(thr, pc, addr);
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
SyncVar *s = CTX()->synctab.GetAndLock(thr, pc, addr, true);
if (s->owner_tid != SyncVar::kInvalidTid) {
Printf("ThreadSanitizer WARNING: read unlock of a write "
@@ -186,7 +186,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
// Seems to be read unlock.
StatInc(thr, StatMutexReadUnlock);
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeRUnlock, addr);
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, addr);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&s->read_clock);
@@ -203,7 +203,7 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
// First, it's a bug to increment the epoch w/o writing to the trace.
// Then, the acquire/release logic can be factored out as well.
thr->fast_state.IncrementEpoch();
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeUnlock, addr);
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, addr);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.ReleaseStore(&s->clock);
diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc
index 24e29d25400..6aae6cf6e8a 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cc
+++ b/libsanitizer/tsan/tsan_rtl_report.cc
@@ -123,8 +123,7 @@ ScopedReport::ScopedReport(ReportType typ) {
ScopedReport::~ScopedReport() {
ctx_->report_mtx.Unlock();
- rep_->~ReportDesc();
- internal_free(rep_);
+ DestroyAndFree(rep_);
}
void ScopedReport::AddStack(const StackTrace *stack) {
@@ -156,6 +155,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx) {
rt->id = tctx->tid;
rt->pid = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
+ rt->name = tctx->name ? internal_strdup(tctx->name) : 0;
rt->stack = SymbolizeStack(tctx->creation_stack);
}
@@ -218,9 +218,11 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
loc->type = ReportLocationGlobal;
loc->addr = addr;
loc->size = size;
+ loc->module = symb->module ? internal_strdup(symb->module) : 0;
+ loc->offset = symb->offset;
loc->tid = 0;
- loc->name = symb->func;
- loc->file = symb->file;
+ loc->name = symb->func ? internal_strdup(symb->func) : 0;
+ loc->file = symb->file ? internal_strdup(symb->file) : 0;
loc->line = symb->line;
loc->stack = 0;
internal_free(symb);
@@ -261,12 +263,12 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
return;
}
Lock l(&trace->mtx);
- const int partidx = (epoch / (kTraceSize / kTraceParts)) % kTraceParts;
+ const int partidx = (epoch / kTracePartSize) % TraceParts();
TraceHeader* hdr = &trace->headers[partidx];
if (epoch < hdr->epoch0)
return;
- const u64 eend = epoch % kTraceSize;
- const u64 ebegin = eend / kTracePartSize * kTracePartSize;
+ const u64 eend = epoch % TraceSize();
+ const u64 ebegin = RoundDown(eend, kTracePartSize);
DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
InternalScopedBuffer<uptr> stack(1024); // FIXME: de-hardcode 1024
@@ -275,8 +277,9 @@ void RestoreStack(int tid, const u64 epoch, StackTrace *stk) {
DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]);
}
uptr pos = hdr->stack0.Size();
+ Event *events = (Event*)GetThreadTrace(tid);
for (uptr i = ebegin; i <= eend; i++) {
- Event ev = trace->events[i];
+ Event ev = events[i];
EventType typ = (EventType)(ev >> 61);
uptr pc = (uptr)(ev & 0xffffffffffffull);
DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
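
To make the index arithmetic concrete, a small sketch (geometry values are assumptions for the maximal history_size, where TraceSize() == 4M events and TraceParts() == 256):

  #include <cassert>
  #include <cstdint>

  static const uint64_t kTracePartSize = 16 * 1024;
  static const uint64_t kTraceSize = 4ull * 1024 * 1024;  // assumed: hs == 7
  static const uint64_t kTraceParts = kTraceSize / kTracePartSize;  // 256

  int main() {
    const uint64_t epoch = 5 * kTracePartSize + 123;  // racy access epoch
    const int partidx = (int)((epoch / kTracePartSize) % kTraceParts);
    const uint64_t eend = epoch % kTraceSize;
    const uint64_t ebegin = eend / kTracePartSize * kTracePartSize;
    assert(partidx == 5);   // header slot whose epoch0/stack0 apply
    assert(ebegin == 81920);
    assert(eend == 82043);  // only events in [ebegin, eend] are replayed
    return 0;
  }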
@@ -382,6 +385,39 @@ bool IsFiredSuppression(Context *ctx,
return false;
}
+// On programs that use Java we see weird reports like:
+// WARNING: ThreadSanitizer: data race (pid=22512)
+// Read of size 8 at 0x7d2b00084318 by thread 100:
+// #0 memcpy tsan_interceptors.cc:406 (foo+0x00000d8dfae3)
+// #1 <null> <null>:0 (0x7f7ad9b40193)
+// Previous write of size 8 at 0x7d2b00084318 by thread 105:
+// #0 strncpy tsan_interceptors.cc:501 (foo+0x00000d8e0919)
+// #1 <null> <null>:0 (0x7f7ad9b42707)
+static bool IsJavaNonsense(const ReportDesc *rep) {
+ for (uptr i = 0; i < rep->mops.Size(); i++) {
+ ReportMop *mop = rep->mops[i];
+ ReportStack *frame = mop->stack;
+ if (frame != 0 && frame->func != 0
+ && (internal_strcmp(frame->func, "memset") == 0
+ || internal_strcmp(frame->func, "memcpy") == 0
+ || internal_strcmp(frame->func, "strcmp") == 0
+ || internal_strcmp(frame->func, "strncpy") == 0
+ || internal_strcmp(frame->func, "pthread_mutex_lock") == 0)) {
+ frame = frame->next;
+ if (frame == 0
+ || (frame->func == 0 && frame->file == 0 && frame->line == 0
+ && frame->module == 0)) {
+ if (frame) {
+ FiredSuppression supp = {rep->typ, frame->pc};
+ CTX()->fired_suppressions.PushBack(supp);
+ }
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
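
This heuristic is opt-in: ReportRace() below consults it only when the suppress_java flag (added in tsan_flags.cc) is set, e.g. (invocation illustrative):

  TSAN_OPTIONS=suppress_java=1 ./app_embedding_a_jvm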
void ReportRace(ThreadState *thr) {
if (!flags()->report_bugs)
return;
@@ -414,8 +450,7 @@ void ReportRace(ThreadState *thr) {
ScopedReport rep(freed ? ReportTypeUseAfterFree : ReportTypeRace);
const uptr kMop = 2;
StackTrace traces[kMop];
- const uptr toppc = thr->trace.events[thr->fast_state.epoch() % kTraceSize]
- & ((1ull << 61) - 1);
+ const uptr toppc = TraceTopPC(thr);
traces[0].ObtainCurrent(thr, toppc);
if (IsFiredSuppression(ctx, rep, traces[0]))
return;
@@ -430,6 +465,9 @@ void ReportRace(ThreadState *thr) {
rep.AddMemoryAccess(addr, s, &traces[i]);
}
+ if (flags()->suppress_java && IsJavaNonsense(rep.GetReport()))
+ return;
+
for (uptr i = 0; i < kMop; i++) {
FastState s(thr->racy_state[i]);
ThreadContext *tctx = ctx->threads[s.tid()];
diff --git a/libsanitizer/tsan/tsan_rtl_thread.cc b/libsanitizer/tsan/tsan_rtl_thread.cc
index bb7f35c24ed..462d12c7dee 100644
--- a/libsanitizer/tsan/tsan_rtl_thread.cc
+++ b/libsanitizer/tsan/tsan_rtl_thread.cc
@@ -96,6 +96,7 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
ThreadContext *tctx = 0;
if (ctx->dead_list_size > kThreadQuarantineSize
|| ctx->thread_seq >= kMaxTid) {
+ // Reusing old thread descriptor and tid.
if (ctx->dead_list_size == 0) {
Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
kMaxTid);
@@ -115,12 +116,18 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
tctx->sync.Reset();
tid = tctx->tid;
DestroyAndFree(tctx->dead_info);
+ if (tctx->name) {
+ internal_free(tctx->name);
+ tctx->name = 0;
+ }
} else {
+ // Allocating new thread descriptor and tid.
StatInc(thr, StatThreadMaxTid);
tid = ctx->thread_seq++;
void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
tctx = new(mem) ThreadContext(tid);
ctx->threads[tid] = tctx;
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
}
CHECK_NE(tctx, 0);
CHECK_GE(tid, 0);
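
MapThreadTrace gives every tid a dedicated trace region at a fixed address, which is what lets TraceAddEvent and RestoreStack locate a thread's events from the tid alone, with no pointer stored in Trace. A sketch of that addressing scheme (the base address and size here are hypothetical; the real layout is defined by GetThreadTrace() in tsan_platform.h):

  #include <cstdint>

  typedef uint64_t Event;
  static const uintptr_t kTraceMemBegin = 0x056000000000ull;  // hypothetical
  static const uint64_t kTraceSizeMax = 4ull * 1024 * 1024;   // events per tid

  // Analogous to GetThreadTrace(tid): a pure function of the tid.
  static uintptr_t ThreadTraceAddr(int tid) {
    return kTraceMemBegin + (uintptr_t)tid * kTraceSizeMax * sizeof(Event);
  }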
@@ -141,12 +148,11 @@ int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
if (tid) {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&tctx->sync);
StatInc(thr, StatSyncRelease);
-
tctx->creation_stack.ObtainCurrent(thr, pc);
}
return tid;
@@ -185,7 +191,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
CHECK_EQ(tctx->status, ThreadStatusCreated);
tctx->status = ThreadStatusRunning;
tctx->os_id = os_id;
- tctx->epoch0 = tctx->epoch1 + 1;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
tctx->epoch1 = (u64)-1;
new(thr) ThreadState(CTX(), tid, tctx->unique_id,
tctx->epoch0, stk_addr, stk_size,
@@ -202,6 +210,9 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr->fast_synch_epoch = tctx->epoch0;
thr->clock.set(tid, tctx->epoch0);
thr->clock.acquire(&tctx->sync);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
+ thr->trace.headers[trace].epoch0 = tctx->epoch0;
StatInc(thr, StatSyncAcquire);
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
@@ -236,7 +247,7 @@ void ThreadFinish(ThreadState *thr) {
} else {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
- TraceAddEvent(thr, thr->fast_state.epoch(), EventTypeMop, 0);
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
thr->clock.set(thr->tid, thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
thr->clock.release(&tctx->sync);
@@ -247,9 +258,8 @@ void ThreadFinish(ThreadState *thr) {
// Save info about the thread.
tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
ThreadDeadInfo();
- internal_memcpy(&tctx->dead_info->trace.events[0],
- &thr->trace.events[0], sizeof(thr->trace.events));
- for (int i = 0; i < kTraceParts; i++) {
+ for (uptr i = 0; i < TraceParts(); i++) {
+ tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
tctx->dead_info->trace.headers[i].stack0.CopyFrom(
thr->trace.headers[i].stack0);
}
@@ -318,6 +328,20 @@ void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
}
}
+void ThreadSetName(ThreadState *thr, const char *name) {
+ Context *ctx = CTX();
+ Lock l(&ctx->thread_mtx);
+ ThreadContext *tctx = ctx->threads[thr->tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(tctx->status, ThreadStatusRunning);
+ if (tctx->name) {
+ internal_free(tctx->name);
+ tctx->name = 0;
+ }
+ if (name)
+ tctx->name = internal_strdup(name);
+}
+
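
ThreadSetName is presumably reached through the AnnotateThreadName() entry point (an assumption: tsan_interface_ann.cc is touched by this patch, and the new ThreadContext field is commented "As annotated by user"). Caller-side that would look roughly like the following sketch; the exact annotation signature is an assumption, not confirmed by this diff:

  // Sketch only: signature assumed from the dynamic-annotations convention.
  extern "C" void AnnotateThreadName(const char *file, int line,
                                     const char *name);

  static void *Worker(void *) {
    AnnotateThreadName(__FILE__, __LINE__, "worker");
    return 0;  // reports involving this thread now carry the name
  }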
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
uptr size, bool is_write) {
if (size == 0)
@@ -356,7 +380,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
fast_state.IncrementEpoch();
thr->fast_state = fast_state;
- TraceAddEvent(thr, fast_state.epoch(), EventTypeMop, pc);
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
bool unaligned = (addr % kShadowCell) != 0;
diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc
index 8c6c9511581..f8ecec1e207 100644
--- a/libsanitizer/tsan/tsan_stat.cc
+++ b/libsanitizer/tsan/tsan_stat.cc
@@ -98,6 +98,7 @@ void StatOutput(u64 *stat) {
name[StatInt_longjmp] = " longjmp ";
name[StatInt_siglongjmp] = " siglongjmp ";
name[StatInt_malloc] = " malloc ";
+ name[StatInt___libc_memalign] = " __libc_memalign ";
name[StatInt_calloc] = " calloc ";
name[StatInt_realloc] = " realloc ";
name[StatInt_free] = " free ";
diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h
index 73d8c046e9a..51e4c4a377e 100644
--- a/libsanitizer/tsan/tsan_stat.h
+++ b/libsanitizer/tsan/tsan_stat.h
@@ -95,6 +95,7 @@ enum StatType {
StatInt_longjmp,
StatInt_siglongjmp,
StatInt_malloc,
+ StatInt___libc_memalign,
StatInt_calloc,
StatInt_realloc,
StatInt_free,
diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc
index 3f825a804a1..42ba9b509eb 100644
--- a/libsanitizer/tsan/tsan_suppressions.cc
+++ b/libsanitizer/tsan/tsan_suppressions.cc
@@ -25,7 +25,7 @@ static char *ReadFile(const char *filename) {
if (filename == 0 || filename[0] == 0)
return 0;
InternalScopedBuffer<char> tmp(4*1024);
- if (filename[0] == '/')
+ if (filename[0] == '/' || GetPwd() == 0)
internal_snprintf(tmp.data(), tmp.size(), "%s", filename);
else
internal_snprintf(tmp.data(), tmp.size(), "%s/%s", GetPwd(), filename);
diff --git a/libsanitizer/tsan/tsan_symbolize.cc b/libsanitizer/tsan/tsan_symbolize.cc
index 439b8824c6a..50a4eaa518b 100644
--- a/libsanitizer/tsan/tsan_symbolize.cc
+++ b/libsanitizer/tsan/tsan_symbolize.cc
@@ -50,7 +50,7 @@ static ReportStack *NewReportStackEntry(const AddressInfo &info) {
}
ReportStack *SymbolizeCode(uptr addr) {
- if (0 != internal_strcmp(flags()->external_symbolizer_path, "")) {
+ if (flags()->external_symbolizer_path[0]) {
static const uptr kMaxAddrFrames = 16;
InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
for (uptr i = 0; i < kMaxAddrFrames; i++)
@@ -77,6 +77,12 @@ ReportStack *SymbolizeCode(uptr addr) {
}
ReportStack *SymbolizeData(uptr addr) {
+ if (flags()->external_symbolizer_path[0]) {
+ AddressInfo frame;
+ if (!__sanitizer::SymbolizeData(addr, &frame))
+ return 0;
+ return NewReportStackEntry(frame);
+ }
return SymbolizeDataAddr2Line(addr);
}
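
With this change, data (global) addresses go through the external symbolizer whenever one is configured, matching SymbolizeCode() above; otherwise the addr2line fallback is kept. The symbolizer is selected via the external_symbolizer_path runtime flag, e.g. (paths illustrative):

  TSAN_OPTIONS=external_symbolizer_path=/usr/bin/llvm-symbolizer ./app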
diff --git a/libsanitizer/tsan/tsan_trace.h b/libsanitizer/tsan/tsan_trace.h
index 69233a61bab..154cc15c083 100644
--- a/libsanitizer/tsan/tsan_trace.h
+++ b/libsanitizer/tsan/tsan_trace.h
@@ -17,12 +17,9 @@
namespace __tsan {
-#ifndef TSAN_HISTORY_SIZE // in kibitraces
-#define TSAN_HISTORY_SIZE 128
-#endif
-
-const int kTracePartSize = 16 * 1024;
-const int kTraceParts = TSAN_HISTORY_SIZE * 1024 / kTracePartSize;
+const int kTracePartSizeBits = 14;
+const int kTracePartSize = 1 << kTracePartSizeBits;
+const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
// Must fit into 3 bits.
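
Concretely: 16K events per part, and at most 4M events (256 parts, 32 MB at 8 bytes per event) of ring per thread; how much of that ring a thread actually uses is selected by the history_size flag via FastState::GetTracePos() in tsan_rtl.h. A sketch tabulating that relationship:

  #include <cstdio>

  static const int kTracePartSizeBits = 14;
  static const int kTracePartSize = 1 << kTracePartSizeBits;        // 16384
  static const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize;  // 256

  int main() {
    for (int hs = 0; hs <= 7; hs++) {  // the 3-bit history_size range
      const long long events = 1ll << (kTracePartSizeBits + hs + 1);
      printf("history_size=%d -> %lld events (%lld parts)\n",
             hs, events, events / kTracePartSize);
    }
    return 0;  // hs == 7 yields the full 4M-event ring
  }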
@@ -59,7 +56,6 @@ struct TraceHeader {
};
struct Trace {
- Event events[kTraceSize];
TraceHeader headers[kTraceParts];
Mutex mtx;