author     kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-02-13 10:46:01 +0000
committer  kcc <kcc@138bc75d-0d04-0410-961f-82ee72b054a4>  2013-02-13 10:46:01 +0000
commit     7da89e8076fc3bc8b0eb22721afdce421d225d71 (patch)
tree       c4504a71a4de65630ff00dd7aa8e062235fc5076 /libsanitizer/tsan/tsan_rtl.cc
parent     c31c80df29f1ac080fd4b89965599e1b337b15a1 (diff)
download   gcc-7da89e8076fc3bc8b0eb22721afdce421d225d71.tar.gz
libsanitizer merge from upstream r175049
git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@196009 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer/tsan/tsan_rtl.cc')
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cc  39
1 file changed, 22 insertions(+), 17 deletions(-)
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index 3615a7a9c2f..673a355f1dc 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -35,6 +35,11 @@ THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+// Can be overridden by a front-end.
+bool CPP_WEAK OnFinalize(bool failed) {
+ return failed;
+}
+
static Context *ctx;
Context *CTX() {
return ctx;
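
This hunk adds OnFinalize as a weak default, so a front-end can replace it at link time (CPP_WEAK presumably expands to something like __attribute__((weak))). A minimal standalone sketch of the pattern, with hypothetical names:

    // weak_demo.cc: weak default that a strong definition elsewhere overrides.
    #include <cstdio>

    // Analogous to the CPP_WEAK OnFinalize above; used only if no other
    // translation unit provides a strong definition of the same symbol.
    extern "C" bool __attribute__((weak)) OnFinalizeDemo(bool failed) {
      return failed;
    }

    int main() {
      std::printf("%d\n", OnFinalizeDemo(true));  // prints 1 with only the weak default
      return 0;
    }
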
@@ -136,7 +141,7 @@ static void InitializeMemoryProfile() {
InternalScopedBuffer<char> filename(4096);
internal_snprintf(filename.data(), filename.size(), "%s.%d",
flags()->profile_memory, GetPid());
- fd_t fd = internal_open(filename.data(), true);
+ fd_t fd = OpenFile(filename.data(), true);
if (fd == kInvalidFd) {
Printf("Failed to open memory profile file '%s'\n", &filename[0]);
Die();
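
The surrounding code builds a per-process profile path with internal_snprintf and opens it via the renamed OpenFile helper. A portable sketch of the same "<base>.<pid>" scheme in standard C++ (a hypothetical helper, not the sanitizer-internal API):

    #include <cstdio>
    #include <unistd.h>

    static FILE *OpenProfileFileDemo(const char *base) {
      char filename[4096];
      std::snprintf(filename, sizeof(filename), "%s.%d", base, (int)getpid());
      FILE *f = std::fopen(filename, "w");  // analogue of OpenFile(..., true)
      if (!f)
        std::fprintf(stderr, "Failed to open memory profile file '%s'\n", filename);
      return f;
    }
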
@@ -180,6 +185,7 @@ void Initialize(ThreadState *thr) {
if (is_initialized)
return;
is_initialized = true;
+ SanitizerToolName = "ThreadSanitizer";
// Install tool-specific callbacks in sanitizer_common.
SetCheckFailedCallback(TsanCheckFailed);
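
SetCheckFailedCallback is a plain function-pointer registration hook. A simplified sketch of the pattern (the parameter list here is an assumption, modeled on typical sanitizer CHECK-failure reporting):

    // Simplified stand-in for the sanitizer_common registration hook.
    typedef void (*CheckFailedCallback)(const char *file, int line,
                                        const char *cond,
                                        unsigned long long v1,
                                        unsigned long long v2);
    static CheckFailedCallback check_failed_callback;

    void SetCheckFailedCallbackDemo(CheckFailedCallback cb) {
      check_failed_callback = cb;  // e.g. TsanCheckFailed in this file
    }
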
@@ -237,7 +243,7 @@ void Initialize(ThreadState *thr) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
" Call __tsan_resume().\n",
GetPid());
- while (__tsan_resumed == 0);
+ while (__tsan_resumed == 0) {}
}
}
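
Replacing the bare ; with {} keeps the spin loop's behavior but makes the empty body explicit (avoiding empty-body warnings). A self-contained sketch of the suspend/resume handshake; the volatile flag is an assumption for illustration:

    #include <cstdio>

    static volatile int resumed;  // stands in for __tsan_resumed

    extern "C" void demo_resume() { resumed = 1; }  // __tsan_resume() analogue

    int main() {
      demo_resume();           // in the runtime, a debugger or test calls this
      while (resumed == 0) {}  // explicit empty body, as in the diff
      std::printf("resumed\n");
      return 0;
    }
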
@@ -253,6 +259,11 @@ int Finalize(ThreadState *thr) {
ctx->report_mtx.Lock();
ctx->report_mtx.Unlock();
+#ifndef TSAN_GO
+ if (ctx->flags.verbosity)
+ AllocatorPrintStats();
+#endif
+
ThreadFinalize(thr);
if (ctx->nreported) {
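
The back-to-back Lock()/Unlock() on report_mtx at the top of Finalize is a drain idiom: it blocks until any in-flight report has finished, then releases immediately, guarding no code of its own. The same idea with std::mutex:

    #include <mutex>

    std::mutex report_mtx_demo;

    // Wait for a concurrently held report_mtx_demo to be released; the
    // lock/unlock pair only synchronizes, it protects nothing afterwards.
    void WaitForPendingReportsDemo() {
      report_mtx_demo.lock();
      report_mtx_demo.unlock();
    }
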
@@ -270,6 +281,8 @@ int Finalize(ThreadState *thr) {
ctx->nmissed_expected);
}
+ failed = OnFinalize(failed);
+
StatAggregate(ctx->stat, thr->stat);
StatOutput(ctx->stat);
return failed ? flags()->exitcode : 0;
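
Finalize's return value becomes the process exit status, and the new OnFinalize hook gets the last word on failed. A hypothetical front-end override (a strong definition shadowing the weak default added above):

    // Hypothetical front-end override: suppress the failure exit code,
    // so Finalize() returns 0 instead of flags()->exitcode even when
    // races were reported.
    bool OnFinalize(bool /*failed*/) {
      return false;
    }
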
@@ -356,18 +369,6 @@ static inline void HandleRace(ThreadState *thr, u64 *shadow_mem,
#endif
}
-static inline bool BothReads(Shadow s, int kAccessIsWrite) {
- return !kAccessIsWrite && !s.is_write();
-}
-
-static inline bool OldIsRWNotWeaker(Shadow old, int kAccessIsWrite) {
- return old.is_write() || !kAccessIsWrite;
-}
-
-static inline bool OldIsRWWeakerOrEqual(Shadow old, int kAccessIsWrite) {
- return !old.is_write() || kAccessIsWrite;
-}
-
static inline bool OldIsInSameSynchEpoch(Shadow old, ThreadState *thr) {
return old.epoch() >= thr->fast_synch_epoch;
}
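
A simplified model of the two epoch predicates, with toy types in place of Shadow and ThreadState (the vector-clock comparison in HappensBeforeDemo is an assumption based on the names involved):

    struct MiniShadow { unsigned tid; unsigned long long epoch; };

    // Same synch epoch: the old access happened at or after this thread's
    // last synchronization point, mirroring OldIsInSameSynchEpoch above.
    static bool OldIsInSameSynchEpochDemo(const MiniShadow &old,
                                          unsigned long long fast_synch_epoch) {
      return old.epoch >= fast_synch_epoch;
    }

    // Happens-before: the current thread's clock has already observed the
    // old access's epoch for the accessing thread.
    static bool HappensBeforeDemo(const MiniShadow &old,
                                  const unsigned long long *clock /* by tid */) {
      return clock[old.tid] >= old.epoch;
    }
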
@@ -378,7 +379,7 @@ static inline bool HappensBefore(Shadow old, ThreadState *thr) {
ALWAYS_INLINE
void MemoryAccessImpl(ThreadState *thr, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
StatInc(thr, StatMop);
StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
@@ -452,7 +453,7 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
ALWAYS_INLINE
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
- int kAccessSizeLog, bool kAccessIsWrite) {
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
u64 *shadow_mem = (u64*)MemToShadow(addr);
DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
" is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
@@ -479,12 +480,13 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
Shadow cur(fast_state);
cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
// We must not store to the trace if we do not store to the shadow.
// That is, this call must be moved somewhere below.
TraceAddEvent(thr, fast_state, EventTypeMop, pc);
- MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite,
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
shadow_mem, cur);
}
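
With this change every caller of MemoryAccess supplies kIsAtomic alongside kAccessIsWrite, and the flag is stamped into the shadow cell via cur.SetAtomic(kIsAtomic). A hedged sketch of what an instrumentation entry point might look like afterwards (a hypothetical wrapper, assuming the declarations already in this file):

    // Hypothetical 4-byte plain-read wrapper: size log 2 (1 << 2 == 4 bytes),
    // not a write, not an atomic access.
    void DemoRead4(ThreadState *thr, uptr pc, uptr addr) {
      MemoryAccess(thr, pc, addr, /*kAccessSizeLog=*/2,
                   /*kAccessIsWrite=*/false, /*kIsAtomic=*/false);
    }
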
@@ -531,7 +533,10 @@ void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
}
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
Shadow s(thr->fast_state);
s.ClearIgnoreBit();
s.MarkAsFreed();
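
The manual CHECK/set/reset of thr->is_freeing brackets the range access so a racing access can be attributed to the free. The same bracketing expressed as an RAII scope guard (a sketch; the runtime does it by hand as above):

    #include <cassert>

    // Scope guard for a bool flag: asserts it was clear, sets it, and
    // restores it when the scope exits.
    struct ScopedFlag {
      bool &flag;
      explicit ScopedFlag(bool &f) : flag(f) { assert(!flag); flag = true; }
      ~ScopedFlag() { flag = false; }
    };
    // Usage: { ScopedFlag guard(thr->is_freeing); MemoryAccessRange(...); }
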