//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_TRACE_H
#define TSAN_TRACE_H

#include "tsan_defs.h"
#include "tsan_mutex.h"
#include "tsan_stack_trace.h"
#include "tsan_mutexset.h"

namespace __tsan {

const int kTracePartSizeBits = 13;
const int kTracePartSize = 1 << kTracePartSizeBits;
const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
const int kTraceSize = kTracePartSize * kTraceParts;
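// With the values above, each part holds 8192 events, there are 256 parts,
// and the whole trace holds 2M events (16 MB at 8 bytes per Event).
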
// Must fit into 3 bits.
enum EventType {
  EventTypeMop,
  EventTypeFuncEnter,
  EventTypeFuncExit,
  EventTypeLock,
  EventTypeUnlock,
  EventTypeRLock,
  EventTypeRUnlock
};

// Represents a thread event (from most significant bit):
// u64 typ : 3; // EventType.
// u64 addr : 61; // Associated pc.
typedef u64 Event;
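// For illustration only (a sketch based on the layout described above, not
// part of this header): an event could be packed and unpacked roughly as
//   Event ev = (u64)pc | ((u64)typ << 61);
//   EventType typ = (EventType)(ev >> 61);
//   uptr pc = (uptr)(ev & ((1ull << 61) - 1));
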
struct TraceHeader {
#if !SANITIZER_GO
  BufferedStackTrace stack0;  // Start stack for the trace.
#else
  VarSizeStackTrace stack0;
#endif
  u64 epoch0;  // Start epoch for the trace.
  MutexSet mset0;

  TraceHeader() : stack0(), epoch0() {}
};

struct Trace {
  Mutex mtx;
#if !SANITIZER_GO
  // Must be last to catch overflow as paging fault.
  // Go shadow stack is dynamically allocated.
  uptr shadow_stack[kShadowStackSize];
#endif
  // Must be the last field, because we unmap the unused part in
  // CreateThreadContext.
  TraceHeader headers[kTraceParts];

  Trace()
      : mtx(MutexTypeTrace, StatMtxTrace) {
  }
};
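
// Conceptually (not enforced by this header), the trace behaves as a ring
// buffer of kTraceSize events split into kTraceParts parts; when recording
// crosses into a new part, that part's TraceHeader captures the current
// stack, epoch and mutex set so a report can later be reconstructed by
// replaying events from the start of the part.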

}  // namespace __tsan

#endif  // TSAN_TRACE_H