path: root/lib/lsan/lsan_common.cc
Diffstat (limited to 'lib/lsan/lsan_common.cc')
-rw-r--r--  lib/lsan/lsan_common.cc  |  235
1 file changed, 175 insertions(+), 60 deletions(-)
diff --git a/lib/lsan/lsan_common.cc b/lib/lsan/lsan_common.cc
index 5ae3ad27e..622aae734 100644
--- a/lib/lsan/lsan_common.cc
+++ b/lib/lsan/lsan_common.cc
@@ -68,6 +68,19 @@ ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
+#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+#if SANITIZER_MAC
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
+#endif
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
void InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
@@ -76,6 +89,7 @@ void InitializeSuppressions() {
suppression_ctx->ParseFromFile(flags()->suppressions);
if (&__lsan_default_suppressions)
suppression_ctx->Parse(__lsan_default_suppressions());
+ suppression_ctx->Parse(kStdSuppressions);
}
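The built-in kStdSuppressions above are parsed in addition to the file named by the suppressions flag and to the weak __lsan_default_suppressions() hook. As an illustration only (the patterns below are hypothetical, not part of this patch), a user binary could extend the defaults like this:

    // Hypothetical user override; InitializeSuppressions() parses this string
    // in addition to kStdSuppressions. The patterns are illustrative only.
    extern "C" const char *__lsan_default_suppressions() {
      return "leak:libvendor_blob.so\n"      // ignore leaks from a third-party .so
             "leak:LegacySingletonInit\n";   // ignore a known one-shot allocation
    }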
static SuppressionContext *GetSuppressionContext() {
@@ -83,12 +97,9 @@ static SuppressionContext *GetSuppressionContext() {
return suppression_ctx;
}
-struct RootRegion {
- const void *begin;
- uptr size;
-};
+static InternalMmapVector<RootRegion> *root_regions;
-InternalMmapVector<RootRegion> *root_regions;
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
void InitializeRootRegions() {
CHECK(!root_regions);
@@ -96,6 +107,10 @@ void InitializeRootRegions() {
root_regions = new(placeholder) InternalMmapVector<RootRegion>(1);
}
+const char *MaybeCallLsanDefaultOptions() {
+ return (&__lsan_default_options) ? __lsan_default_options() : "";
+}
+
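MaybeCallLsanDefaultOptions() forwards to the weak __lsan_default_options() hook (a default definition is added at the bottom of this patch). A sketch of a user-side override, with illustrative flag values:

    // Sketch only: when the user defines this strong symbol, LSan uses the
    // returned string as its default runtime flags.
    extern "C" const char *__lsan_default_options() {
      return "max_leaks=10:print_suppressions=0";  // illustrative flag values
    }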
void InitCommonLsan() {
InitializeRootRegions();
if (common_flags()->detect_leaks) {
@@ -111,7 +126,6 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator {
Decorator() : SanitizerCommonDecorator() { }
const char *Error() { return Red(); }
const char *Leak() { return Blue(); }
- const char *End() { return Default(); }
};
static inline bool CanBeAHeapPointer(uptr p) {
@@ -175,6 +189,23 @@ void ScanRangeForPointers(uptr begin, uptr end,
}
}
+// Scans a global range for pointers
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
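ScanGlobalRange() carves the allocator's own global range out of the scanned interval so that the allocator's internal bookkeeping is not treated as global roots. A standalone sketch of the same carve-out logic (illustrative names, not LSan code):

    #include <cassert>
    #include <cstdint>
    using uptr = uintptr_t;

    // Scan [begin, end) while skipping [hole_begin, hole_end) when the hole
    // starts inside the range; mirrors the branch structure above.
    void ScanAroundHole(uptr begin, uptr end, uptr hole_begin, uptr hole_end,
                        void (*scan)(uptr, uptr)) {
      if (begin <= hole_begin && hole_begin < end) {
        assert(hole_begin <= hole_end && hole_end <= end);
        if (begin < hole_begin) scan(begin, hole_begin);  // part before the hole
        if (hole_end < end) scan(hole_end, end);          // part after the hole
      } else {
        scan(begin, end);  // hole not inside the range: scan everything
      }
    }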
void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
Frontier *frontier = reinterpret_cast<Frontier *>(arg);
ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
@@ -183,11 +214,11 @@ void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
Frontier *frontier) {
- InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
+ InternalScopedBuffer<uptr> registers(suspended_threads.RegisterCount());
uptr registers_begin = reinterpret_cast<uptr>(registers.data());
uptr registers_end = registers_begin + registers.size();
- for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
- uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+ tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
DTLS *dtls;
@@ -201,11 +232,13 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
continue;
}
uptr sp;
- bool have_registers =
- (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
- if (!have_registers) {
- Report("Unable to get registers from thread %d.\n");
- // If unable to get SP, consider the entire stack to be reachable.
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %d.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
sp = stack_begin;
}
@@ -239,21 +272,23 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
}
if (flags()->use_tls) {
- LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
- if (cache_begin == cache_end) {
- ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
- } else {
- // Because LSan should not be loaded with dlopen(), we can assume
- // that allocator cache will be part of static TLS image.
- CHECK_LE(tls_begin, cache_begin);
- CHECK_GE(tls_end, cache_end);
- if (tls_begin < cache_begin)
- ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
- kReachable);
- if (tls_end > cache_end)
- ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
+ if (tls_begin) {
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+ // If the tls and cache ranges don't overlap, scan full tls range,
+ // otherwise, only scan the non-overlapping portions
+ if (cache_begin == cache_end || tls_end < cache_begin ||
+ tls_begin > cache_end) {
+ ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+ } else {
+ if (tls_begin < cache_begin)
+ ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+ kReachable);
+ if (tls_end > cache_end)
+ ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+ kReachable);
+ }
}
- if (dtls) {
+ if (dtls && !DTLSInDestruction(dtls)) {
for (uptr j = 0; j < dtls->dtv_size; ++j) {
uptr dtls_beg = dtls->dtv[j].beg;
uptr dtls_end = dtls_beg + dtls->dtv[j].size;
@@ -263,28 +298,36 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
kReachable);
}
}
+ } else {
+ // We are handling a thread with DTLS under destruction. Log about
+ // this and continue.
+ LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
}
}
}
}
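The rewritten TLS handling above first checks whether the allocator-cache range overlaps the static TLS range at all, and only splits the scan when it does. The condition, extracted as a standalone sketch (not part of the patch):

    #include <cstdint>
    using uptr = uintptr_t;

    // Sketch of the condition used above: true when the allocator cache range
    // is empty or lies entirely outside the static TLS range.
    bool CacheOutsideTls(uptr tls_begin, uptr tls_end,
                         uptr cache_begin, uptr cache_end) {
      return cache_begin == cache_end || tls_end < cache_begin ||
             tls_begin > cache_end;
    }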
-static void ProcessRootRegion(Frontier *frontier, uptr root_begin,
- uptr root_end) {
- MemoryMappingLayout proc_maps(/*cache_enabled*/true);
- uptr begin, end, prot;
- while (proc_maps.Next(&begin, &end,
- /*offset*/ nullptr, /*filename*/ nullptr,
- /*filename_size*/ 0, &prot)) {
- uptr intersection_begin = Max(root_begin, begin);
- uptr intersection_end = Min(end, root_end);
- if (intersection_begin >= intersection_end) continue;
- bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
- LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
- root_begin, root_end, begin, end,
- is_readable ? "readable" : "unreadable");
- if (is_readable)
- ScanRangeForPointers(intersection_begin, intersection_end, frontier,
- "ROOT", kReachable);
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+ uptr region_begin, uptr region_end, bool is_readable) {
+ uptr intersection_begin = Max(root_region.begin, region_begin);
+ uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+ if (intersection_begin >= intersection_end) return;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_region.begin, root_region.begin + root_region.size,
+ region_begin, region_end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+ kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+ const RootRegion &root_region) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ ScanRootRegion(frontier, root_region, segment.start, segment.end,
+ segment.IsReadable());
}
}
@@ -293,9 +336,7 @@ static void ProcessRootRegions(Frontier *frontier) {
if (!flags()->use_root_regions) return;
CHECK(root_regions);
for (uptr i = 0; i < root_regions->size(); i++) {
- RootRegion region = (*root_regions)[i];
- uptr begin_addr = reinterpret_cast<uptr>(region.begin);
- ProcessRootRegion(frontier, begin_addr, begin_addr + region.size);
+ ProcessRootRegion(frontier, (*root_regions)[i]);
}
}
@@ -333,6 +374,72 @@ static void CollectIgnoredCb(uptr chunk, void *arg) {
}
}
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+ CHECK(stack_id);
+ StackTrace stack = map->Get(stack_id);
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+struct InvalidPCParam {
+ Frontier *frontier;
+ StackDepotReverseMap *stack_depot_reverse_map;
+ bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+ u32 stack_id = m.stack_trace_id();
+ uptr caller_pc = 0;
+ if (stack_id > 0)
+ caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ if (caller_pc == 0 || (param->skip_linker_allocations &&
+ GetLinker()->containsAddress(caller_pc))) {
+ m.set_tag(kReachable);
+ param->frontier->push_back(chunk);
+ }
+ }
+}
+
+// On Linux, handles dynamically allocated TLS blocks by treating all chunks
+// allocated from ld-linux.so as reachable.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+void ProcessPC(Frontier *frontier) {
+ StackDepotReverseMap stack_depot_reverse_map;
+ InvalidPCParam arg;
+ arg.frontier = frontier;
+ arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+ arg.skip_linker_allocations =
+ flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+ ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
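GetCallerPC() and MarkInvalidPCCb() classify a chunk by the program counter of whatever called the allocator. A standalone sketch of that "frame 1 is the caller" convention (illustrative, not LSan code):

    #include <cstddef>
    #include <cstdint>

    // Frame 0 of a captured allocation stack is the malloc/calloc wrapper
    // itself; frame 1 is the code that requested the memory. Returning 0
    // means the caller is unknown (e.g. a trace with fewer than two frames).
    uintptr_t CallerPCFromTrace(const uintptr_t *trace, size_t size) {
      return size >= 2 ? trace[1] : 0;
    }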
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
// Holds the flood fill frontier.
@@ -344,11 +451,13 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
ProcessRootRegions(&frontier);
FloodFillTag(&frontier, kReachable);
+ CHECK_EQ(0, frontier.size());
+ ProcessPC(&frontier);
+
// The check here is relatively expensive, so we do this in a separate flood
// fill. That way we can skip the check for chunks that are reachable
// otherwise.
LOG_POINTERS("Processing platform-specific allocations.\n");
- CHECK_EQ(0, frontier.size());
ProcessPlatformSpecificAllocations(&frontier);
FloodFillTag(&frontier, kReachable);
@@ -458,7 +567,7 @@ static bool CheckForLeaks() {
"\n");
Printf("%s", d.Error());
Report("ERROR: LeakSanitizer: detected memory leaks\n");
- Printf("%s", d.End());
+ Printf("%s", d.Default());
param.leak_report.ReportTopLeaks(flags()->max_leaks);
}
if (common_flags()->print_suppressions)
@@ -470,18 +579,16 @@ static bool CheckForLeaks() {
return false;
}
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
void DoLeakCheck() {
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
- bool have_leaks = CheckForLeaks();
- if (!have_leaks) {
- return;
- }
- if (common_flags()->exitcode) {
- Die();
- }
+ has_reported_leaks = CheckForLeaks();
+ if (has_reported_leaks) HandleLeaks();
}
static int DoRecoverableLeakCheck() {
@@ -490,6 +597,8 @@ static int DoRecoverableLeakCheck() {
return have_leaks ? 1 : 0;
}
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
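DoRecoverableLeakCheck() backs the public __lsan_do_recoverable_leak_check() entry point (defined further down in this file); the new DoRecoverableLeakCheckVoid() wrapper simply discards its result. A sketch of how an application might invoke the recoverable check at a checkpoint:

    #include <sanitizer/lsan_interface.h>
    #include <cstdio>

    void LeakCheckpoint() {
      // Nonzero means new leaks were reported; unlike the at-exit check,
      // the process keeps running afterwards.
      if (__lsan_do_recoverable_leak_check())
        std::fprintf(stderr, "leaks detected at checkpoint\n");
    }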
static Suppression *GetSuppressionForAddr(uptr addr) {
Suppression *s = nullptr;
@@ -594,7 +703,7 @@ void LeakReport::PrintReportForLeak(uptr index) {
Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
leaks_[index].total_size, leaks_[index].hit_count);
- Printf("%s", d.End());
+ Printf("%s", d.Default());
PrintStackTraceById(leaks_[index].stack_trace_id);
@@ -652,6 +761,7 @@ uptr LeakReport::UnsuppressedLeakCount() {
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
+void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
@@ -684,7 +794,7 @@ void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
BlockingMutexLock l(&global_mutex);
CHECK(root_regions);
- RootRegion region = {begin, size};
+ RootRegion region = {reinterpret_cast<uptr>(begin), size};
root_regions->push_back(region);
VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
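RootRegion now stores a plain uptr begin, so the public registration API casts the user pointer once at the boundary. A sketch of how a program might use the root-region interface (declared in sanitizer/lsan_interface.h) for a custom arena:

    #include <sanitizer/lsan_interface.h>
    #include <cstddef>
    #include <sys/mman.h>

    void UseArenaAsRoot() {
      const size_t kSize = 1 << 20;
      void *arena = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      __lsan_register_root_region(arena, kSize);
      // Heap objects referenced only from inside `arena` stay reachable now.
      // ...
      __lsan_unregister_root_region(arena, kSize);
      munmap(arena, kSize);
    }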
@@ -698,7 +808,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
bool removed = false;
for (uptr i = 0; i < root_regions->size(); i++) {
RootRegion region = (*root_regions)[i];
- if (region.begin == begin && region.size == size) {
+ if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
removed = true;
uptr last_index = root_regions->size() - 1;
(*root_regions)[i] = (*root_regions)[last_index];
@@ -750,6 +860,11 @@ int __lsan_do_recoverable_leak_check() {
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char * __lsan_default_options() {
+ return "";
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
return 0;
}