field | value | date
---|---|---
author | Max Ostapenko <m.ostapenko@partner.samsung.com> | 2015-10-21 10:32:45 +0300
committer | Maxim Ostapenko <chefmax@gcc.gnu.org> | 2015-10-21 10:32:45 +0300
commit | 696d846a56cc12549f080c6c87e6a0272bdb29f1 (patch) |
tree | 2bdaf703dd35e1806b59bd7d74c7eee290a1054f |
parent | 013a8899f5d9469a835cf1f6ccb1b29f69344959 (diff) |
download | gcc-696d846a56cc12549f080c6c87e6a0272bdb29f1.tar.gz |
libsanitizer merge from upstream r250806.
libsanitizer/
2015-10-20 Maxim Ostapenko <m.ostapenko@partner.samsung.com>
* All source files: Merge from upstream r250806.
* configure.ac (link_sanitizer_common): Add -lrt flag.
* configure.tgt: Enable TSAN and LSAN for aarch64-linux targets.
Set USE_CXX_ABI_FLAG=true for darwin.
* asan/Makefile.am (asan_files): Add new files.
(DEFS): Add -DCAN_SANITIZE_UB=0 and remove unused and legacy
-DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0.
* asan/Makefile.in: Regenerate.
* ubsan/Makefile.am (ubsan_files): Add new files.
(DEFS): Add -DCAN_SANITIZE_UB=1.
(libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true.
* ubsan/Makefile.in: Regenerate.
* tsan/Makefile.am (tsan_files): Add new files.
(DEFS): Add -DCAN_SANITIZE_UB=0.
* tsan/Makefile.in: Regenerate.
* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
* sanitizer_common/Makefile.in: Regenerate.
* asan/libtool-version: Bump the libasan SONAME.
From-SVN: r229111
218 files changed, 12307 insertions, 6443 deletions
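The new asan_activation_flags.inc in the diff below is consumed through an X-macro pattern: asan_activation.cc defines ASAN_ACTIVATION_FLAG and COMMON_ACTIVATION_FLAG before including the .inc, so one flag list drives registration, parsing, and printing. A minimal standalone sketch of that pattern follows; the ACTIVATION_FLAG_LIST macro, ParseFlag helper, and the two-flag list are illustrative stand-ins for the .inc file and the runtime's FlagParser, not the sanitizer's actual API.

```cpp
// Sketch of the X-macro flag-list pattern used by asan_activation_flags.inc
// (simplified; stand-in names, not the sanitizer's FlagParser interface).
#include <cstdio>
#include <cstring>

// Stand-in for the .inc file: one entry per flag.
#define ACTIVATION_FLAG_LIST(X) \
  X(int, max_redzone)           \
  X(bool, poison_heap)

struct Flags {
  // Expansion 1: declare one field per flag.
#define DECLARE_FLAG(Type, Name) Type Name;
  ACTIVATION_FLAG_LIST(DECLARE_FLAG)
#undef DECLARE_FLAG
};

// Expansion 2: match "name" against the same list and store the value.
static bool ParseFlag(Flags *f, const char *name, int value) {
#define MATCH_FLAG(Type, Name)          \
  if (std::strcmp(name, #Name) == 0) {  \
    f->Name = static_cast<Type>(value); \
    return true;                        \
  }
  ACTIVATION_FLAG_LIST(MATCH_FLAG)
#undef MATCH_FLAG
  return false;
}

int main() {
  Flags f = {2048, false};
  ParseFlag(&f, "poison_heap", 1);
  std::printf("max_redzone=%d poison_heap=%d\n", f.max_redzone, (int)f.poison_heap);
  return 0;
}
```

The same list can later be re-expanded (for example, to print flag descriptions) without repeating the flag names, which is why the runtime keeps them in a separate .inc file.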
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog index ba4c4239509..3316629c809 100644 --- a/libsanitizer/ChangeLog +++ b/libsanitizer/ChangeLog @@ -1,3 +1,24 @@ +2015-10-21 Maxim Ostapenko <m.ostapenko@partner.samsung.com> + + * All source files: Merge from upstream r250806. + * configure.ac (link_sanitizer_common): Add -lrt flag. + * configure.tgt: Enable TSAN and LSAN for aarch64-linux targets. + Set USE_CXX_ABI_FLAG=true for darwin. + * asan/Makefile.am (asan_files): Add new files. + (DEFS): Add DCAN_SANITIZE_UB=0 and remove unused and legacy + DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0. + * asan/Makefile.in: Regenerate. + * ubsan/Makefile.am (ubsan_files): Add new files. + (DEFS): Add DCAN_SANITIZE_UB=1. + (libubsan_la_LIBADD): Add -lc++abi if USE_CXX_ABI_FLAG is true. + * ubsan/Makefile.in: Regenerate. + * tsan/Makefile.am (tsan_files): Add new files. + (DEFS): Add DCAN_SANITIZE_UB=0. + * tsan/Makefile.in: Regenerate. + * sanitizer_common/Makefile.am (sanitizer_common_files): Add new files. + * sanitizer_common/Makefile.in: Regenerate. + * asan/libtool-version: Bump the libasan SONAME. + 2015-09-09 Markus Trippelsdorf <markus@trippelsdorf.de> PR sanitizer/67258 diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE index ef893cb306e..6ff9a9acc6b 100644 --- a/libsanitizer/MERGE +++ b/libsanitizer/MERGE @@ -1,4 +1,4 @@ -221802 +250806 The first line of this file holds the svn revision number of the last merge done from the master library sources. diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am index 54c74ce3e42..bd3cd735a98 100644 --- a/libsanitizer/asan/Makefile.am +++ b/libsanitizer/asan/Makefile.am @@ -3,7 +3,7 @@ AM_CPPFLAGS = -I $(top_srcdir)/include -I $(top_srcdir) # May be used by toolexeclibdir. gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) -DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 -DASAN_NEEDS_SEGV=1 +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 -DCAN_SANITIZE_UB=0 if USING_MAC_INTERPOSE DEFS += -DMAC_INTERPOSE_FUNCTIONS -DMISSING_BLOCKS_SUPPORT endif @@ -17,9 +17,10 @@ nodist_toolexeclib_HEADERS = libasan_preinit.o asan_files = \ asan_activation.cc \ - asan_allocator2.cc \ + asan_allocator.cc \ asan_debugging.cc \ asan_fake_stack.cc \ + asan_flags.cc \ asan_globals.cc \ asan_interceptors.cc \ asan_linux.cc \ @@ -34,6 +35,7 @@ asan_files = \ asan_rtl.cc \ asan_stack.cc \ asan_stats.cc \ + asan_suppressions.cc \ asan_thread.cc \ asan_win.cc \ asan_win_dll_thunk.cc \ diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in index edbed4d3d25..229c7b400d1 100644 --- a/libsanitizer/asan/Makefile.in +++ b/libsanitizer/asan/Makefile.in @@ -111,14 +111,14 @@ libasan_la_DEPENDENCIES = \ $(top_builddir)/sanitizer_common/libsanitizer_common.la \ $(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \ $(am__append_3) $(am__DEPENDENCIES_1) -am__objects_1 = asan_activation.lo asan_allocator2.lo \ - asan_debugging.lo asan_fake_stack.lo asan_globals.lo \ +am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \ + asan_fake_stack.lo asan_flags.lo asan_globals.lo \ asan_interceptors.lo asan_linux.lo asan_mac.lo \ asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \ asan_new_delete.lo asan_poisoning.lo asan_posix.lo \ asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \ - asan_thread.lo 
asan_win.lo asan_win_dll_thunk.lo \ - asan_win_dynamic_runtime_thunk.lo + asan_suppressions.lo asan_thread.lo asan_win.lo \ + asan_win_dll_thunk.lo asan_win_dynamic_runtime_thunk.lo am_libasan_la_OBJECTS = $(am__objects_1) libasan_la_OBJECTS = $(am_libasan_la_OBJECTS) libasan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ @@ -172,8 +172,8 @@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS \ -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS \ - -DASAN_HAS_EXCEPTIONS=1 -DASAN_FLEXIBLE_MAPPING_AND_OFFSET=0 \ - -DASAN_NEEDS_SEGV=1 $(am__append_1) + -DASAN_HAS_EXCEPTIONS=1 -DASAN_NEEDS_SEGV=1 \ + -DCAN_SANITIZE_UB=0 $(am__append_1) DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ @@ -306,9 +306,10 @@ toolexeclib_LTLIBRARIES = libasan.la nodist_toolexeclib_HEADERS = libasan_preinit.o asan_files = \ asan_activation.cc \ - asan_allocator2.cc \ + asan_allocator.cc \ asan_debugging.cc \ asan_fake_stack.cc \ + asan_flags.cc \ asan_globals.cc \ asan_interceptors.cc \ asan_linux.cc \ @@ -323,6 +324,7 @@ asan_files = \ asan_rtl.cc \ asan_stack.cc \ asan_stats.cc \ + asan_suppressions.cc \ asan_thread.cc \ asan_win.cc \ asan_win_dll_thunk.cc \ @@ -450,9 +452,10 @@ distclean-compile: -rm -f *.tab.c @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator2.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_interceptors.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_linux.Plo@am__quote@ @@ -467,6 +470,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_rtl.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stack.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_stats.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_suppressions.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_thread.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_win_dll_thunk.Plo@am__quote@ diff --git a/libsanitizer/asan/asan_activation.cc b/libsanitizer/asan/asan_activation.cc index 235451c8aec..58867956086 100644 --- a/libsanitizer/asan/asan_activation.cc +++ b/libsanitizer/asan/asan_activation.cc @@ -14,32 +14,106 @@ #include "asan_allocator.h" #include "asan_flags.h" #include "asan_internal.h" +#include "asan_poisoning.h" +#include "asan_stack.h" #include "sanitizer_common/sanitizer_flags.h" namespace __asan { static struct AsanDeactivatedFlags { - int quarantine_size; - int max_redzone; + AllocatorOptions allocator_options; int malloc_context_size; bool poison_heap; + bool coverage; + const char *coverage_dir; + + void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) { +#define ASAN_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &f->Name); +#define COMMON_ACTIVATION_FLAG(Type, Name) \ + RegisterFlag(parser, #Name, "", &cf->Name); +#include "asan_activation_flags.inc" +#undef ASAN_ACTIVATION_FLAG +#undef COMMON_ACTIVATION_FLAG + + 
RegisterIncludeFlags(parser, cf); + } + + void OverrideFromActivationFlags() { + Flags f; + CommonFlags cf; + FlagParser parser; + RegisterActivationFlags(&parser, &f, &cf); + + // Copy the current activation flags. + allocator_options.CopyTo(&f, &cf); + cf.malloc_context_size = malloc_context_size; + f.poison_heap = poison_heap; + cf.coverage = coverage; + cf.coverage_dir = coverage_dir; + cf.verbosity = Verbosity(); + cf.help = false; // this is activation-specific help + + // Check if activation flags need to be overriden. + if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) { + parser.ParseString(env); + } + + // Override from getprop asan.options. + char buf[100]; + GetExtraActivationFlags(buf, sizeof(buf)); + parser.ParseString(buf); + + SetVerbosity(cf.verbosity); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (cf.help) parser.PrintFlagDescriptions(); + + allocator_options.SetFrom(&f, &cf); + malloc_context_size = cf.malloc_context_size; + poison_heap = f.poison_heap; + coverage = cf.coverage; + coverage_dir = cf.coverage_dir; + } + + void Print() { + Report( + "quarantine_size_mb %d, max_redzone %d, poison_heap %d, " + "malloc_context_size %d, alloc_dealloc_mismatch %d, " + "allocator_may_return_null %d, coverage %d, coverage_dir %s\n", + allocator_options.quarantine_size_mb, allocator_options.max_redzone, + poison_heap, malloc_context_size, + allocator_options.alloc_dealloc_mismatch, + allocator_options.may_return_null, coverage, coverage_dir); + } } asan_deactivated_flags; static bool asan_is_deactivated; -void AsanStartDeactivated() { +void AsanDeactivate() { + CHECK(!asan_is_deactivated); VReport(1, "Deactivating ASan\n"); - // Save flag values. - asan_deactivated_flags.quarantine_size = flags()->quarantine_size; - asan_deactivated_flags.max_redzone = flags()->max_redzone; - asan_deactivated_flags.poison_heap = flags()->poison_heap; - asan_deactivated_flags.malloc_context_size = - common_flags()->malloc_context_size; - - flags()->quarantine_size = 0; - flags()->max_redzone = 16; - flags()->poison_heap = false; - common_flags()->malloc_context_size = 0; + + // Stash runtime state. + GetAllocatorOptions(&asan_deactivated_flags.allocator_options); + asan_deactivated_flags.malloc_context_size = GetMallocContextSize(); + asan_deactivated_flags.poison_heap = CanPoisonMemory(); + asan_deactivated_flags.coverage = common_flags()->coverage; + asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir; + + // Deactivate the runtime. + SetCanPoisonMemory(false); + SetMallocContextSize(1); + ReInitializeCoverage(false, nullptr); + + AllocatorOptions disabled = asan_deactivated_flags.allocator_options; + disabled.quarantine_size_mb = 0; + disabled.min_redzone = 16; // Redzone must be at least 16 bytes long. + disabled.max_redzone = 16; + disabled.alloc_dealloc_mismatch = false; + disabled.may_return_null = true; + ReInitializeAllocator(disabled); asan_is_deactivated = true; } @@ -48,25 +122,21 @@ void AsanActivate() { if (!asan_is_deactivated) return; VReport(1, "Activating ASan\n"); - // Restore flag values. - // FIXME: this is not atomic, and there may be other threads alive. 
- flags()->quarantine_size = asan_deactivated_flags.quarantine_size; - flags()->max_redzone = asan_deactivated_flags.max_redzone; - flags()->poison_heap = asan_deactivated_flags.poison_heap; - common_flags()->malloc_context_size = - asan_deactivated_flags.malloc_context_size; + UpdateProcessName(); - ParseExtraActivationFlags(); + asan_deactivated_flags.OverrideFromActivationFlags(); - ReInitializeAllocator(); + SetCanPoisonMemory(asan_deactivated_flags.poison_heap); + SetMallocContextSize(asan_deactivated_flags.malloc_context_size); + ReInitializeCoverage(asan_deactivated_flags.coverage, + asan_deactivated_flags.coverage_dir); + ReInitializeAllocator(asan_deactivated_flags.allocator_options); asan_is_deactivated = false; - VReport( - 1, - "quarantine_size %d, max_redzone %d, poison_heap %d, malloc_context_size " - "%d\n", - flags()->quarantine_size, flags()->max_redzone, flags()->poison_heap, - common_flags()->malloc_context_size); + if (Verbosity()) { + Report("Activated with flags:\n"); + asan_deactivated_flags.Print(); + } } } // namespace __asan diff --git a/libsanitizer/asan/asan_activation.h b/libsanitizer/asan/asan_activation.h index 01f2d46d222..162a5ebcea9 100644 --- a/libsanitizer/asan/asan_activation.h +++ b/libsanitizer/asan/asan_activation.h @@ -14,7 +14,7 @@ #define ASAN_ACTIVATION_H namespace __asan { -void AsanStartDeactivated(); +void AsanDeactivate(); void AsanActivate(); } // namespace __asan diff --git a/libsanitizer/asan/asan_activation_flags.inc b/libsanitizer/asan/asan_activation_flags.inc new file mode 100644 index 00000000000..4bab38213c1 --- /dev/null +++ b/libsanitizer/asan/asan_activation_flags.inc @@ -0,0 +1,33 @@ +//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// A subset of ASan (and common) runtime flags supported at activation time. +// +//===----------------------------------------------------------------------===// +#ifndef ASAN_ACTIVATION_FLAG +# error "Define ASAN_ACTIVATION_FLAG prior to including this file!" +#endif + +#ifndef COMMON_ACTIVATION_FLAG +# error "Define COMMON_ACTIVATION_FLAG prior to including this file!" +#endif + +// ASAN_ACTIVATION_FLAG(Type, Name) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +ASAN_ACTIVATION_FLAG(int, redzone) +ASAN_ACTIVATION_FLAG(int, max_redzone) +ASAN_ACTIVATION_FLAG(int, quarantine_size_mb) +ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch) +ASAN_ACTIVATION_FLAG(bool, poison_heap) + +COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null) +COMMON_ACTIVATION_FLAG(int, malloc_context_size) +COMMON_ACTIVATION_FLAG(bool, coverage) +COMMON_ACTIVATION_FLAG(const char *, coverage_dir) +COMMON_ACTIVATION_FLAG(int, verbosity) +COMMON_ACTIVATION_FLAG(bool, help) diff --git a/libsanitizer/asan/asan_allocator.cc b/libsanitizer/asan/asan_allocator.cc new file mode 100644 index 00000000000..187c405e118 --- /dev/null +++ b/libsanitizer/asan/asan_allocator.cc @@ -0,0 +1,906 @@ +//===-- asan_allocator.cc -------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. 
+// +// Implementation of ASan's memory allocator, 2-nd version. +// This variant uses the allocator from sanitizer_common, i.e. the one shared +// with ThreadSanitizer and MemorySanitizer. +// +//===----------------------------------------------------------------------===// + +#include "asan_allocator.h" +#include "asan_mapping.h" +#include "asan_poisoning.h" +#include "asan_report.h" +#include "asan_stack.h" +#include "asan_thread.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_list.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_quarantine.h" +#include "lsan/lsan_common.h" + +namespace __asan { + +// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. +// We use adaptive redzones: for larger allocation larger redzones are used. +static u32 RZLog2Size(u32 rz_log) { + CHECK_LT(rz_log, 8); + return 16 << rz_log; +} + +static u32 RZSize2Log(u32 rz_size) { + CHECK_GE(rz_size, 16); + CHECK_LE(rz_size, 2048); + CHECK(IsPowerOfTwo(rz_size)); + u32 res = Log2(rz_size) - 4; + CHECK_EQ(rz_size, RZLog2Size(res)); + return res; +} + +static AsanAllocator &get_allocator(); + +// The memory chunk allocated from the underlying allocator looks like this: +// L L L L L L H H U U U U U U R R +// L -- left redzone words (0 or more bytes) +// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. +// U -- user memory. +// R -- right redzone (0 or more bytes) +// ChunkBase consists of ChunkHeader and other bytes that overlap with user +// memory. + +// If the left redzone is greater than the ChunkHeader size we store a magic +// value in the first uptr word of the memory block and store the address of +// ChunkBase in the next uptr. +// M B L L L L L L L L L H H U U U U U U +// | ^ +// ---------------------| +// M -- magic value kAllocBegMagic +// B -- address of ChunkHeader pointing to the first 'H' +static const uptr kAllocBegMagic = 0xCC6E96B9; + +struct ChunkHeader { + // 1-st 8 bytes. + u32 chunk_state : 8; // Must be first. + u32 alloc_tid : 24; + + u32 free_tid : 24; + u32 from_memalign : 1; + u32 alloc_type : 2; + u32 rz_log : 3; + u32 lsan_tag : 2; + // 2-nd 8 bytes + // This field is used for small sizes. For large sizes it is equal to + // SizeClassMap::kMaxSize and the actual size is stored in the + // SecondaryAllocator's metadata. + u32 user_requested_size; + u32 alloc_context_id; +}; + +struct ChunkBase : ChunkHeader { + // Header2, intersects with user memory. + u32 free_context_id; +}; + +static const uptr kChunkHeaderSize = sizeof(ChunkHeader); +static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; +COMPILER_CHECK(kChunkHeaderSize == 16); +COMPILER_CHECK(kChunkHeader2Size <= 16); + +// Every chunk of memory allocated by this allocator can be in one of 3 states: +// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. +// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. +// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. +enum { + CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. 
+ CHUNK_ALLOCATED = 2, + CHUNK_QUARANTINE = 3 +}; + +struct AsanChunk: ChunkBase { + uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } + uptr UsedSize(bool locked_version = false) { + if (user_requested_size != SizeClassMap::kMaxSize) + return user_requested_size; + return *reinterpret_cast<uptr *>( + get_allocator().GetMetaData(AllocBeg(locked_version))); + } + void *AllocBeg(bool locked_version = false) { + if (from_memalign) { + if (locked_version) + return get_allocator().GetBlockBeginFastLocked( + reinterpret_cast<void *>(this)); + return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this)); + } + return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); + } + bool AddrIsInside(uptr addr, bool locked_version = false) { + return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); + } +}; + +struct QuarantineCallback { + explicit QuarantineCallback(AllocatorCache *cache) + : cache_(cache) { + } + + void Recycle(AsanChunk *m) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); + CHECK_NE(m->alloc_tid, kInvalidTid); + CHECK_NE(m->free_tid, kInvalidTid); + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapLeftRedzoneMagic); + void *p = reinterpret_cast<void *>(m->AllocBeg()); + if (p != m) { + uptr *alloc_magic = reinterpret_cast<uptr *>(p); + CHECK_EQ(alloc_magic[0], kAllocBegMagic); + // Clear the magic value, as allocator internals may overwrite the + // contents of deallocated chunk, confusing GetAsanChunk lookup. + alloc_magic[0] = 0; + CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m)); + } + + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.real_frees++; + thread_stats.really_freed += m->UsedSize(); + + get_allocator().Deallocate(cache_, p); + } + + void *Allocate(uptr size) { + return get_allocator().Allocate(cache_, size, 1, false); + } + + void Deallocate(void *p) { + get_allocator().Deallocate(cache_, p); + } + + AllocatorCache *cache_; +}; + +typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; +typedef AsanQuarantine::Cache QuarantineCache; + +void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { + PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mmaps++; + thread_stats.mmaped += size; +} +void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { + PoisonShadow(p, size, 0); + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + FlushUnneededASanShadowMemory(p, size); + // Statistics. + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.munmaps++; + thread_stats.munmaped += size; +} + +// We can not use THREADLOCAL because it is not supported on some of the +// platforms we care about (OSX 10.6, Android). 
+// static THREADLOCAL AllocatorCache cache; +AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + return &ms->allocator_cache; +} + +QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { + CHECK(ms); + CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); + return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); +} + +void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { + quarantine_size_mb = f->quarantine_size_mb; + min_redzone = f->redzone; + max_redzone = f->max_redzone; + may_return_null = cf->allocator_may_return_null; + alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; +} + +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { + f->quarantine_size_mb = quarantine_size_mb; + f->redzone = min_redzone; + f->max_redzone = max_redzone; + cf->allocator_may_return_null = may_return_null; + f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; +} + +struct Allocator { + static const uptr kMaxAllowedMallocSize = + FIRST_32_SECOND_64(3UL << 30, 1UL << 40); + static const uptr kMaxThreadLocalQuarantine = + FIRST_32_SECOND_64(1 << 18, 1 << 20); + + AsanAllocator allocator; + AsanQuarantine quarantine; + StaticSpinMutex fallback_mutex; + AllocatorCache fallback_allocator_cache; + QuarantineCache fallback_quarantine_cache; + + // ------------------- Options -------------------------- + atomic_uint16_t min_redzone; + atomic_uint16_t max_redzone; + atomic_uint8_t alloc_dealloc_mismatch; + + // ------------------- Initialization ------------------------ + explicit Allocator(LinkerInitialized) + : quarantine(LINKER_INITIALIZED), + fallback_quarantine_cache(LINKER_INITIALIZED) {} + + void CheckOptions(const AllocatorOptions &options) const { + CHECK_GE(options.min_redzone, 16); + CHECK_GE(options.max_redzone, options.min_redzone); + CHECK_LE(options.max_redzone, 2048); + CHECK(IsPowerOfTwo(options.min_redzone)); + CHECK(IsPowerOfTwo(options.max_redzone)); + } + + void SharedInitCode(const AllocatorOptions &options) { + CheckOptions(options); + quarantine.Init((uptr)options.quarantine_size_mb << 20, + kMaxThreadLocalQuarantine); + atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch, + memory_order_release); + atomic_store(&min_redzone, options.min_redzone, memory_order_release); + atomic_store(&max_redzone, options.max_redzone, memory_order_release); + } + + void Initialize(const AllocatorOptions &options) { + allocator.Init(options.may_return_null); + SharedInitCode(options); + } + + void ReInitialize(const AllocatorOptions &options) { + allocator.SetMayReturnNull(options.may_return_null); + SharedInitCode(options); + } + + void GetOptions(AllocatorOptions *options) const { + options->quarantine_size_mb = quarantine.GetSize() >> 20; + options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); + options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); + options->may_return_null = allocator.MayReturnNull(); + options->alloc_dealloc_mismatch = + atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); + } + + // -------------------- Helper methods. ------------------------- + uptr ComputeRZLog(uptr user_requested_size) { + u32 rz_log = + user_requested_size <= 64 - 16 ? 0 : + user_requested_size <= 128 - 32 ? 1 : + user_requested_size <= 512 - 64 ? 2 : + user_requested_size <= 4096 - 128 ? 3 : + user_requested_size <= (1 << 14) - 256 ? 4 : + user_requested_size <= (1 << 15) - 512 ? 5 : + user_requested_size <= (1 << 16) - 1024 ? 
6 : 7; + u32 min_rz = atomic_load(&min_redzone, memory_order_acquire); + u32 max_rz = atomic_load(&max_redzone, memory_order_acquire); + return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz)); + } + + // We have an address between two chunks, and we want to report just one. + AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk, + AsanChunk *right_chunk) { + // Prefer an allocated chunk over freed chunk and freed chunk + // over available chunk. + if (left_chunk->chunk_state != right_chunk->chunk_state) { + if (left_chunk->chunk_state == CHUNK_ALLOCATED) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_ALLOCATED) + return right_chunk; + if (left_chunk->chunk_state == CHUNK_QUARANTINE) + return left_chunk; + if (right_chunk->chunk_state == CHUNK_QUARANTINE) + return right_chunk; + } + // Same chunk_state: choose based on offset. + sptr l_offset = 0, r_offset = 0; + CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); + CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); + if (l_offset < r_offset) + return left_chunk; + return right_chunk; + } + + // -------------------- Allocation/Deallocation routines --------------- + void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, + AllocType alloc_type, bool can_fill) { + if (UNLIKELY(!asan_inited)) + AsanInitFromRtl(); + Flags &fl = *flags(); + CHECK(stack); + const uptr min_alignment = SHADOW_GRANULARITY; + if (alignment < min_alignment) + alignment = min_alignment; + if (size == 0) { + // We'd be happy to avoid allocating memory for zero-size requests, but + // some programs/tests depend on this behavior and assume that malloc + // would not return NULL even for zero-size allocations. Moreover, it + // looks like operator new should never return NULL, and results of + // consecutive "new" calls must be different even if the allocated size + // is zero. + size = 1; + } + CHECK(IsPowerOfTwo(alignment)); + uptr rz_log = ComputeRZLog(size); + uptr rz_size = RZLog2Size(rz_log); + uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); + uptr needed_size = rounded_size + rz_size; + if (alignment > min_alignment) + needed_size += alignment; + bool using_primary_allocator = true; + // If we are allocating from the secondary allocator, there will be no + // automatic right redzone, so add the right redzone manually. + if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { + needed_size += rz_size; + using_primary_allocator = false; + } + CHECK(IsAligned(needed_size, min_alignment)); + if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { + Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n", + (void*)size); + return allocator.ReturnNullOrDie(); + } + + AsanThread *t = GetCurrentThread(); + void *allocated; + bool check_rss_limit = true; + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocated = + allocator.Allocate(cache, needed_size, 8, false, check_rss_limit); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = + allocator.Allocate(cache, needed_size, 8, false, check_rss_limit); + } + + if (!allocated) + return allocator.ReturnNullOrDie(); + + if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) { + // Heap poisoning is enabled, but the allocator provides an unpoisoned + // chunk. This is possible if CanPoisonMemory() was false for some + // time, for example, due to flags()->start_disabled. 
+ // Anyway, poison the block before using it for anything else. + uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); + PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); + } + + uptr alloc_beg = reinterpret_cast<uptr>(allocated); + uptr alloc_end = alloc_beg + needed_size; + uptr beg_plus_redzone = alloc_beg + rz_size; + uptr user_beg = beg_plus_redzone; + if (!IsAligned(user_beg, alignment)) + user_beg = RoundUpTo(user_beg, alignment); + uptr user_end = user_beg + size; + CHECK_LE(user_end, alloc_end); + uptr chunk_beg = user_beg - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + m->alloc_type = alloc_type; + m->rz_log = rz_log; + u32 alloc_tid = t ? t->tid() : 0; + m->alloc_tid = alloc_tid; + CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? + m->free_tid = kInvalidTid; + m->from_memalign = user_beg != beg_plus_redzone; + if (alloc_beg != chunk_beg) { + CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); + reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; + reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; + } + if (using_primary_allocator) { + CHECK(size); + m->user_requested_size = size; + CHECK(allocator.FromPrimary(allocated)); + } else { + CHECK(!allocator.FromPrimary(allocated)); + m->user_requested_size = SizeClassMap::kMaxSize; + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); + meta[0] = size; + meta[1] = chunk_beg; + } + + m->alloc_context_id = StackDepotPut(*stack); + + uptr size_rounded_down_to_granularity = + RoundDownTo(size, SHADOW_GRANULARITY); + // Unpoison the bulk of the memory region. + if (size_rounded_down_to_granularity) + PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); + // Deal with the end of the region if size is not aligned to granularity. + if (size != size_rounded_down_to_granularity && CanPoisonMemory()) { + u8 *shadow = + (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity); + *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; + } + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.mallocs++; + thread_stats.malloced += size; + thread_stats.malloced_redzones += needed_size - size; + if (needed_size > SizeClassMap::kMaxSize) + thread_stats.malloc_large++; + else + thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++; + + void *res = reinterpret_cast<void *>(user_beg); + if (can_fill && fl.max_malloc_fill_size) { + uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); + REAL(memset)(res, fl.malloc_fill_byte, fill_size); + } +#if CAN_SANITIZE_LEAKS + m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored + : __lsan::kDirectlyLeaked; +#endif + // Must be the last mutation of metadata in this function. + atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); + ASAN_MALLOC_HOOK(res, size); + return res; + } + + void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr, + BufferedStackTrace *stack) { + u8 old_chunk_state = CHUNK_ALLOCATED; + // Flip the chunk_state atomically to avoid race on double-free. + if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, + CHUNK_QUARANTINE, memory_order_acquire)) + ReportInvalidFree(ptr, old_chunk_state, stack); + CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); + } + + // Expects the chunk to already be marked as quarantined by using + // AtomicallySetQuarantineFlag. 
+ void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, + AllocType alloc_type) { + CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); + + if (m->alloc_type != alloc_type) { + if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) { + ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type, + (AllocType)alloc_type); + } + } + + CHECK_GE(m->alloc_tid, 0); + if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. + CHECK_EQ(m->free_tid, kInvalidTid); + AsanThread *t = GetCurrentThread(); + m->free_tid = t ? t->tid() : 0; + m->free_context_id = StackDepotPut(*stack); + // Poison the region. + PoisonShadow(m->Beg(), + RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), + kAsanHeapFreeMagic); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.frees++; + thread_stats.freed += m->UsedSize(); + + // Push into quarantine. + if (t) { + AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), m, + m->UsedSize()); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *ac = &fallback_allocator_cache; + quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), m, + m->UsedSize()); + } + } + + void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, + AllocType alloc_type) { + uptr p = reinterpret_cast<uptr>(ptr); + if (p == 0) return; + + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + if (delete_size && flags()->new_delete_type_mismatch && + delete_size != m->UsedSize()) { + ReportNewDeleteSizeMismatch(p, delete_size, stack); + } + ASAN_FREE_HOOK(ptr); + // Must mark the chunk as quarantined before any changes to its metadata. + AtomicallySetQuarantineFlag(m, ptr, stack); + QuarantineChunk(m, ptr, stack, alloc_type); + } + + void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) { + CHECK(old_ptr && new_size); + uptr p = reinterpret_cast<uptr>(old_ptr); + uptr chunk_beg = p - kChunkHeaderSize; + AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); + + AsanStats &thread_stats = GetCurrentThreadStats(); + thread_stats.reallocs++; + thread_stats.realloced += new_size; + + void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); + if (new_ptr) { + u8 chunk_state = m->chunk_state; + if (chunk_state != CHUNK_ALLOCATED) + ReportInvalidFree(old_ptr, chunk_state, stack); + CHECK_NE(REAL(memcpy), nullptr); + uptr memcpy_size = Min(new_size, m->UsedSize()); + // If realloc() races with free(), we may start copying freed memory. + // However, we will report racy double-free later anyway. + REAL(memcpy)(new_ptr, old_ptr, memcpy_size); + Deallocate(old_ptr, 0, stack, FROM_MALLOC); + } + return new_ptr; + } + + void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) + return allocator.ReturnNullOrDie(); + void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); + // If the memory comes from the secondary allocator no need to clear it + // as it comes directly from mmap. 
+ if (ptr && allocator.FromPrimary(ptr)) + REAL(memset)(ptr, 0, nmemb * size); + return ptr; + } + + void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) { + if (chunk_state == CHUNK_QUARANTINE) + ReportDoubleFree((uptr)ptr, stack); + else + ReportFreeNotMalloced((uptr)ptr, stack); + } + + void CommitBack(AsanThreadLocalMallocStorage *ms) { + AllocatorCache *ac = GetAllocatorCache(ms); + quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac)); + allocator.SwallowCache(ac); + } + + // -------------------------- Chunk lookup ---------------------- + + // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). + AsanChunk *GetAsanChunk(void *alloc_beg) { + if (!alloc_beg) return nullptr; + if (!allocator.FromPrimary(alloc_beg)) { + uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); + AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); + return m; + } + uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); + if (alloc_magic[0] == kAllocBegMagic) + return reinterpret_cast<AsanChunk *>(alloc_magic[1]); + return reinterpret_cast<AsanChunk *>(alloc_beg); + } + + AsanChunk *GetAsanChunkByAddr(uptr p) { + void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); + } + + // Allocator must be locked when this function is called. + AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { + void *alloc_beg = + allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); + return GetAsanChunk(alloc_beg); + } + + uptr AllocationSize(uptr p) { + AsanChunk *m = GetAsanChunkByAddr(p); + if (!m) return 0; + if (m->chunk_state != CHUNK_ALLOCATED) return 0; + if (m->Beg() != p) return 0; + return m->UsedSize(); + } + + AsanChunkView FindHeapChunkByAddress(uptr addr) { + AsanChunk *m1 = GetAsanChunkByAddr(addr); + if (!m1) return AsanChunkView(m1); + sptr offset = 0; + if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { + // The address is in the chunk's left redzone, so maybe it is actually + // a right buffer overflow from the other chunk to the left. + // Search a bit to the left to see if there is another chunk. + AsanChunk *m2 = nullptr; + for (uptr l = 1; l < GetPageSizeCached(); l++) { + m2 = GetAsanChunkByAddr(addr - l); + if (m2 == m1) continue; // Still the same chunk. 
+ break; + } + if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) + m1 = ChooseChunk(addr, m2, m1); + } + return AsanChunkView(m1); + } + + void PrintStats() { + allocator.PrintStats(); + } + + void ForceLock() { + allocator.ForceLock(); + fallback_mutex.Lock(); + } + + void ForceUnlock() { + fallback_mutex.Unlock(); + allocator.ForceUnlock(); + } +}; + +static Allocator instance(LINKER_INITIALIZED); + +static AsanAllocator &get_allocator() { + return instance.allocator; +} + +bool AsanChunkView::IsValid() { + return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE; +} +uptr AsanChunkView::Beg() { return chunk_->Beg(); } +uptr AsanChunkView::End() { return Beg() + UsedSize(); } +uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); } +uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } +uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +StackTrace AsanChunkView::GetAllocStack() { + return GetStackTraceFromId(chunk_->alloc_context_id); +} + +StackTrace AsanChunkView::GetFreeStack() { + return GetStackTraceFromId(chunk_->free_context_id); +} + +void InitializeAllocator(const AllocatorOptions &options) { + instance.Initialize(options); +} + +void ReInitializeAllocator(const AllocatorOptions &options) { + instance.ReInitialize(options); +} + +void GetAllocatorOptions(AllocatorOptions *options) { + instance.GetOptions(options); +} + +AsanChunkView FindHeapChunkByAddress(uptr addr) { + return instance.FindHeapChunkByAddress(addr); +} + +void AsanThreadLocalMallocStorage::CommitBack() { + instance.CommitBack(this); +} + +void PrintInternalAllocatorStats() { + instance.PrintStats(); +} + +void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + return instance.Allocate(size, alignment, stack, alloc_type, true); +} + +void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { + instance.Deallocate(ptr, 0, stack, alloc_type); +} + +void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, + AllocType alloc_type) { + instance.Deallocate(ptr, size, stack, alloc_type); +} + +void *asan_malloc(uptr size, BufferedStackTrace *stack) { + return instance.Allocate(size, 8, stack, FROM_MALLOC, true); +} + +void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { + return instance.Calloc(nmemb, size, stack); +} + +void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { + if (!p) + return instance.Allocate(size, 8, stack, FROM_MALLOC, true); + if (size == 0) { + instance.Deallocate(p, 0, stack, FROM_MALLOC); + return nullptr; + } + return instance.Reallocate(p, size, stack); +} + +void *asan_valloc(uptr size, BufferedStackTrace *stack) { + return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); +} + +void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { + uptr PageSize = GetPageSizeCached(); + size = RoundUpTo(size, PageSize); + if (size == 0) { + // pvalloc(0) should allocate one page. 
+ size = PageSize; + } + return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true); +} + +int asan_posix_memalign(void **memptr, uptr alignment, uptr size, + BufferedStackTrace *stack) { + void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true); + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) { + if (!ptr) return 0; + uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr)); + if (flags()->check_malloc_usable_size && (usable_size == 0)) { + GET_STACK_TRACE_FATAL(pc, bp); + ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); + } + return usable_size; +} + +uptr asan_mz_size(const void *ptr) { + return instance.AllocationSize(reinterpret_cast<uptr>(ptr)); +} + +void asan_mz_force_lock() { + instance.ForceLock(); +} + +void asan_mz_force_unlock() { + instance.ForceUnlock(); +} + +void AsanSoftRssLimitExceededCallback(bool exceeded) { + instance.allocator.SetRssLimitIsExceeded(exceeded); +} + +} // namespace __asan + +// --- Implementation of LSan-specific functions --- {{{1 +namespace __lsan { +void LockAllocator() { + __asan::get_allocator().ForceLock(); +} + +void UnlockAllocator() { + __asan::get_allocator().ForceUnlock(); +} + +void GetAllocatorGlobalRange(uptr *begin, uptr *end) { + *begin = (uptr)&__asan::get_allocator(); + *end = *begin + sizeof(__asan::get_allocator()); +} + +uptr PointsIntoChunk(void* p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr); + if (!m) return 0; + uptr chunk = m->Beg(); + if (m->chunk_state != __asan::CHUNK_ALLOCATED) + return 0; + if (m->AddrIsInside(addr, /*locked_version=*/true)) + return chunk; + if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), + addr)) + return chunk; + return 0; +} + +uptr GetUserBegin(uptr chunk) { + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk); + CHECK(m); + return m->Beg(); +} + +LsanMetadata::LsanMetadata(uptr chunk) { + metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); +} + +bool LsanMetadata::allocated() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->chunk_state == __asan::CHUNK_ALLOCATED; +} + +ChunkTag LsanMetadata::tag() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return static_cast<ChunkTag>(m->lsan_tag); +} + +void LsanMetadata::set_tag(ChunkTag value) { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + m->lsan_tag = value; +} + +uptr LsanMetadata::requested_size() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->UsedSize(/*locked_version=*/true); +} + +u32 LsanMetadata::stack_trace_id() const { + __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); + return m->alloc_context_id; +} + +void ForEachChunk(ForEachChunkCallback callback, void *arg) { + __asan::get_allocator().ForEachChunk(callback, arg); +} + +IgnoreObjectResult IgnoreObjectLocked(const void *p) { + uptr addr = reinterpret_cast<uptr>(p); + __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr); + if (!m) return kIgnoreObjectInvalid; + if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { + if (m->lsan_tag == kIgnored) + return kIgnoreObjectAlreadyIgnored; + m->lsan_tag = __lsan::kIgnored; + return kIgnoreObjectSuccess; + } else { + return kIgnoreObjectInvalid; + } +} +} // 
namespace __lsan + +// ---------------------- Interface ---------------- {{{1 +using namespace __asan; // NOLINT + +// ASan allocator doesn't reserve extra bytes, so normally we would +// just return "size". We don't want to expose our redzone sizes, etc here. +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + uptr ptr = reinterpret_cast<uptr>(p); + return instance.AllocationSize(ptr) > 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + if (!p) return 0; + uptr ptr = reinterpret_cast<uptr>(p); + uptr allocated_size = instance.AllocationSize(ptr); + // Die if p is not malloced or if it is already freed. + if (allocated_size == 0) { + GET_STACK_TRACE_FATAL_HERE; + ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); + } + return allocated_size; +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +// Provide default (no-op) implementation of malloc hooks. +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_malloc_hook(void *ptr, uptr size) { + (void)ptr; + (void)size; +} +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_free_hook(void *ptr) { + (void)ptr; +} +} // extern "C" +#endif diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h index d2f30af30d6..921131ab4e9 100644 --- a/libsanitizer/asan/asan_allocator.h +++ b/libsanitizer/asan/asan_allocator.h @@ -7,12 +7,13 @@ // // This file is a part of AddressSanitizer, an address sanity checker. // -// ASan-private header for asan_allocator2.cc. +// ASan-private header for asan_allocator.cc. //===----------------------------------------------------------------------===// #ifndef ASAN_ALLOCATOR_H #define ASAN_ALLOCATOR_H +#include "asan_flags.h" #include "asan_internal.h" #include "asan_interceptors.h" #include "sanitizer_common/sanitizer_allocator.h" @@ -26,11 +27,22 @@ enum AllocType { FROM_NEW_BR = 3 // Memory block came from operator new [ ] }; -static const uptr kNumberOfSizeClasses = 255; struct AsanChunk; -void InitializeAllocator(); -void ReInitializeAllocator(); +struct AllocatorOptions { + u32 quarantine_size_mb; + u16 min_redzone; + u16 max_redzone; + u8 may_return_null; + u8 alloc_dealloc_mismatch; + + void SetFrom(const Flags *f, const CommonFlags *cf); + void CopyTo(Flags *f, CommonFlags *cf); +}; + +void InitializeAllocator(const AllocatorOptions &options); +void ReInitializeAllocator(const AllocatorOptions &options); +void GetAllocatorOptions(AllocatorOptions *options); class AsanChunkView { public: @@ -100,6 +112,11 @@ struct AsanMapUnmapCallback { # if defined(__powerpc64__) const uptr kAllocatorSpace = 0xa0000000000ULL; const uptr kAllocatorSize = 0x20000000000ULL; // 2T. +# elif defined(__aarch64__) +// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA +// so no need to different values for different VMA. +const uptr kAllocatorSpace = 0x10000000000ULL; +const uptr kAllocatorSize = 0x10000000000ULL; // 3T. # else const uptr kAllocatorSpace = 0x600000000000ULL; const uptr kAllocatorSize = 0x40000000000ULL; // 4T. 
@@ -122,15 +139,16 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 16, AsanMapUnmapCallback> PrimaryAllocator; #endif // SANITIZER_CAN_USE_ALLOCATOR64 +static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses; typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef LargeMmapAllocator<AsanMapUnmapCallback> SecondaryAllocator; typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, - SecondaryAllocator> Allocator; + SecondaryAllocator> AsanAllocator; struct AsanThreadLocalMallocStorage { uptr quarantine_cache[16]; - AllocatorCache allocator2_cache; + AllocatorCache allocator_cache; void CommitBack(); private: // These objects are allocated via mmap() and are zero-initialized. @@ -158,6 +176,7 @@ void asan_mz_force_lock(); void asan_mz_force_unlock(); void PrintInternalAllocatorStats(); +void AsanSoftRssLimitExceededCallback(bool exceeded); } // namespace __asan #endif // ASAN_ALLOCATOR_H diff --git a/libsanitizer/asan/asan_allocator2.cc b/libsanitizer/asan/asan_allocator2.cc deleted file mode 100644 index 33d9fea70cb..00000000000 --- a/libsanitizer/asan/asan_allocator2.cc +++ /dev/null @@ -1,790 +0,0 @@ -//===-- asan_allocator2.cc ------------------------------------------------===// -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file is a part of AddressSanitizer, an address sanity checker. -// -// Implementation of ASan's memory allocator, 2-nd version. -// This variant uses the allocator from sanitizer_common, i.e. the one shared -// with ThreadSanitizer and MemorySanitizer. -// -//===----------------------------------------------------------------------===// -#include "asan_allocator.h" - -#include "asan_mapping.h" -#include "asan_poisoning.h" -#include "asan_report.h" -#include "asan_stack.h" -#include "asan_thread.h" -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_internal_defs.h" -#include "sanitizer_common/sanitizer_list.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_quarantine.h" -#include "lsan/lsan_common.h" - -namespace __asan { - -void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const { - PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic); - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.mmaps++; - thread_stats.mmaped += size; -} -void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const { - PoisonShadow(p, size, 0); - // We are about to unmap a chunk of user memory. - // Mark the corresponding shadow memory as not needed. - FlushUnneededASanShadowMemory(p, size); - // Statistics. - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.munmaps++; - thread_stats.munmaped += size; -} - -// We can not use THREADLOCAL because it is not supported on some of the -// platforms we care about (OSX 10.6, Android). 
-// static THREADLOCAL AllocatorCache cache; -AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) { - CHECK(ms); - return &ms->allocator2_cache; -} - -static Allocator allocator; - -static const uptr kMaxAllowedMallocSize = - FIRST_32_SECOND_64(3UL << 30, 64UL << 30); - -static const uptr kMaxThreadLocalQuarantine = - FIRST_32_SECOND_64(1 << 18, 1 << 20); - -// Every chunk of memory allocated by this allocator can be in one of 3 states: -// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated. -// CHUNK_ALLOCATED: the chunk is allocated and not yet freed. -// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone. -enum { - CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it. - CHUNK_ALLOCATED = 2, - CHUNK_QUARANTINE = 3 -}; - -// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits. -// We use adaptive redzones: for larger allocation larger redzones are used. -static u32 RZLog2Size(u32 rz_log) { - CHECK_LT(rz_log, 8); - return 16 << rz_log; -} - -static u32 RZSize2Log(u32 rz_size) { - CHECK_GE(rz_size, 16); - CHECK_LE(rz_size, 2048); - CHECK(IsPowerOfTwo(rz_size)); - u32 res = Log2(rz_size) - 4; - CHECK_EQ(rz_size, RZLog2Size(res)); - return res; -} - -static uptr ComputeRZLog(uptr user_requested_size) { - u32 rz_log = - user_requested_size <= 64 - 16 ? 0 : - user_requested_size <= 128 - 32 ? 1 : - user_requested_size <= 512 - 64 ? 2 : - user_requested_size <= 4096 - 128 ? 3 : - user_requested_size <= (1 << 14) - 256 ? 4 : - user_requested_size <= (1 << 15) - 512 ? 5 : - user_requested_size <= (1 << 16) - 1024 ? 6 : 7; - return Min(Max(rz_log, RZSize2Log(flags()->redzone)), - RZSize2Log(flags()->max_redzone)); -} - -// The memory chunk allocated from the underlying allocator looks like this: -// L L L L L L H H U U U U U U R R -// L -- left redzone words (0 or more bytes) -// H -- ChunkHeader (16 bytes), which is also a part of the left redzone. -// U -- user memory. -// R -- right redzone (0 or more bytes) -// ChunkBase consists of ChunkHeader and other bytes that overlap with user -// memory. - -// If the left redzone is greater than the ChunkHeader size we store a magic -// value in the first uptr word of the memory block and store the address of -// ChunkBase in the next uptr. -// M B L L L L L L L L L H H U U U U U U -// | ^ -// ---------------------| -// M -- magic value kAllocBegMagic -// B -- address of ChunkHeader pointing to the first 'H' -static const uptr kAllocBegMagic = 0xCC6E96B9; - -struct ChunkHeader { - // 1-st 8 bytes. - u32 chunk_state : 8; // Must be first. - u32 alloc_tid : 24; - - u32 free_tid : 24; - u32 from_memalign : 1; - u32 alloc_type : 2; - u32 rz_log : 3; - u32 lsan_tag : 2; - // 2-nd 8 bytes - // This field is used for small sizes. For large sizes it is equal to - // SizeClassMap::kMaxSize and the actual size is stored in the - // SecondaryAllocator's metadata. - u32 user_requested_size; - u32 alloc_context_id; -}; - -struct ChunkBase : ChunkHeader { - // Header2, intersects with user memory. 
- u32 free_context_id; -}; - -static const uptr kChunkHeaderSize = sizeof(ChunkHeader); -static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize; -COMPILER_CHECK(kChunkHeaderSize == 16); -COMPILER_CHECK(kChunkHeader2Size <= 16); - -struct AsanChunk: ChunkBase { - uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; } - uptr UsedSize(bool locked_version = false) { - if (user_requested_size != SizeClassMap::kMaxSize) - return user_requested_size; - return *reinterpret_cast<uptr *>( - allocator.GetMetaData(AllocBeg(locked_version))); - } - void *AllocBeg(bool locked_version = false) { - if (from_memalign) { - if (locked_version) - return allocator.GetBlockBeginFastLocked( - reinterpret_cast<void *>(this)); - return allocator.GetBlockBegin(reinterpret_cast<void *>(this)); - } - return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log)); - } - bool AddrIsInside(uptr addr, bool locked_version = false) { - return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version)); - } -}; - -bool AsanChunkView::IsValid() { - return chunk_ != 0 && chunk_->chunk_state != CHUNK_AVAILABLE; -} -uptr AsanChunkView::Beg() { return chunk_->Beg(); } -uptr AsanChunkView::End() { return Beg() + UsedSize(); } -uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); } -uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; } -uptr AsanChunkView::FreeTid() { return chunk_->free_tid; } - -static StackTrace GetStackTraceFromId(u32 id) { - CHECK(id); - StackTrace res = StackDepotGet(id); - CHECK(res.trace); - return res; -} - -StackTrace AsanChunkView::GetAllocStack() { - return GetStackTraceFromId(chunk_->alloc_context_id); -} - -StackTrace AsanChunkView::GetFreeStack() { - return GetStackTraceFromId(chunk_->free_context_id); -} - -struct QuarantineCallback; -typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine; -typedef AsanQuarantine::Cache QuarantineCache; -static AsanQuarantine quarantine(LINKER_INITIALIZED); -static QuarantineCache fallback_quarantine_cache(LINKER_INITIALIZED); -static AllocatorCache fallback_allocator_cache; -static SpinMutex fallback_mutex; - -QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) { - CHECK(ms); - CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache)); - return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache); -} - -struct QuarantineCallback { - explicit QuarantineCallback(AllocatorCache *cache) - : cache_(cache) { - } - - void Recycle(AsanChunk *m) { - CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); - atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed); - CHECK_NE(m->alloc_tid, kInvalidTid); - CHECK_NE(m->free_tid, kInvalidTid); - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), - kAsanHeapLeftRedzoneMagic); - void *p = reinterpret_cast<void *>(m->AllocBeg()); - if (p != m) { - uptr *alloc_magic = reinterpret_cast<uptr *>(p); - CHECK_EQ(alloc_magic[0], kAllocBegMagic); - // Clear the magic value, as allocator internals may overwrite the - // contents of deallocated chunk, confusing GetAsanChunk lookup. - alloc_magic[0] = 0; - CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m)); - } - - // Statistics. 
- AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.real_frees++; - thread_stats.really_freed += m->UsedSize(); - - allocator.Deallocate(cache_, p); - } - - void *Allocate(uptr size) { - return allocator.Allocate(cache_, size, 1, false); - } - - void Deallocate(void *p) { - allocator.Deallocate(cache_, p); - } - - AllocatorCache *cache_; -}; - -void InitializeAllocator() { - allocator.Init(); - quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); -} - -void ReInitializeAllocator() { - quarantine.Init((uptr)flags()->quarantine_size, kMaxThreadLocalQuarantine); -} - -static void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack, - AllocType alloc_type, bool can_fill) { - if (UNLIKELY(!asan_inited)) - AsanInitFromRtl(); - Flags &fl = *flags(); - CHECK(stack); - const uptr min_alignment = SHADOW_GRANULARITY; - if (alignment < min_alignment) - alignment = min_alignment; - if (size == 0) { - // We'd be happy to avoid allocating memory for zero-size requests, but - // some programs/tests depend on this behavior and assume that malloc would - // not return NULL even for zero-size allocations. Moreover, it looks like - // operator new should never return NULL, and results of consecutive "new" - // calls must be different even if the allocated size is zero. - size = 1; - } - CHECK(IsPowerOfTwo(alignment)); - uptr rz_log = ComputeRZLog(size); - uptr rz_size = RZLog2Size(rz_log); - uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment); - uptr needed_size = rounded_size + rz_size; - if (alignment > min_alignment) - needed_size += alignment; - bool using_primary_allocator = true; - // If we are allocating from the secondary allocator, there will be no - // automatic right redzone, so add the right redzone manually. - if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) { - needed_size += rz_size; - using_primary_allocator = false; - } - CHECK(IsAligned(needed_size, min_alignment)); - if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) { - Report("WARNING: AddressSanitizer failed to allocate %p bytes\n", - (void*)size); - return AllocatorReturnNull(); - } - - AsanThread *t = GetCurrentThread(); - void *allocated; - if (t) { - AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); - allocated = allocator.Allocate(cache, needed_size, 8, false); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *cache = &fallback_allocator_cache; - allocated = allocator.Allocate(cache, needed_size, 8, false); - } - - if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && flags()->poison_heap) { - // Heap poisoning is enabled, but the allocator provides an unpoisoned - // chunk. This is possible if flags()->poison_heap was disabled for some - // time, for example, due to flags()->start_disabled. - // Anyway, poison the block before using it for anything else. 
- uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated); - PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic); - } - - uptr alloc_beg = reinterpret_cast<uptr>(allocated); - uptr alloc_end = alloc_beg + needed_size; - uptr beg_plus_redzone = alloc_beg + rz_size; - uptr user_beg = beg_plus_redzone; - if (!IsAligned(user_beg, alignment)) - user_beg = RoundUpTo(user_beg, alignment); - uptr user_end = user_beg + size; - CHECK_LE(user_end, alloc_end); - uptr chunk_beg = user_beg - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); - m->alloc_type = alloc_type; - m->rz_log = rz_log; - u32 alloc_tid = t ? t->tid() : 0; - m->alloc_tid = alloc_tid; - CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield? - m->free_tid = kInvalidTid; - m->from_memalign = user_beg != beg_plus_redzone; - if (alloc_beg != chunk_beg) { - CHECK_LE(alloc_beg+ 2 * sizeof(uptr), chunk_beg); - reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic; - reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg; - } - if (using_primary_allocator) { - CHECK(size); - m->user_requested_size = size; - CHECK(allocator.FromPrimary(allocated)); - } else { - CHECK(!allocator.FromPrimary(allocated)); - m->user_requested_size = SizeClassMap::kMaxSize; - uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)); - meta[0] = size; - meta[1] = chunk_beg; - } - - m->alloc_context_id = StackDepotPut(*stack); - - uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY); - // Unpoison the bulk of the memory region. - if (size_rounded_down_to_granularity) - PoisonShadow(user_beg, size_rounded_down_to_granularity, 0); - // Deal with the end of the region if size is not aligned to granularity. - if (size != size_rounded_down_to_granularity && fl.poison_heap) { - u8 *shadow = (u8*)MemToShadow(user_beg + size_rounded_down_to_granularity); - *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0; - } - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.mallocs++; - thread_stats.malloced += size; - thread_stats.malloced_redzones += needed_size - size; - uptr class_id = Min(kNumberOfSizeClasses, SizeClassMap::ClassID(needed_size)); - thread_stats.malloced_by_size[class_id]++; - if (needed_size > SizeClassMap::kMaxSize) - thread_stats.malloc_large++; - - void *res = reinterpret_cast<void *>(user_beg); - if (can_fill && fl.max_malloc_fill_size) { - uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size); - REAL(memset)(res, fl.malloc_fill_byte, fill_size); - } -#if CAN_SANITIZE_LEAKS - m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored - : __lsan::kDirectlyLeaked; -#endif - // Must be the last mutation of metadata in this function. - atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release); - ASAN_MALLOC_HOOK(res, size); - return res; -} - -static void ReportInvalidFree(void *ptr, u8 chunk_state, - BufferedStackTrace *stack) { - if (chunk_state == CHUNK_QUARANTINE) - ReportDoubleFree((uptr)ptr, stack); - else - ReportFreeNotMalloced((uptr)ptr, stack); -} - -static void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr, - BufferedStackTrace *stack) { - u8 old_chunk_state = CHUNK_ALLOCATED; - // Flip the chunk_state atomically to avoid race on double-free. 
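The deallocation path that follows hinges on a single atomic compare-and-swap of the chunk state from CHUNK_ALLOCATED to CHUNK_QUARANTINE, so only one concurrent (or repeated) free can win, and every loser is reported as an invalid or double free. A sketch of the same idea with std::atomic, using illustrative state values rather than the runtime's:

#include <atomic>
#include <cstdio>

enum ChunkState : unsigned char { AVAILABLE = 0, ALLOCATED = 2, QUARANTINE = 3 };

std::atomic<unsigned char> chunk_state{ALLOCATED};

void Free(const char *who) {
  unsigned char expected = ALLOCATED;
  if (!chunk_state.compare_exchange_strong(expected, QUARANTINE,
                                           std::memory_order_acquire)) {
    // 'expected' now holds what was observed: QUARANTINE means double-free,
    // anything else means freeing memory that was never allocated.
    std::printf("%s: invalid free, observed state %d\n", who, expected);
    return;
  }
  std::printf("%s: chunk moved to quarantine\n", who);
}

int main() {
  Free("first free");   // wins the CAS
  Free("second free");  // loses and is reported
}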
- if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state, - CHUNK_QUARANTINE, memory_order_acquire)) - ReportInvalidFree(ptr, old_chunk_state, stack); - CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state); -} - -// Expects the chunk to already be marked as quarantined by using -// AtomicallySetQuarantineFlag. -static void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack, - AllocType alloc_type) { - CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE); - - if (m->alloc_type != alloc_type && flags()->alloc_dealloc_mismatch) - ReportAllocTypeMismatch((uptr)ptr, stack, - (AllocType)m->alloc_type, (AllocType)alloc_type); - - CHECK_GE(m->alloc_tid, 0); - if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area. - CHECK_EQ(m->free_tid, kInvalidTid); - AsanThread *t = GetCurrentThread(); - m->free_tid = t ? t->tid() : 0; - m->free_context_id = StackDepotPut(*stack); - // Poison the region. - PoisonShadow(m->Beg(), - RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY), - kAsanHeapFreeMagic); - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.frees++; - thread_stats.freed += m->UsedSize(); - - // Push into quarantine. - if (t) { - AsanThreadLocalMallocStorage *ms = &t->malloc_storage(); - AllocatorCache *ac = GetAllocatorCache(ms); - quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac), - m, m->UsedSize()); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *ac = &fallback_allocator_cache; - quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac), - m, m->UsedSize()); - } -} - -static void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack, - AllocType alloc_type) { - uptr p = reinterpret_cast<uptr>(ptr); - if (p == 0) return; - - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); - if (delete_size && flags()->new_delete_type_mismatch && - delete_size != m->UsedSize()) { - ReportNewDeleteSizeMismatch(p, delete_size, stack); - } - ASAN_FREE_HOOK(ptr); - // Must mark the chunk as quarantined before any changes to its metadata. - AtomicallySetQuarantineFlag(m, ptr, stack); - QuarantineChunk(m, ptr, stack, alloc_type); -} - -static void *Reallocate(void *old_ptr, uptr new_size, - BufferedStackTrace *stack) { - CHECK(old_ptr && new_size); - uptr p = reinterpret_cast<uptr>(old_ptr); - uptr chunk_beg = p - kChunkHeaderSize; - AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg); - - AsanStats &thread_stats = GetCurrentThreadStats(); - thread_stats.reallocs++; - thread_stats.realloced += new_size; - - void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true); - if (new_ptr) { - u8 chunk_state = m->chunk_state; - if (chunk_state != CHUNK_ALLOCATED) - ReportInvalidFree(old_ptr, chunk_state, stack); - CHECK_NE(REAL(memcpy), (void*)0); - uptr memcpy_size = Min(new_size, m->UsedSize()); - // If realloc() races with free(), we may start copying freed memory. - // However, we will report racy double-free later anyway. - REAL(memcpy)(new_ptr, old_ptr, memcpy_size); - Deallocate(old_ptr, 0, stack, FROM_MALLOC); - } - return new_ptr; -} - -// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg). 
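QuarantineChunk() above does not return memory to the allocator; it poisons the region and parks the chunk in a size-bounded FIFO, and only when the quarantine overflows does the Recycle() callback shown earlier release the oldest entries. A minimal sketch of that bounded queue, using a plain std::deque and chunk ids instead of the real Quarantine<> template and its per-thread caches:

#include <cstddef>
#include <cstdio>
#include <deque>
#include <utility>

struct Quarantine {
  size_t max_bytes;
  size_t cur_bytes = 0;
  std::deque<std::pair<int, size_t>> q;  // <chunk id, chunk size>

  explicit Quarantine(size_t max) : max_bytes(max) {}

  void Put(int chunk_id, size_t size) {
    q.emplace_back(chunk_id, size);
    cur_bytes += size;
    while (cur_bytes > max_bytes) {      // evict oldest entries first
      auto [id, sz] = q.front();
      q.pop_front();
      cur_bytes -= sz;
      // The real code calls QuarantineCallback::Recycle() here; that is the
      // point where the memory finally becomes reusable.
      std::printf("recycling chunk %d (%zu bytes)\n", id, sz);
    }
  }
};

int main() {
  Quarantine quarantine(256);
  for (int i = 1; i <= 8; i++)
    quarantine.Put(i, 64);
}

Delaying reuse this way turns a use-after-free into a read of poisoned, quarantined memory rather than a silent read of some other allocation's live data.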
-static AsanChunk *GetAsanChunk(void *alloc_beg) { - if (!alloc_beg) return 0; - if (!allocator.FromPrimary(alloc_beg)) { - uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg)); - AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]); - return m; - } - uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg); - if (alloc_magic[0] == kAllocBegMagic) - return reinterpret_cast<AsanChunk *>(alloc_magic[1]); - return reinterpret_cast<AsanChunk *>(alloc_beg); -} - -static AsanChunk *GetAsanChunkByAddr(uptr p) { - void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p)); - return GetAsanChunk(alloc_beg); -} - -// Allocator must be locked when this function is called. -static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) { - void *alloc_beg = - allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p)); - return GetAsanChunk(alloc_beg); -} - -static uptr AllocationSize(uptr p) { - AsanChunk *m = GetAsanChunkByAddr(p); - if (!m) return 0; - if (m->chunk_state != CHUNK_ALLOCATED) return 0; - if (m->Beg() != p) return 0; - return m->UsedSize(); -} - -// We have an address between two chunks, and we want to report just one. -AsanChunk *ChooseChunk(uptr addr, - AsanChunk *left_chunk, AsanChunk *right_chunk) { - // Prefer an allocated chunk over freed chunk and freed chunk - // over available chunk. - if (left_chunk->chunk_state != right_chunk->chunk_state) { - if (left_chunk->chunk_state == CHUNK_ALLOCATED) - return left_chunk; - if (right_chunk->chunk_state == CHUNK_ALLOCATED) - return right_chunk; - if (left_chunk->chunk_state == CHUNK_QUARANTINE) - return left_chunk; - if (right_chunk->chunk_state == CHUNK_QUARANTINE) - return right_chunk; - } - // Same chunk_state: choose based on offset. - sptr l_offset = 0, r_offset = 0; - CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset)); - CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset)); - if (l_offset < r_offset) - return left_chunk; - return right_chunk; -} - -AsanChunkView FindHeapChunkByAddress(uptr addr) { - AsanChunk *m1 = GetAsanChunkByAddr(addr); - if (!m1) return AsanChunkView(m1); - sptr offset = 0; - if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) { - // The address is in the chunk's left redzone, so maybe it is actually - // a right buffer overflow from the other chunk to the left. - // Search a bit to the left to see if there is another chunk. - AsanChunk *m2 = 0; - for (uptr l = 1; l < GetPageSizeCached(); l++) { - m2 = GetAsanChunkByAddr(addr - l); - if (m2 == m1) continue; // Still the same chunk. 
- break; - } - if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset)) - m1 = ChooseChunk(addr, m2, m1); - } - return AsanChunkView(m1); -} - -void AsanThreadLocalMallocStorage::CommitBack() { - AllocatorCache *ac = GetAllocatorCache(this); - quarantine.Drain(GetQuarantineCache(this), QuarantineCallback(ac)); - allocator.SwallowCache(GetAllocatorCache(this)); -} - -void PrintInternalAllocatorStats() { - allocator.PrintStats(); -} - -void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack, - AllocType alloc_type) { - return Allocate(size, alignment, stack, alloc_type, true); -} - -void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) { - Deallocate(ptr, 0, stack, alloc_type); -} - -void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack, - AllocType alloc_type) { - Deallocate(ptr, size, stack, alloc_type); -} - -void *asan_malloc(uptr size, BufferedStackTrace *stack) { - return Allocate(size, 8, stack, FROM_MALLOC, true); -} - -void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) { - if (CallocShouldReturnNullDueToOverflow(size, nmemb)) - return AllocatorReturnNull(); - void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false); - // If the memory comes from the secondary allocator no need to clear it - // as it comes directly from mmap. - if (ptr && allocator.FromPrimary(ptr)) - REAL(memset)(ptr, 0, nmemb * size); - return ptr; -} - -void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) { - if (p == 0) - return Allocate(size, 8, stack, FROM_MALLOC, true); - if (size == 0) { - Deallocate(p, 0, stack, FROM_MALLOC); - return 0; - } - return Reallocate(p, size, stack); -} - -void *asan_valloc(uptr size, BufferedStackTrace *stack) { - return Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true); -} - -void *asan_pvalloc(uptr size, BufferedStackTrace *stack) { - uptr PageSize = GetPageSizeCached(); - size = RoundUpTo(size, PageSize); - if (size == 0) { - // pvalloc(0) should allocate one page. 
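asan_calloc() above refuses the request when nmemb * size would overflow (CallocShouldReturnNullDueToOverflow) and only memsets the result when it came from the primary allocator, since secondary allocations are served by fresh, already-zeroed mmap pages. The overflow guard amounts to the usual division check; a plain portable version, not the runtime's implementation:

#include <cstddef>
#include <cstdio>

bool CallocOverflows(size_t nmemb, size_t size) {
  if (nmemb == 0 || size == 0) return false;
  return nmemb > static_cast<size_t>(-1) / size;  // true iff nmemb * size wraps
}

int main() {
  std::printf("%d\n", CallocOverflows(1 << 20, 4096));               // 0: fits in size_t
  std::printf("%d\n", CallocOverflows(static_cast<size_t>(-1), 2));  // 1: would overflow
}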
- size = PageSize; - } - return Allocate(size, PageSize, stack, FROM_MALLOC, true); -} - -int asan_posix_memalign(void **memptr, uptr alignment, uptr size, - BufferedStackTrace *stack) { - void *ptr = Allocate(size, alignment, stack, FROM_MALLOC, true); - CHECK(IsAligned((uptr)ptr, alignment)); - *memptr = ptr; - return 0; -} - -uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) { - if (ptr == 0) return 0; - uptr usable_size = AllocationSize(reinterpret_cast<uptr>(ptr)); - if (flags()->check_malloc_usable_size && (usable_size == 0)) { - GET_STACK_TRACE_FATAL(pc, bp); - ReportMallocUsableSizeNotOwned((uptr)ptr, &stack); - } - return usable_size; -} - -uptr asan_mz_size(const void *ptr) { - return AllocationSize(reinterpret_cast<uptr>(ptr)); -} - -void asan_mz_force_lock() { - allocator.ForceLock(); - fallback_mutex.Lock(); -} - -void asan_mz_force_unlock() { - fallback_mutex.Unlock(); - allocator.ForceUnlock(); -} - -} // namespace __asan - -// --- Implementation of LSan-specific functions --- {{{1 -namespace __lsan { -void LockAllocator() { - __asan::allocator.ForceLock(); -} - -void UnlockAllocator() { - __asan::allocator.ForceUnlock(); -} - -void GetAllocatorGlobalRange(uptr *begin, uptr *end) { - *begin = (uptr)&__asan::allocator; - *end = *begin + sizeof(__asan::allocator); -} - -uptr PointsIntoChunk(void* p) { - uptr addr = reinterpret_cast<uptr>(p); - __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr); - if (!m) return 0; - uptr chunk = m->Beg(); - if (m->chunk_state != __asan::CHUNK_ALLOCATED) - return 0; - if (m->AddrIsInside(addr, /*locked_version=*/true)) - return chunk; - if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true), - addr)) - return chunk; - return 0; -} - -uptr GetUserBegin(uptr chunk) { - __asan::AsanChunk *m = - __asan::GetAsanChunkByAddrFastLocked(chunk); - CHECK(m); - return m->Beg(); -} - -LsanMetadata::LsanMetadata(uptr chunk) { - metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize); -} - -bool LsanMetadata::allocated() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->chunk_state == __asan::CHUNK_ALLOCATED; -} - -ChunkTag LsanMetadata::tag() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return static_cast<ChunkTag>(m->lsan_tag); -} - -void LsanMetadata::set_tag(ChunkTag value) { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - m->lsan_tag = value; -} - -uptr LsanMetadata::requested_size() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->UsedSize(/*locked_version=*/true); -} - -u32 LsanMetadata::stack_trace_id() const { - __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_); - return m->alloc_context_id; -} - -void ForEachChunk(ForEachChunkCallback callback, void *arg) { - __asan::allocator.ForEachChunk(callback, arg); -} - -IgnoreObjectResult IgnoreObjectLocked(const void *p) { - uptr addr = reinterpret_cast<uptr>(p); - __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr); - if (!m) return kIgnoreObjectInvalid; - if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) { - if (m->lsan_tag == kIgnored) - return kIgnoreObjectAlreadyIgnored; - m->lsan_tag = __lsan::kIgnored; - return kIgnoreObjectSuccess; - } else { - return kIgnoreObjectInvalid; - } -} -} // namespace __lsan - -// ---------------------- Interface ---------------- {{{1 -using namespace __asan; // NOLINT - -// ASan allocator doesn't reserve 
extra bytes, so normally we would -// just return "size". We don't want to expose our redzone sizes, etc here. -uptr __sanitizer_get_estimated_allocated_size(uptr size) { - return size; -} - -int __sanitizer_get_ownership(const void *p) { - uptr ptr = reinterpret_cast<uptr>(p); - return (AllocationSize(ptr) > 0); -} - -uptr __sanitizer_get_allocated_size(const void *p) { - if (p == 0) return 0; - uptr ptr = reinterpret_cast<uptr>(p); - uptr allocated_size = AllocationSize(ptr); - // Die if p is not malloced or if it is already freed. - if (allocated_size == 0) { - GET_STACK_TRACE_FATAL_HERE; - ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack); - } - return allocated_size; -} - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -// Provide default (no-op) implementation of malloc hooks. -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_malloc_hook(void *ptr, uptr size) { - (void)ptr; - (void)size; -} -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -void __sanitizer_free_hook(void *ptr) { - (void)ptr; -} -} // extern "C" -#endif diff --git a/libsanitizer/asan/asan_debugging.cc b/libsanitizer/asan/asan_debugging.cc index 3efad658a4c..6e33a9d4c51 100644 --- a/libsanitizer/asan/asan_debugging.cc +++ b/libsanitizer/asan/asan_debugging.cc @@ -79,8 +79,8 @@ void AsanLocateAddress(uptr addr, AddressDescription *descr) { GetInfoForHeapAddress(addr, descr); } -uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id, - bool alloc_stack) { +static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id, + bool alloc_stack) { AsanChunkView chunk = FindHeapChunkByAddress(addr); if (!chunk.IsValid()) return 0; @@ -106,14 +106,14 @@ uptr AsanGetStack(uptr addr, uptr *trace, uptr size, u32 *thread_id, return 0; } -} // namespace __asan +} // namespace __asan using namespace __asan; SANITIZER_INTERFACE_ATTRIBUTE const char *__asan_locate_address(uptr addr, char *name, uptr name_size, uptr *region_address, uptr *region_size) { - AddressDescription descr = { name, name_size, 0, 0, 0 }; + AddressDescription descr = { name, name_size, 0, 0, nullptr }; AsanLocateAddress(addr, &descr); if (region_address) *region_address = descr.region_address; if (region_size) *region_size = descr.region_size; diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc index cfe96a0882f..de190d11a16 100644 --- a/libsanitizer/asan/asan_fake_stack.cc +++ b/libsanitizer/asan/asan_fake_stack.cc @@ -9,6 +9,7 @@ // // FakeStack is used to detect use-after-return bugs. //===----------------------------------------------------------------------===// + #include "asan_allocator.h" #include "asan_poisoning.h" #include "asan_thread.h" @@ -20,13 +21,19 @@ static const u64 kMagic2 = (kMagic1 << 8) | kMagic1; static const u64 kMagic4 = (kMagic2 << 16) | kMagic2; static const u64 kMagic8 = (kMagic4 << 32) | kMagic4; +static const u64 kAllocaRedzoneSize = 32UL; +static const u64 kAllocaRedzoneMask = 31UL; + // For small size classes inline PoisonShadow for better performance. ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) { CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3. u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr)); if (class_id <= 6) { - for (uptr i = 0; i < (1U << class_id); i++) + for (uptr i = 0; i < (1U << class_id); i++) { shadow[i] = magic; + // Make sure this does not become memset. 
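The weak no-op __sanitizer_malloc_hook/__sanitizer_free_hook definitions a few hunks above exist so that an application can observe every allocation the ASan allocator serves by providing strong definitions of its own. A usage-side example, with the prototypes written out to match the hunk (the real declarations live in the sanitizer interface headers) and size_t standing in for uptr:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Strong definitions override the weak defaults when the program is built
// and run with -fsanitize=address; otherwise they are simply never called.
extern "C" void __sanitizer_malloc_hook(void *ptr, size_t size) {
  std::fprintf(stderr, "malloc(%zu) -> %p\n", size, ptr);
}
extern "C" void __sanitizer_free_hook(void *ptr) {
  std::fprintf(stderr, "free(%p)\n", ptr);
}

int main() {
  void *p = std::malloc(32);
  std::free(p);
}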
+ SanitizerBreakOptimization(nullptr); + } } else { // The size class is too big, it's cheaper to poison only size bytes. PoisonShadow(ptr, size, static_cast<u8>(magic)); @@ -56,7 +63,7 @@ FakeStack *FakeStack::Create(uptr stack_size_log) { void FakeStack::Destroy(int tid) { PoisonAll(0); - if (common_flags()->verbosity >= 2) { + if (Verbosity() >= 2) { InternalScopedString str(kNumberOfSizeClasses * 50); for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id], @@ -73,7 +80,9 @@ void FakeStack::PoisonAll(u8 magic) { magic); } +#if !defined(_MSC_VER) || defined(__clang__) ALWAYS_INLINE USED +#endif FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, uptr real_stack) { CHECK_LT(class_id, kNumberOfSizeClasses); @@ -99,7 +108,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id, *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos]; return res; } - return 0; // We are out of fake stack. + return nullptr; // We are out of fake stack. } uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) { @@ -176,7 +185,7 @@ void SetTLSFakeStack(FakeStack *fs) { } static FakeStack *GetFakeStack() { AsanThread *t = GetCurrentThread(); - if (!t) return 0; + if (!t) return nullptr; return t->fake_stack(); } @@ -184,40 +193,39 @@ static FakeStack *GetFakeStackFast() { if (FakeStack *fs = GetTLSFakeStack()) return fs; if (!__asan_option_detect_stack_use_after_return) - return 0; + return nullptr; return GetFakeStack(); } -ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size, uptr real_stack) { +ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) { FakeStack *fs = GetFakeStackFast(); - if (!fs) return real_stack; + if (!fs) return 0; + uptr local_stack; + uptr real_stack = reinterpret_cast<uptr>(&local_stack); FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack); - if (!ff) - return real_stack; // Out of fake stack, return the real one. + if (!ff) return 0; // Out of fake stack. 
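The fake-stack changes in this hunk are what implement stack-use-after-return detection: __asan_stack_malloc_<N> hands out a frame from a per-size-class fake stack, and __asan_stack_free_<N> poisons that frame's shadow instead of letting the stack slot be silently reused. From the user's side the effect looks like the example below; compile with -fsanitize=address and run with ASAN_OPTIONS=detect_stack_use_after_return=1 (the flag defaults to off in this version):

#include <cstdio>

int *leak_frame() {
  int local = 42;
  int *escaped = &local;  // deliberately escape a pointer to a local
  return escaped;
}

int main() {
  int *p = leak_frame();
  // With fake stacks enabled, leak_frame()'s frame was poisoned on return, so
  // the read below is reported as stack-use-after-return instead of returning
  // whatever stale data happens to be there.
  std::printf("%d\n", *p);
}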
uptr ptr = reinterpret_cast<uptr>(ff); SetShadow(ptr, size, class_id, 0); return ptr; } -ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size, uptr real_stack) { - if (ptr == real_stack) - return; +ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) { FakeStack::Deallocate(ptr, class_id); SetShadow(ptr, size, class_id, kMagic8); } -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; #define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \ - __asan_stack_malloc_##class_id(uptr size, uptr real_stack) { \ - return OnMalloc(class_id, size, real_stack); \ + __asan_stack_malloc_##class_id(uptr size) { \ + return OnMalloc(class_id, size); \ } \ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \ - uptr ptr, uptr size, uptr real_stack) { \ - OnFree(ptr, class_id, size, real_stack); \ + uptr ptr, uptr size) { \ + OnFree(ptr, class_id, size); \ } DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0) @@ -239,15 +247,35 @@ SANITIZER_INTERFACE_ATTRIBUTE void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg, void **end) { FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack); - if (!fs) return 0; + if (!fs) return nullptr; uptr frame_beg, frame_end; FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack( reinterpret_cast<uptr>(addr), &frame_beg, &frame_end)); - if (!frame) return 0; + if (!frame) return nullptr; if (frame->magic != kCurrentStackFrameMagic) - return 0; + return nullptr; if (beg) *beg = reinterpret_cast<void*>(frame_beg); if (end) *end = reinterpret_cast<void*>(frame_end); return reinterpret_cast<void*>(frame->real_stack); } -} // extern "C" + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_alloca_poison(uptr addr, uptr size) { + uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize; + uptr PartialRzAddr = addr + size; + uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask; + uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1); + FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic); + FastPoisonShadowPartialRightRedzone( + PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY, + RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic); + FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_allocas_unpoison(uptr top, uptr bottom) { + if ((!top) || (top > bottom)) return; + REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0, + (bottom - top) / SHADOW_GRANULARITY); +} +} // extern "C" diff --git a/libsanitizer/asan/asan_flags.cc b/libsanitizer/asan/asan_flags.cc new file mode 100644 index 00000000000..9a68ad4eaa8 --- /dev/null +++ b/libsanitizer/asan/asan_flags.cc @@ -0,0 +1,177 @@ +//===-- asan_flags.cc -------------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan flag parsing logic. 
+//===----------------------------------------------------------------------===// + +#include "asan_activation.h" +#include "asan_flags.h" +#include "asan_interface_internal.h" +#include "asan_stack.h" +#include "lsan/lsan_common.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "ubsan/ubsan_flags.h" +#include "ubsan/ubsan_platform.h" + +namespace __asan { + +Flags asan_flags_dont_use_directly; // use via flags(). + +static const char *MaybeCallAsanDefaultOptions() { + return (&__asan_default_options) ? __asan_default_options() : ""; +} + +static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { +#ifdef ASAN_DEFAULT_OPTIONS +// Stringize the macro value. +# define ASAN_STRINGIZE(x) #x +# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) + return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); +#else + return ""; +#endif +} + +void Flags::SetDefaults() { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +static void RegisterAsanFlags(FlagParser *parser, Flags *f) { +#define ASAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "asan_flags.inc" +#undef ASAN_FLAG +} + +void InitializeFlags() { + // Set the default values and prepare for parsing ASan and common flags. + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.detect_leaks = CAN_SANITIZE_LEAKS; + cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = kDefaultMallocContextSize; + cf.intercept_tls_get_addr = true; + cf.exitcode = 1; + OverrideCommonFlags(cf); + } + Flags *f = flags(); + f->SetDefaults(); + + FlagParser asan_parser; + RegisterAsanFlags(&asan_parser, f); + RegisterCommonFlags(&asan_parser); + + // Set the default values and prepare for parsing LSan and UBSan flags + // (which can also overwrite common flags). +#if CAN_SANITIZE_LEAKS + __lsan::Flags *lf = __lsan::flags(); + lf->SetDefaults(); + + FlagParser lsan_parser; + __lsan::RegisterLsanFlags(&lsan_parser, lf); + RegisterCommonFlags(&lsan_parser); +#endif + +#if CAN_SANITIZE_UB + __ubsan::Flags *uf = __ubsan::flags(); + uf->SetDefaults(); + + FlagParser ubsan_parser; + __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); + RegisterCommonFlags(&ubsan_parser); +#endif + + // Override from ASan compile definition. + const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition(); + asan_parser.ParseString(asan_compile_def); + + // Override from user-specified string. + const char *asan_default_options = MaybeCallAsanDefaultOptions(); + asan_parser.ParseString(asan_default_options); +#if CAN_SANITIZE_UB + const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); + ubsan_parser.ParseString(ubsan_default_options); +#endif + + // Override from command line. + asan_parser.ParseString(GetEnv("ASAN_OPTIONS")); +#if CAN_SANITIZE_LEAKS + lsan_parser.ParseString(GetEnv("LSAN_OPTIONS")); +#endif +#if CAN_SANITIZE_UB + ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS")); +#endif + + // Let activation flags override current settings. On Android they come + // from a system property. On other platforms this is no-op. 
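The new asan_flags.cc/asan_flags.inc pair replaces the hand-written Flags struct with an X-macro list: every flag is declared exactly once with its type, name, default and description, and the struct definition, SetDefaults() and RegisterAsanFlags() each re-include the list with their own ASAN_FLAG expansion. InitializeFlags() then parses, in order, the compile-time ASAN_DEFAULT_OPTIONS, the __asan_default_options() string, and the ASAN_OPTIONS/LSAN_OPTIONS/UBSAN_OPTIONS environment variables, plus the Android activation flags. A condensed, self-contained model of the pattern (FLAG_LIST stands in for including asan_flags.inc):

#include <cstdio>

#define FLAG_LIST(X)                                              \
  X(int,  redzone,     16,   "Minimal redzone size in bytes.")    \
  X(bool, poison_heap, true, "Poison heap memory on (de)allocation.")

struct Flags {
#define DECLARE(Type, Name, Default, Desc) Type Name;
  FLAG_LIST(DECLARE)
#undef DECLARE

  void SetDefaults() {
#define SET_DEFAULT(Type, Name, Default, Desc) Name = Default;
    FLAG_LIST(SET_DEFAULT)
#undef SET_DEFAULT
  }

  void PrintDescriptions() const {
#define PRINT(Type, Name, Default, Desc) std::printf("%-12s %s\n", #Name, Desc);
    FLAG_LIST(PRINT)
#undef PRINT
  }
};

int main() {
  Flags f;
  f.SetDefaults();
  f.PrintDescriptions();
  std::printf("redzone=%d poison_heap=%d\n", f.redzone, f.poison_heap);
}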
+ if (!flags()->start_deactivated) { + char buf[100]; + GetExtraActivationFlags(buf, sizeof(buf)); + asan_parser.ParseString(buf); + } + + SetVerbosity(common_flags()->verbosity); + + // TODO(eugenis): dump all flags at verbosity>=2? + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) { + // TODO(samsonov): print all of the flags (ASan, LSan, common). + asan_parser.PrintFlagDescriptions(); + } + + // Flag validation: + if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) { + Report("%s: detect_leaks is not supported on this platform.\n", + SanitizerToolName); + Die(); + } + // Make "strict_init_order" imply "check_initialization_order". + // TODO(samsonov): Use a single runtime flag for an init-order checker. + if (f->strict_init_order) { + f->check_initialization_order = true; + } + CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax); + CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log); + CHECK_GE(f->redzone, 16); + CHECK_GE(f->max_redzone, f->redzone); + CHECK_LE(f->max_redzone, 2048); + CHECK(IsPowerOfTwo(f->redzone)); + CHECK(IsPowerOfTwo(f->max_redzone)); + + // quarantine_size is deprecated but we still honor it. + // quarantine_size can not be used together with quarantine_size_mb. + if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) { + Report("%s: please use either 'quarantine_size' (deprecated) or " + "quarantine_size_mb, but not both\n", SanitizerToolName); + Die(); + } + if (f->quarantine_size >= 0) + f->quarantine_size_mb = f->quarantine_size >> 20; + if (f->quarantine_size_mb < 0) { + const int kDefaultQuarantineSizeMb = + (ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8; + f->quarantine_size_mb = kDefaultQuarantineSizeMb; + } +} + +} // namespace __asan + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char* __asan_default_options() { return ""; } +} // extern "C" +#endif diff --git a/libsanitizer/asan/asan_flags.h b/libsanitizer/asan/asan_flags.h index 0b6a9857be0..6b33789b84c 100644 --- a/libsanitizer/asan/asan_flags.h +++ b/libsanitizer/asan/asan_flags.h @@ -14,6 +14,7 @@ #define ASAN_FLAGS_H #include "sanitizer_common/sanitizer_internal_defs.h" +#include "sanitizer_common/sanitizer_flag_parser.h" // ASan flag values can be defined in four ways: // 1) initialized with default values at startup. @@ -22,55 +23,24 @@ // 3) overriden from string returned by user-specified function // __asan_default_options(). // 4) overriden from env variable ASAN_OPTIONS. +// 5) overriden during ASan activation (for now used on Android only). namespace __asan { struct Flags { - // Flag descriptions are in asan_rtl.cc. 
- int quarantine_size; - int redzone; - int max_redzone; - bool debug; - int report_globals; - bool check_initialization_order; - bool replace_str; - bool replace_intrin; - bool mac_ignore_invalid_free; - bool detect_stack_use_after_return; - int min_uar_stack_size_log; - int max_uar_stack_size_log; - bool uar_noreserve; - int max_malloc_fill_size, malloc_fill_byte; - int exitcode; - bool allow_user_poisoning; - int sleep_before_dying; - bool check_malloc_usable_size; - bool unmap_shadow_on_exit; - bool abort_on_error; - bool print_stats; - bool print_legend; - bool atexit; - bool allow_reexec; - bool print_full_thread_history; - bool poison_heap; - bool poison_partial; - bool poison_array_cookie; - bool alloc_dealloc_mismatch; - bool new_delete_type_mismatch; - bool strict_memcmp; - bool strict_init_order; - bool start_deactivated; - int detect_invalid_pointer_pairs; - bool detect_container_overflow; - int detect_odr_violation; - bool dump_instruction_bytes; +#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "asan_flags.inc" +#undef ASAN_FLAG + + void SetDefaults(); }; extern Flags asan_flags_dont_use_directly; inline Flags *flags() { return &asan_flags_dont_use_directly; } -void InitializeFlags(Flags *f, const char *env); + +void InitializeFlags(); } // namespace __asan diff --git a/libsanitizer/asan/asan_flags.inc b/libsanitizer/asan/asan_flags.inc new file mode 100644 index 00000000000..3163c23cd09 --- /dev/null +++ b/libsanitizer/asan/asan_flags.inc @@ -0,0 +1,134 @@ +//===-- asan_flags.inc ------------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// ASan runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef ASAN_FLAG +# error "Define ASAN_FLAG prior to including this file!" +#endif + +// ASAN_FLAG(Type, Name, DefaultValue, Description) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +ASAN_FLAG(int, quarantine_size, -1, + "Deprecated, please use quarantine_size_mb.") +ASAN_FLAG(int, quarantine_size_mb, -1, + "Size (in Mb) of quarantine used to detect use-after-free " + "errors. Lower value may reduce memory usage but increase the " + "chance of false negatives.") +ASAN_FLAG(int, redzone, 16, + "Minimal size (in bytes) of redzones around heap objects. " + "Requirement: redzone >= 16, is a power of two.") +ASAN_FLAG(int, max_redzone, 2048, + "Maximal size (in bytes) of redzones around heap objects.") +ASAN_FLAG( + bool, debug, false, + "If set, prints some debugging information and does additional checks.") +ASAN_FLAG( + int, report_globals, 1, + "Controls the way to handle globals (0 - don't detect buffer overflow on " + "globals, 1 - detect buffer overflow, 2 - print data about registered " + "globals).") +ASAN_FLAG(bool, check_initialization_order, false, + "If set, attempts to catch initialization order issues.") +ASAN_FLAG( + bool, replace_str, true, + "If set, uses custom wrappers and replacements for libc string functions " + "to find more errors.") +ASAN_FLAG(bool, replace_intrin, true, + "If set, uses custom wrappers for memset/memcpy/memmove intinsics.") +ASAN_FLAG(bool, mac_ignore_invalid_free, false, + "Ignore invalid free() calls to work around some bugs. 
Used on OS X " + "only.") +ASAN_FLAG(bool, detect_stack_use_after_return, false, + "Enables stack-use-after-return checking at run-time.") +ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway. + "Minimum fake stack size log.") +ASAN_FLAG(int, max_uar_stack_size_log, + 20, // 1Mb per size class, i.e. ~11Mb per thread + "Maximum fake stack size log.") +ASAN_FLAG(bool, uar_noreserve, false, + "Use mmap with 'noreserve' flag to allocate fake stack.") +ASAN_FLAG( + int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K. + "ASan allocator flag. max_malloc_fill_size is the maximal amount of " + "bytes that will be filled with malloc_fill_byte on malloc.") +ASAN_FLAG(int, malloc_fill_byte, 0xbe, + "Value used to fill the newly allocated memory.") +ASAN_FLAG(bool, allow_user_poisoning, true, + "If set, user may manually mark memory regions as poisoned or " + "unpoisoned.") +ASAN_FLAG( + int, sleep_before_dying, 0, + "Number of seconds to sleep between printing an error report and " + "terminating the program. Useful for debugging purposes (e.g. when one " + "needs to attach gdb).") +ASAN_FLAG(bool, check_malloc_usable_size, true, + "Allows the users to work around the bug in Nvidia drivers prior to " + "295.*.") +ASAN_FLAG(bool, unmap_shadow_on_exit, false, + "If set, explicitly unmaps the (huge) shadow at exit.") +ASAN_FLAG(bool, print_stats, false, + "Print various statistics after printing an error message or if " + "atexit=1.") +ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.") +ASAN_FLAG(bool, atexit, false, + "If set, prints ASan exit stats even after program terminates " + "successfully.") +ASAN_FLAG( + bool, print_full_thread_history, true, + "If set, prints thread creation stacks for the threads involved in the " + "report and their ancestors up to the main thread.") +ASAN_FLAG( + bool, poison_heap, true, + "Poison (or not) the heap memory on [de]allocation. Zero value is useful " + "for benchmarking the allocator or instrumentator.") +ASAN_FLAG(bool, poison_partial, true, + "If true, poison partially addressable 8-byte aligned words " + "(default=true). This flag affects heap and global buffers, but not " + "stack buffers.") +ASAN_FLAG(bool, poison_array_cookie, true, + "Poison (or not) the array cookie after operator new[].") + +// Turn off alloc/dealloc mismatch checker on Mac and Windows for now. +// https://code.google.com/p/address-sanitizer/issues/detail?id=131 +// https://code.google.com/p/address-sanitizer/issues/detail?id=309 +// TODO(glider,timurrrr): Fix known issues and enable this back. +ASAN_FLAG(bool, alloc_dealloc_mismatch, + (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0), + "Report errors on malloc/delete, new/free, new/delete[], etc.") + +ASAN_FLAG(bool, new_delete_type_mismatch, true, + "Report errors on mismatch betwen size of new and delete.") +ASAN_FLAG( + bool, strict_init_order, false, + "If true, assume that dynamic initializers can never access globals from " + "other modules, even if the latter are already initialized.") +ASAN_FLAG( + bool, start_deactivated, false, + "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap " + "poisoning) to reduce memory consumption as much as possible, and " + "restores them to original values when the first instrumented module is " + "loaded into the process. This is mainly intended to be used on " + "Android. 
") +ASAN_FLAG( + int, detect_invalid_pointer_pairs, 0, + "If non-zero, try to detect operations like <, <=, >, >= and - on " + "invalid pointer pairs (e.g. when pointers belong to different objects). " + "The bigger the value the harder we try.") +ASAN_FLAG( + bool, detect_container_overflow, true, + "If true, honor the container overflow annotations. " + "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow") +ASAN_FLAG(int, detect_odr_violation, 2, + "If >=2, detect violation of One-Definition-Rule (ODR); " + "If ==1, detect ODR-violation only if the two variables " + "have different sizes") +ASAN_FLAG(bool, dump_instruction_bytes, false, + "If true, dump 16 bytes starting at the instruction that caused SEGV") +ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.") diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc index 4bb88cfa001..b132c8b09b9 100644 --- a/libsanitizer/asan/asan_globals.cc +++ b/libsanitizer/asan/asan_globals.cc @@ -9,6 +9,7 @@ // // Handle globals. //===----------------------------------------------------------------------===// + #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -16,6 +17,7 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" +#include "asan_suppressions.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_mutex.h" @@ -71,7 +73,7 @@ ALWAYS_INLINE void PoisonRedZones(const Global &g) { const uptr kMinimalDistanceFromAnotherGlobal = 64; -bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { +static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) { if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false; if (addr >= g.beg + g.size_with_redzone) return false; return true; @@ -88,36 +90,40 @@ static void ReportGlobal(const Global &g, const char *prefix) { } } -static bool DescribeOrGetInfoIfGlobal(uptr addr, uptr size, bool print, - Global *output_global) { - if (!flags()->report_globals) return false; +static u32 FindRegistrationSite(const Global *g) { + mu_for_globals.CheckLocked(); + CHECK(global_registration_site_vector); + for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { + GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; + if (g >= grs.g_first && g <= grs.g_last) + return grs.stack_id; + } + return 0; +} + +int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites, + int max_globals) { + if (!flags()->report_globals) return 0; BlockingMutexLock lock(&mu_for_globals); - bool res = false; + int res = 0; for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { const Global &g = *l->g; - if (print) { - if (flags()->report_globals >= 2) - ReportGlobal(g, "Search"); - res |= DescribeAddressRelativeToGlobal(addr, size, g); - } else { - if (IsAddressNearGlobal(addr, g)) { - CHECK(output_global); - *output_global = g; - return true; - } + if (flags()->report_globals >= 2) + ReportGlobal(g, "Search"); + if (IsAddressNearGlobal(addr, g)) { + globals[res] = g; + if (reg_sites) + reg_sites[res] = FindRegistrationSite(&g); + res++; + if (res == max_globals) break; } } return res; } -bool DescribeAddressIfGlobal(uptr addr, uptr size) { - return DescribeOrGetInfoIfGlobal(addr, size, /* print */ true, - /* output_global */ nullptr); -} - bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) { Global g = {}; - if (DescribeOrGetInfoIfGlobal(addr, /* size */ 1, /* print */ false, 
&g)) { + if (GetGlobalsForAddress(addr, &g, nullptr, 1)) { internal_strncpy(descr->name, g.name, descr->name_size); descr->region_address = g.beg; descr->region_size = g.size; @@ -127,16 +133,6 @@ bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) { return false; } -u32 FindRegistrationSite(const Global *g) { - CHECK(global_registration_site_vector); - for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) { - GlobalRegistrationSite &grs = (*global_registration_site_vector)[i]; - if (g >= grs.g_first && g <= grs.g_last) - return grs.stack_id; - } - return 0; -} - // Register a global variable. // This function may be called more than once for every global // so we store the globals in a map. @@ -148,9 +144,7 @@ static void RegisterGlobal(const Global *g) { CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - // This "ODR violation" detection is fundamentally incompatible with - // how GCC registers globals. Disable as useless until rewritten upstream. - if (0 && flags()->detect_odr_violation) { + if (flags()->detect_odr_violation) { // Try detecting ODR (One Definition Rule) violation, i.e. the situation // where two globals with the same name are defined in different modules. if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) { @@ -158,20 +152,21 @@ static void RegisterGlobal(const Global *g) { // the entire redzone of the second global may be within the first global. for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) { if (g->beg == l->g->beg && - (flags()->detect_odr_violation >= 2 || g->size != l->g->size)) + (flags()->detect_odr_violation >= 2 || g->size != l->g->size) && + !IsODRViolationSuppressed(g->name)) ReportODRViolation(g, FindRegistrationSite(g), l->g, FindRegistrationSite(l->g)); } } } - if (flags()->poison_heap) + if (CanPoisonMemory()) PoisonRedZones(*g); ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals; l->g = g; l->next = list_of_all_globals; list_of_all_globals = l; if (g->has_dynamic_init) { - if (dynamic_init_globals == 0) { + if (!dynamic_init_globals) { dynamic_init_globals = new(allocator_for_globals) VectorOfGlobals(kDynamicInitGlobalsInitialCapacity); } @@ -182,11 +177,13 @@ static void RegisterGlobal(const Global *g) { static void UnregisterGlobal(const Global *g) { CHECK(asan_inited); + if (flags()->report_globals >= 2) + ReportGlobal(*g, "Removed"); CHECK(flags()->report_globals); CHECK(AddrIsInMem(g->beg)); CHECK(AddrIsAlignedByGranularity(g->beg)); CHECK(AddrIsAlignedByGranularity(g->size_with_redzone)); - if (flags()->poison_heap) + if (CanPoisonMemory()) PoisonShadowForGlobal(g, 0); // We unpoison the shadow memory for the global but we do not remove it from // the list because that would require O(n^2) time with the current list @@ -208,7 +205,7 @@ void StopInitOrderChecking() { } } -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT @@ -216,7 +213,7 @@ using namespace __asan; // NOLINT // Register an array of globals. 
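This merge also re-enables ODR-violation detection in RegisterGlobal(): when the shadow for a global being registered is already poisoned, the registered list is scanned for another global at the same address, and a report is produced if the sizes differ (or always, with detect_odr_violation>=2) unless the name is matched by the new suppression machinery. A simplified model of just the list scan, with a std::vector and a hard-coded suppression check standing in for __asan_region_is_poisoned() and asan_suppressions:

#include <cstdio>
#include <cstring>
#include <vector>

struct Global { const char *name; unsigned long beg; unsigned long size; };

std::vector<Global> registered;

bool IsODRViolationSuppressed(const char *name) {
  return std::strcmp(name, "ignored_global") == 0;  // stand-in for the suppression list
}

void RegisterGlobal(const Global &g, int detect_odr_violation) {
  if (detect_odr_violation) {
    for (const Global &other : registered) {
      if (g.beg == other.beg &&
          (detect_odr_violation >= 2 || g.size != other.size) &&
          !IsODRViolationSuppressed(g.name))
        std::printf("ODR violation: '%s' registered twice at 0x%lx\n", g.name, g.beg);
    }
  }
  registered.push_back(g);
}

int main() {
  RegisterGlobal({"color_table", 0x1000, 64}, 1);
  RegisterGlobal({"color_table", 0x1000, 128}, 1);  // same address, different size
}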
void __asan_register_globals(__asan_global *globals, uptr n) { if (!flags()->report_globals) return; - GET_STACK_TRACE_FATAL_HERE; + GET_STACK_TRACE_MALLOC; u32 stack_id = StackDepotPut(stack); BlockingMutexLock lock(&mu_for_globals); if (!global_registration_site_vector) @@ -249,7 +246,7 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) { // initializer can only touch global variables in the same TU. void __asan_before_dynamic_init(const char *module_name) { if (!flags()->check_initialization_order || - !flags()->poison_heap) + !CanPoisonMemory()) return; bool strict_init_order = flags()->strict_init_order; CHECK(dynamic_init_globals); @@ -275,7 +272,7 @@ void __asan_before_dynamic_init(const char *module_name) { // TU are poisoned. It simply unpoisons all dynamically initialized globals. void __asan_after_dynamic_init() { if (!flags()->check_initialization_order || - !flags()->poison_heap) + !CanPoisonMemory()) return; CHECK(asan_inited); BlockingMutexLock lock(&mu_for_globals); diff --git a/libsanitizer/asan/asan_init_version.h b/libsanitizer/asan/asan_init_version.h index da232513a09..2cda18849dd 100644 --- a/libsanitizer/asan/asan_init_version.h +++ b/libsanitizer/asan/asan_init_version.h @@ -23,8 +23,10 @@ extern "C" { // contains the function PC as the 3-rd field (see // DescribeAddressIfStack). // v3=>v4: added '__asan_global_source_location' to __asan_global. - #define __asan_init __asan_init_v4 - #define __asan_init_name "__asan_init_v4" + // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and + // __asan_stack_free_ functions. + // v5=>v6: changed the name of the version check symbol + #define __asan_version_mismatch_check __asan_version_mismatch_check_v6 } #endif // ASAN_INIT_VERSION_H diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc index 182b7842ed9..1b0592e027f 100644 --- a/libsanitizer/asan/asan_interceptors.cc +++ b/libsanitizer/asan/asan_interceptors.cc @@ -9,8 +9,8 @@ // // Intercept various libc functions. //===----------------------------------------------------------------------===// -#include "asan_interceptors.h" +#include "asan_interceptors.h" #include "asan_allocator.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -18,8 +18,19 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" +#include "asan_suppressions.h" #include "sanitizer_common/sanitizer_libc.h" +#if SANITIZER_POSIX +#include "sanitizer_common/sanitizer_posix.h" +#endif + +#if defined(__i386) && SANITIZER_LINUX +#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" +#elif defined(__mips__) && SANITIZER_LINUX +#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" +#endif + namespace __asan { // Return true if we can quickly decide that the region is unpoisoned. @@ -32,12 +43,16 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) { return false; } +struct AsanInterceptorContext { + const char *interceptor_name; +}; + // We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE, // and ASAN_WRITE_RANGE as macro instead of function so // that no extra frames are created, and stack trace contains // relevant information only. // We check all shadow bytes. 
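The comment above introduces the reworked ACCESS_MEMORY_RANGE macro that follows: every interceptor now passes an AsanInterceptorContext carrying its own name, so a poisoned access can be silenced by an interceptor-name or stack-based suppression before a report is emitted, while syscall checks pass a null context and are never suppressed. A function-shaped paraphrase (the real code stays a macro so that no extra frames appear in reports); every helper here is a stand-in:

#include <cstdio>

struct InterceptorContext { const char *interceptor_name; };

bool RegionIsPoisoned(void *, unsigned long) { return true; }   // stand-in for __asan_region_is_poisoned
bool IsInterceptorSuppressed(const char *) { return false; }    // stand-in for the suppression lookup

void CheckAccess(const InterceptorContext *ctx, void *p,
                 unsigned long size, bool is_write) {
  if (!RegionIsPoisoned(p, size)) return;            // fast path: nothing to report
  if (ctx && IsInterceptorSuppressed(ctx->interceptor_name))
    return;                                          // user suppressed this interceptor by name
  std::printf("%s of size %lu at %p in %s\n",
              is_write ? "WRITE" : "READ", size, p,
              ctx ? ctx->interceptor_name : "a syscall");
}

int main() {
  InterceptorContext memcpy_ctx = {"memcpy"};
  char buf[8];
  CheckAccess(&memcpy_ctx, buf, sizeof(buf), /*is_write=*/true);
}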
-#define ACCESS_MEMORY_RANGE(offset, size, isWrite) do { \ +#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \ uptr __offset = (uptr)(offset); \ uptr __size = (uptr)(size); \ uptr __bad = 0; \ @@ -47,13 +62,33 @@ static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) { } \ if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \ (__bad = __asan_region_is_poisoned(__offset, __size))) { \ - GET_CURRENT_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, __bad, isWrite, __size); \ + AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \ + bool suppressed = false; \ + if (_ctx) { \ + suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \ + if (!suppressed && HaveStackTraceBasedSuppressions()) { \ + GET_STACK_TRACE_FATAL_HERE; \ + suppressed = IsStackTraceSuppressed(&stack); \ + } \ + } \ + if (!suppressed) { \ + GET_CURRENT_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \ + } \ } \ } while (0) -#define ASAN_READ_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, false) -#define ASAN_WRITE_RANGE(offset, size) ACCESS_MEMORY_RANGE(offset, size, true) +#define ASAN_READ_RANGE(ctx, offset, size) \ + ACCESS_MEMORY_RANGE(ctx, offset, size, false) +#define ASAN_WRITE_RANGE(ctx, offset, size) \ + ACCESS_MEMORY_RANGE(ctx, offset, size, true) + +#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \ + ASAN_READ_RANGE((ctx), (s), \ + common_flags()->strict_string_checks ? (len) + 1 : (n)) + +#define ASAN_READ_STRING(ctx, s, n) \ + ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n)) // Behavior of functions like "memcpy" or "strcpy" is undefined // if memory intervals overlap. We report error in this case. @@ -74,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1, static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) { #if ASAN_INTERCEPT_STRNLEN - if (REAL(strnlen) != 0) { + if (REAL(strnlen)) { return REAL(strnlen)(s, maxlen); } #endif @@ -92,7 +127,7 @@ int OnExit() { return 0; } -} // namespace __asan +} // namespace __asan // ---------------------- Wrappers ---------------- {{{1 using namespace __asan; // NOLINT @@ -100,31 +135,28 @@ using namespace __asan; // NOLINT DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr) DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) -#if !SANITIZER_MAC -#define ASAN_INTERCEPT_FUNC(name) \ - do { \ - if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ - VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \ - } while (0) -#else -// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION. -#define ASAN_INTERCEPT_FUNC(name) -#endif // SANITIZER_MAC +#define ASAN_INTERCEPTOR_ENTER(ctx, func) \ + AsanInterceptorContext _ctx = {#func}; \ + ctx = (void *)&_ctx; \ + (void) ctx; \ #define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name) #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ - ASAN_WRITE_RANGE(ptr, size) -#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) ASAN_READ_RANGE(ptr, size) + ASAN_WRITE_RANGE(ctx, ptr, size) +#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ + ASAN_READ_RANGE(ctx, ptr, size) #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) 
\ + ASAN_INTERCEPTOR_ENTER(ctx, func); \ do { \ if (asan_init_is_running) \ return REAL(func)(__VA_ARGS__); \ - ctx = 0; \ - (void) ctx; \ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \ return REAL(func)(__VA_ARGS__); \ ENSURE_ASAN_INITED(); \ } while (false) +#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ + do { \ + } while (false) #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ do { \ } while (false) @@ -143,14 +175,30 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) do { \ } while (false) #define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name) +// Strict init-order checking is dlopen-hostile: +// https://code.google.com/p/address-sanitizer/issues/detail?id=178 +#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \ + if (flags()->strict_init_order) { \ + StopInitOrderChecking(); \ + } #define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit() -#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) CovUpdateMapping() -#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CovUpdateMapping() +#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ + CoverageUpdateMapping() +#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() CoverageUpdateMapping() #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited) +#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ + if (AsanThread *t = GetCurrentThread()) { \ + *begin = t->tls_begin(); \ + *end = t->tls_end(); \ + } else { \ + *begin = *end = 0; \ + } #include "sanitizer_common/sanitizer_common_interceptors.inc" -#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(p, s) -#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(p, s) +// Syscall interceptors don't have contexts, we don't support suppressions +// for them. +#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s) +#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s) #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ do { \ (void)(p); \ @@ -163,56 +211,81 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *) } while (false) #include "sanitizer_common/sanitizer_common_syscalls.inc" +struct ThreadStartParam { + atomic_uintptr_t t; + atomic_uintptr_t is_registered; +}; + static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { - AsanThread *t = (AsanThread*)arg; + ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg); + AsanThread *t = nullptr; + while ((t = reinterpret_cast<AsanThread *>( + atomic_load(¶m->t, memory_order_acquire))) == nullptr) + internal_sched_yield(); SetCurrentThread(t); - return t->ThreadStart(GetTid()); + return t->ThreadStart(GetTid(), ¶m->is_registered); } #if ASAN_INTERCEPT_PTHREAD_CREATE INTERCEPTOR(int, pthread_create, void *thread, void *attr, void *(*start_routine)(void*), void *arg) { EnsureMainThreadIDIsCorrect(); - // Strict init-order checking in thread-hostile. + // Strict init-order checking is thread-hostile. 
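The pthread_create interceptor that begins here (and continues just below) no longer hands the AsanThread object straight to the child. Instead it uses a two-atomic handshake: the child spins until the parent publishes the AsanThread pointer, and the parent spins until the child has registered itself, so leak checking never runs in a window where the start_routine argument is reachable only from a not-yet-known child stack. A sketch of the handshake with std::thread and std::atomic standing in for the interceptor plumbing:

#include <atomic>
#include <cstdio>
#include <thread>

struct AsanThreadStub { int tid; };  // stand-in for AsanThread

std::atomic<AsanThreadStub *> published{nullptr};
std::atomic<bool> registered{false};

void ChildTrampoline() {             // stand-in for asan_thread_start()
  AsanThreadStub *t;
  while ((t = published.load(std::memory_order_acquire)) == nullptr)
    std::this_thread::yield();       // wait for the parent to publish the thread object
  std::printf("child: running as tid %d\n", t->tid);
  registered.store(true, std::memory_order_release);  // done by ThreadStart() in the real code
}

int main() {
  std::thread child(ChildTrampoline);   // stands in for REAL(pthread_create)
  AsanThreadStub t{1};
  published.store(&t, std::memory_order_release);
  while (!registered.load(std::memory_order_acquire))
    std::this_thread::yield();          // parent returns only after registration
  child.join();
}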
if (flags()->strict_init_order) StopInitOrderChecking(); GET_STACK_TRACE_THREAD; int detached = 0; - if (attr != 0) + if (attr) REAL(pthread_attr_getdetachstate)(attr, &detached); + ThreadStartParam param; + atomic_store(¶m.t, 0, memory_order_relaxed); + atomic_store(¶m.is_registered, 0, memory_order_relaxed); + int result = REAL(pthread_create)(thread, attr, asan_thread_start, ¶m); + if (result == 0) { + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + atomic_store(¶m.t, reinterpret_cast<uptr>(t), memory_order_release); + // Wait until the AsanThread object is initialized and the ThreadRegistry + // entry is in "started" state. One reason for this is that after this + // interceptor exits, the child thread's stack may be the only thing holding + // the |arg| pointer. This may cause LSan to report a leak if leak checking + // happens at a point when the interceptor has already exited, but the stack + // range for the child thread is not yet known. + while (atomic_load(¶m.is_registered, memory_order_acquire) == 0) + internal_sched_yield(); + } + return result; +} - u32 current_tid = GetCurrentTidOrInvalid(); - AsanThread *t = AsanThread::Create(start_routine, arg); - CreateThreadContextArgs args = { t, &stack }; - asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); - return REAL(pthread_create)(thread, attr, asan_thread_start, t); +INTERCEPTOR(int, pthread_join, void *t, void **arg) { + return real_pthread_join(t, arg); } + +DEFINE_REAL_PTHREAD_FUNCTIONS #endif // ASAN_INTERCEPT_PTHREAD_CREATE #if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION #if SANITIZER_ANDROID INTERCEPTOR(void*, bsd_signal, int signum, void *handler) { - if (!AsanInterceptsSignal(signum) || - common_flags()->allow_user_segv_handler) { + if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(bsd_signal)(signum, handler); } return 0; } -#else +#endif + INTERCEPTOR(void*, signal, int signum, void *handler) { - if (!AsanInterceptsSignal(signum) || - common_flags()->allow_user_segv_handler) { + if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(signal)(signum, handler); } - return 0; + return nullptr; } -#endif INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) { - if (!AsanInterceptsSignal(signum) || - common_flags()->allow_user_segv_handler) { + if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) { return REAL(sigaction)(signum, act, oldact); } return 0; @@ -220,10 +293,10 @@ INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act, namespace __sanitizer { int real_sigaction(int signum, const void *act, void *oldact) { - return REAL(sigaction)(signum, - (struct sigaction *)act, (struct sigaction *)oldact); + return REAL(sigaction)(signum, (const struct sigaction *)act, + (struct sigaction *)oldact); } -} // namespace __sanitizer +} // namespace __sanitizer #elif SANITIZER_POSIX // We need to have defined REAL(sigaction) on posix systems. 
@@ -239,7 +312,7 @@ static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) { ssize += stack - bottom; ssize = RoundUpTo(ssize, PageSize); static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb - if (ssize && ssize <= kMaxSaneContextStackSize) { + if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) { PoisonShadow(bottom, ssize, 0); } } @@ -294,113 +367,73 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) { } #endif -#if SANITIZER_WINDOWS -INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { - CHECK(REAL(RaiseException)); - __asan_handle_no_return(); - REAL(RaiseException)(a, b, c, d); -} - -INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { - CHECK(REAL(_except_handler3)); - __asan_handle_no_return(); - return REAL(_except_handler3)(a, b, c, d); -} - -#if ASAN_DYNAMIC -// This handler is named differently in -MT and -MD CRTs. -#define _except_handler4 _except_handler4_common -#endif -INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { - CHECK(REAL(_except_handler4)); - __asan_handle_no_return(); - return REAL(_except_handler4)(a, b, c, d); -} -#endif - -static inline int CharCmp(unsigned char c1, unsigned char c2) { - return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1; -} +// memcpy is called during __asan_init() from the internals of printf(...). +// We do not treat memcpy with to==from as a bug. +// See http://llvm.org/bugs/show_bug.cgi?id=11763. +#define ASAN_MEMCPY_IMPL(ctx, to, from, size) do { \ + if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \ + if (asan_init_is_running) { \ + return REAL(memcpy)(to, from, size); \ + } \ + ENSURE_ASAN_INITED(); \ + if (flags()->replace_intrin) { \ + if (to != from) { \ + CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \ + } \ + ASAN_READ_RANGE(ctx, from, size); \ + ASAN_WRITE_RANGE(ctx, to, size); \ + } \ + return REAL(memcpy)(to, from, size); \ + } while (0) -INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) { - if (UNLIKELY(!asan_inited)) return internal_memcmp(a1, a2, size); - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - if (flags()->strict_memcmp) { - // Check the entire regions even if the first bytes of the buffers are - // different. - ASAN_READ_RANGE(a1, size); - ASAN_READ_RANGE(a2, size); - // Fallthrough to REAL(memcmp) below. - } else { - unsigned char c1 = 0, c2 = 0; - const unsigned char *s1 = (const unsigned char*)a1; - const unsigned char *s2 = (const unsigned char*)a2; - uptr i; - for (i = 0; i < size; i++) { - c1 = s1[i]; - c2 = s2[i]; - if (c1 != c2) break; - } - ASAN_READ_RANGE(s1, Min(i + 1, size)); - ASAN_READ_RANGE(s2, Min(i + 1, size)); - return CharCmp(c1, c2); - } - } - return REAL(memcmp(a1, a2, size)); -} void *__asan_memcpy(void *to, const void *from, uptr size) { - if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); - // memcpy is called during __asan_init() from the internals - // of printf(...). - if (asan_init_is_running) { - return REAL(memcpy)(to, from, size); - } - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - if (to != from) { - // We do not treat memcpy with to==from as a bug. - // See http://llvm.org/bugs/show_bug.cgi?id=11763. - CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); - } - ASAN_READ_RANGE(from, size); - ASAN_WRITE_RANGE(to, size); - } - return REAL(memcpy)(to, from, size); + ASAN_MEMCPY_IMPL(nullptr, to, from, size); } +// memset is called inside Printf. 
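The memcpy/memset/memmove interceptors in this part of asan_interceptors.cc are folded into shared ASAN_*_IMPL macros: each one checks the source and destination ranges and, for memcpy, rejects overlapping buffers (with the documented exception of to == from). The overlap test behind CHECK_RANGES_OVERLAP is the usual half-open interval check; a small standalone version:

#include <cstdio>

bool RangesOverlap(const char *p1, unsigned long len1,
                   const char *p2, unsigned long len2) {
  // Two half-open byte ranges overlap unless one ends at or before the other begins.
  return !((p1 + len1 <= p2) || (p2 + len2 <= p1));
}

int main() {
  char buf[16];
  std::printf("%d\n", RangesOverlap(buf, 8, buf + 8, 8));   // 0: adjacent ranges, legal memcpy
  std::printf("%d\n", RangesOverlap(buf, 10, buf + 4, 8));  // 1: would be reported as overlapping
}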
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) do { \ + if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \ + if (asan_init_is_running) { \ + return REAL(memset)(block, c, size); \ + } \ + ENSURE_ASAN_INITED(); \ + if (flags()->replace_intrin) { \ + ASAN_WRITE_RANGE(ctx, block, size); \ + } \ + return REAL(memset)(block, c, size); \ + } while (0) + void *__asan_memset(void *block, int c, uptr size) { - if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); - // memset is called inside Printf. - if (asan_init_is_running) { - return REAL(memset)(block, c, size); - } - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - ASAN_WRITE_RANGE(block, size); - } - return REAL(memset)(block, c, size); + ASAN_MEMSET_IMPL(nullptr, block, c, size); } +#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) do { \ + if (UNLIKELY(!asan_inited)) \ + return internal_memmove(to, from, size); \ + ENSURE_ASAN_INITED(); \ + if (flags()->replace_intrin) { \ + ASAN_READ_RANGE(ctx, from, size); \ + ASAN_WRITE_RANGE(ctx, to, size); \ + } \ + return internal_memmove(to, from, size); \ + } while (0) + void *__asan_memmove(void *to, const void *from, uptr size) { - if (UNLIKELY(!asan_inited)) - return internal_memmove(to, from, size); - ENSURE_ASAN_INITED(); - if (flags()->replace_intrin) { - ASAN_READ_RANGE(from, size); - ASAN_WRITE_RANGE(to, size); - } - return internal_memmove(to, from, size); + ASAN_MEMMOVE_IMPL(nullptr, to, from, size); } INTERCEPTOR(void*, memmove, void *to, const void *from, uptr size) { - return __asan_memmove(to, from, size); + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, memmove); + ASAN_MEMMOVE_IMPL(ctx, to, from, size); } INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, memcpy); #if !SANITIZER_MAC - return __asan_memcpy(to, from, size); + ASAN_MEMCPY_IMPL(ctx, to, from, size); #else // At least on 10.7 and 10.8 both memcpy() and memmove() are being replaced // with WRAP(memcpy). As a result, false positives are reported for memmove() @@ -408,15 +441,19 @@ INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) { // ASAN_OPTIONS=replace_intrin=0, memmove() is still replaced with // internal_memcpy(), which may lead to crashes, see // http://llvm.org/bugs/show_bug.cgi?id=16362. - return __asan_memmove(to, from, size); + ASAN_MEMMOVE_IMPL(ctx, to, from, size); #endif // !SANITIZER_MAC } INTERCEPTOR(void*, memset, void *block, int c, uptr size) { - return __asan_memset(block, c, size); + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, memset); + ASAN_MEMSET_IMPL(ctx, block, c, size); } INTERCEPTOR(char*, strchr, const char *str, int c) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strchr); if (UNLIKELY(!asan_inited)) return internal_strchr(str, c); // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is // used. @@ -426,8 +463,9 @@ INTERCEPTOR(char*, strchr, const char *str, int c) { ENSURE_ASAN_INITED(); char *result = REAL(strchr)(str, c); if (flags()->replace_str) { - uptr bytes_read = (result ? result - str : REAL(strlen)(str)) + 1; - ASAN_READ_RANGE(str, bytes_read); + uptr len = REAL(strlen)(str); + uptr bytes_read = (result ? result - str : len) + 1; + ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read); } return result; } @@ -449,13 +487,15 @@ DEFINE_REAL(char*, index, const char *string, int c) // For both strcat() and strncat() we need to check the validity of |to| // argument irrespective of the |from| length. 
INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = REAL(strlen)(from); - ASAN_READ_RANGE(from, from_length + 1); + ASAN_READ_RANGE(ctx, from, from_length + 1); uptr to_length = REAL(strlen)(to); - ASAN_READ_RANGE(to, to_length); - ASAN_WRITE_RANGE(to + to_length, from_length + 1); + ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); + ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); // If the copying actually happens, the |from| string should not overlap // with the resulting string starting at |to|, which has a length of // to_length + from_length + 1. @@ -468,14 +508,16 @@ INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT } INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strncat); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_length = MaybeRealStrnlen(from, size); uptr copy_length = Min(size, from_length + 1); - ASAN_READ_RANGE(from, copy_length); + ASAN_READ_RANGE(ctx, from, copy_length); uptr to_length = REAL(strlen)(to); - ASAN_READ_RANGE(to, to_length); - ASAN_WRITE_RANGE(to + to_length, from_length + 1); + ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length); + ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1); if (from_length > 0) { CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1, from, copy_length); @@ -485,6 +527,8 @@ INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) { } INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT #endif @@ -497,19 +541,21 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT if (flags()->replace_str) { uptr from_size = REAL(strlen)(from) + 1; CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size); - ASAN_READ_RANGE(from, from_size); - ASAN_WRITE_RANGE(to, from_size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, from_size); } return REAL(strcpy)(to, from); // NOLINT } #if ASAN_INTERCEPT_STRDUP INTERCEPTOR(char*, strdup, const char *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strdup); if (UNLIKELY(!asan_inited)) return internal_strdup(s); ENSURE_ASAN_INITED(); uptr length = REAL(strlen)(s); if (flags()->replace_str) { - ASAN_READ_RANGE(s, length + 1); + ASAN_READ_RANGE(ctx, s, length + 1); } GET_STACK_TRACE_MALLOC; void *new_mem = asan_malloc(length + 1, &stack); @@ -519,6 +565,8 @@ INTERCEPTOR(char*, strdup, const char *s) { #endif INTERCEPTOR(SIZE_T, strlen, const char *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strlen); if (UNLIKELY(!asan_inited)) return internal_strlen(s); // strlen is called from malloc_default_purgeable_zone() // in __asan::ReplaceSystemAlloc() on Mac. 
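For strcat and strncat the interceptors above now read the destination with ASAN_READ_STRING_OF_LEN, so a report can carry the destination's length, and they still require that when anything is actually copied the source does not overlap the resulting string. Spelled out for strcat (illustrative helper, not runtime code, using libc strlen in place of REAL(strlen)):

#include <cstdint>
#include <cstring>
using uptr = std::uintptr_t;

// Ranges the strcat(to, from) interceptor validates before delegating.
static void strcat_checked_ranges(char *to, const char *from) {
  uptr from_length = std::strlen(from);
  uptr to_length = std::strlen(to);
  // read:  [from, from + from_length + 1)   source, including its NUL
  // read:  [to,   to + to_length)           existing destination contents
  // write: [to + to_length, to + to_length + from_length + 1)
  //
  // If from_length > 0, |from| must not overlap the *resulting* string
  // [to, to + to_length + from_length + 1); otherwise the copy could read
  // bytes it may already have overwritten.
  (void)from_length;
  (void)to_length;
}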
@@ -528,78 +576,65 @@ INTERCEPTOR(SIZE_T, strlen, const char *s) { ENSURE_ASAN_INITED(); SIZE_T length = REAL(strlen)(s); if (flags()->replace_str) { - ASAN_READ_RANGE(s, length + 1); + ASAN_READ_RANGE(ctx, s, length + 1); } return length; } INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, wcslen); SIZE_T length = REAL(wcslen)(s); if (!asan_init_is_running) { ENSURE_ASAN_INITED(); - ASAN_READ_RANGE(s, (length + 1) * sizeof(wchar_t)); + ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t)); } return length; } INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strncpy); ENSURE_ASAN_INITED(); if (flags()->replace_str) { uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1); CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size); - ASAN_READ_RANGE(from, from_size); - ASAN_WRITE_RANGE(to, size); + ASAN_READ_RANGE(ctx, from, from_size); + ASAN_WRITE_RANGE(ctx, to, size); } return REAL(strncpy)(to, from, size); } #if ASAN_INTERCEPT_STRNLEN INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strnlen); ENSURE_ASAN_INITED(); uptr length = REAL(strnlen)(s, maxlen); if (flags()->replace_str) { - ASAN_READ_RANGE(s, Min(length + 1, maxlen)); + ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen)); } return length; } #endif // ASAN_INTERCEPT_STRNLEN -static inline bool IsValidStrtolBase(int base) { - return (base == 0) || (2 <= base && base <= 36); -} - -static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) { - CHECK(endptr); - if (nptr == *endptr) { - // No digits were found at strtol call, we need to find out the last - // symbol accessed by strtoll on our own. - // We get this symbol by skipping leading blanks and optional +/- sign. - while (IsSpace(*nptr)) nptr++; - if (*nptr == '+' || *nptr == '-') nptr++; - *endptr = (char*)nptr; - } - CHECK(*endptr >= nptr); -} - INTERCEPTOR(long, strtol, const char *nptr, // NOLINT char **endptr, int base) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strtol); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtol)(nptr, endptr, base); } char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT - if (endptr != 0) { - *endptr = real_endptr; - } - if (IsValidStrtolBase(base)) { - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); - } + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(int, atoi, const char *nptr) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoi); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr); #endif @@ -614,11 +649,13 @@ INTERCEPTOR(int, atoi, const char *nptr) { // different from int). So, we just imitate this behavior. 
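The strtol hunk above (and the strtoll one that follows) drops the file-local IsValidStrtolBase/FixRealStrtolEndptr pair in favour of the shared StrtolFixAndCheck helper from sanitizer_common. Reconstructed from the removed code, the behaviour that moved looks roughly like this (the shared helper may differ in detail):

#include <cctype>

static bool IsValidStrtolBase(int base) {
  return base == 0 || (2 <= base && base <= 36);
}

// If strtol consumed no digits it leaves *endptr == nptr, even though it did
// read leading whitespace and an optional sign; recover the last byte read.
static void FixRealStrtolEndptr(const char *nptr, char **endptr) {
  if (nptr == *endptr) {
    while (std::isspace((unsigned char)*nptr)) nptr++;
    if (*nptr == '+' || *nptr == '-') nptr++;
    *endptr = const_cast<char *>(nptr);
  }
}

// Approximate shape of StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base):
// hand the real end pointer back to the caller and, only for bases strtol
// actually accepts (otherwise it may exit with EINVAL having read nothing),
// mark [nptr, real_endptr] as a read.
static void StrtolFixAndCheckSketch(const char *nptr, char **endptr,
                                    char *real_endptr, int base) {
  if (endptr) *endptr = real_endptr;
  if (IsValidStrtolBase(base)) {
    FixRealStrtolEndptr(nptr, &real_endptr);
    // ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
  }
}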
int result = REAL(strtol)(nptr, &real_endptr, 10); FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } INTERCEPTOR(long, atol, const char *nptr) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atol); #if SANITIZER_MAC if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr); #endif @@ -629,33 +666,28 @@ INTERCEPTOR(long, atol, const char *nptr) { // NOLINT char *real_endptr; long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT char **endptr, int base) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, strtoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(strtoll)(nptr, endptr, base); } char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT - if (endptr != 0) { - *endptr = real_endptr; - } - // If base has unsupported value, strtoll can exit with EINVAL - // without reading any characters. So do additional checks only - // if base is valid. - if (IsValidStrtolBase(base)) { - FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); - } + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return result; } INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoll); ENSURE_ASAN_INITED(); if (!flags()->replace_str) { return REAL(atoll)(nptr); @@ -663,7 +695,7 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT char *real_endptr; long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT FixRealStrtolEndptr(nptr, &real_endptr); - ASAN_READ_RANGE(nptr, (real_endptr - nptr) + 1); + ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1); return result; } #endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL @@ -681,7 +713,7 @@ INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg, #endif ENSURE_ASAN_INITED(); int res = REAL(__cxa_atexit)(func, arg, dso_handle); - REAL(__cxa_atexit)(AtCxaAtexit, 0, 0); + REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr); return res; } #endif // ASAN_INTERCEPT___CXA_ATEXIT @@ -696,35 +728,6 @@ INTERCEPTOR(int, fork, void) { } #endif // ASAN_INTERCEPT_FORK -#if SANITIZER_WINDOWS -INTERCEPTOR_WINAPI(DWORD, CreateThread, - void* security, uptr stack_size, - DWORD (__stdcall *start_routine)(void*), void* arg, - DWORD thr_flags, void* tid) { - // Strict init-order checking in thread-hostile. - if (flags()->strict_init_order) - StopInitOrderChecking(); - GET_STACK_TRACE_THREAD; - u32 current_tid = GetCurrentTidOrInvalid(); - AsanThread *t = AsanThread::Create(start_routine, arg); - CreateThreadContextArgs args = { t, &stack }; - bool detached = false; // FIXME: how can we determine it on Windows? 
- asanThreadRegistry().CreateThread(*(uptr*)t, detached, current_tid, &args); - return REAL(CreateThread)(security, stack_size, - asan_thread_start, t, thr_flags, tid); -} - -namespace __asan { -void InitializeWindowsInterceptors() { - ASAN_INTERCEPT_FUNC(CreateThread); - ASAN_INTERCEPT_FUNC(RaiseException); - ASAN_INTERCEPT_FUNC(_except_handler3); - ASAN_INTERCEPT_FUNC(_except_handler4); -} - -} // namespace __asan -#endif - // ---------------------- InitializeAsanInterceptors ---------------- {{{1 namespace __asan { void InitializeAsanInterceptors() { @@ -734,7 +737,6 @@ void InitializeAsanInterceptors() { InitializeCommonInterceptors(); // Intercept mem* functions. - ASAN_INTERCEPT_FUNC(memcmp); ASAN_INTERCEPT_FUNC(memmove); ASAN_INTERCEPT_FUNC(memset); if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { @@ -773,9 +775,8 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(sigaction); #if SANITIZER_ANDROID ASAN_INTERCEPT_FUNC(bsd_signal); -#else - ASAN_INTERCEPT_FUNC(signal); #endif + ASAN_INTERCEPT_FUNC(signal); #endif #if ASAN_INTERCEPT_SWAPCONTEXT ASAN_INTERCEPT_FUNC(swapcontext); @@ -794,8 +795,13 @@ void InitializeAsanInterceptors() { // Intercept threading-related functions #if ASAN_INTERCEPT_PTHREAD_CREATE +#if defined(ASAN_PTHREAD_CREATE_VERSION) + ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION); +#else ASAN_INTERCEPT_FUNC(pthread_create); #endif + ASAN_INTERCEPT_FUNC(pthread_join); +#endif // Intercept atexit function. #if ASAN_INTERCEPT___CXA_ATEXIT @@ -806,12 +812,9 @@ void InitializeAsanInterceptors() { ASAN_INTERCEPT_FUNC(fork); #endif - // Some Windows-specific interceptors. -#if SANITIZER_WINDOWS - InitializeWindowsInterceptors(); -#endif + InitializePlatformInterceptors(); VReport(1, "AddressSanitizer: libc interceptors initialized\n"); } -} // namespace __asan +} // namespace __asan diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h index 95a75db4e02..46c74176ed3 100644 --- a/libsanitizer/asan/asan_interceptors.h +++ b/libsanitizer/asan/asan_interceptors.h @@ -13,7 +13,7 @@ #define ASAN_INTERCEPTORS_H #include "asan_internal.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" #include "sanitizer_common/sanitizer_platform_interceptors.h" // Use macro to describe if specific function should be @@ -90,9 +90,27 @@ struct sigaction; DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act, struct sigaction *oldact) +#if !SANITIZER_MAC +#define ASAN_INTERCEPT_FUNC(name) \ + do { \ + if ((!INTERCEPT_FUNCTION(name) || !REAL(name))) \ + VReport(1, "AddressSanitizer: failed to intercept '" #name "'\n"); \ + } while (0) +#define ASAN_INTERCEPT_FUNC_VER(name, ver) \ + do { \ + if ((!INTERCEPT_FUNCTION_VER(name, ver) || !REAL(name))) \ + VReport( \ + 1, "AddressSanitizer: failed to intercept '" #name "@@" #ver "'\n"); \ + } while (0) +#else +// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION. +#define ASAN_INTERCEPT_FUNC(name) +#endif // SANITIZER_MAC + namespace __asan { void InitializeAsanInterceptors(); +void InitializePlatformInterceptors(); #define ENSURE_ASAN_INITED() do { \ CHECK(!asan_init_is_running); \ diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h index 939327eb8a4..b512bf8bd7a 100644 --- a/libsanitizer/asan/asan_interface_internal.h +++ b/libsanitizer/asan/asan_interface_internal.h @@ -7,8 +7,11 @@ // // This file is a part of AddressSanitizer, an address sanity checker. 
// -// This header can be included by the instrumented program to fetch -// data (mostly allocator statistics) from ASan runtime library. +// This header declares the AddressSanitizer runtime interface functions. +// The runtime library has to define these functions so the instrumented program +// could call them. +// +// See also include/sanitizer/asan_interface.h //===----------------------------------------------------------------------===// #ifndef ASAN_INTERFACE_INTERNAL_H #define ASAN_INTERFACE_INTERNAL_H @@ -22,10 +25,14 @@ using __sanitizer::uptr; extern "C" { // This function should be called at the very beginning of the process, // before any instrumented code is executed and before any call to malloc. - // Please note that __asan_init is a macro that is replaced with - // __asan_init_vXXX at compile-time. SANITIZER_INTERFACE_ATTRIBUTE void __asan_init(); + // This function exists purely to get a linker/loader error when using + // incompatible versions of instrumentation and runtime library. Please note + // that __asan_version_mismatch_check is a macro that is replaced with + // __asan_version_mismatch_check_vXXX at compile-time. + SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check(); + // This structure is used to describe the source location of a place where // global was defined. struct __asan_global_source_location { @@ -123,11 +130,9 @@ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __asan_report_error(uptr pc, uptr bp, uptr sp, - uptr addr, int is_write, uptr access_size); + uptr addr, int is_write, uptr access_size, u32 exp); SANITIZER_INTERFACE_ATTRIBUTE - int __asan_set_error_exit_code(int exit_code); - SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_death_callback(void (*callback)(void)); SANITIZER_INTERFACE_ATTRIBUTE void __asan_set_error_report_callback(void (*callback)(const char*)); @@ -160,6 +165,21 @@ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size, + u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size, + u32 exp); + SANITIZER_INTERFACE_ATTRIBUTE void* __asan_memcpy(void *dst, const void *src, uptr size); SANITIZER_INTERFACE_ATTRIBUTE @@ -175,6 +195,10 @@ extern "C" { void __asan_poison_intra_object_redzone(uptr p, uptr size); SANITIZER_INTERFACE_ATTRIBUTE void __asan_unpoison_intra_object_redzone(uptr p, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_alloca_poison(uptr addr, uptr size); + SANITIZER_INTERFACE_ATTRIBUTE + void __asan_allocas_unpoison(uptr top, uptr bottom); } // extern "C" #endif // ASAN_INTERFACE_INTERNAL_H diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h index 
8911575d84a..e31f2648b1a 100644 --- a/libsanitizer/asan/asan_internal.h +++ b/libsanitizer/asan/asan_internal.h @@ -19,8 +19,6 @@ #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_libc.h" -#define ASAN_DEFAULT_FAILURE_EXITCODE 1 - #if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__) # error "The AddressSanitizer run-time should not be" " instrumented by AddressSanitizer" @@ -73,13 +71,11 @@ void *AsanDoesNotSupportStaticLinkage(); void AsanCheckDynamicRTPrereqs(); void AsanCheckIncompatibleRT(); -void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp); -void AsanOnSIGSEGV(int, void *siginfo, void *context); +void AsanOnDeadlySignal(int, void *siginfo, void *context); +void DisableReexec(); void MaybeReexec(); -bool AsanInterceptsSignal(int signum); void ReadContextStack(void *context, uptr *stack, uptr *ssize); -void AsanPlatformThreadInit(); void StopInitOrderChecking(); // Wrapper for TLS/TSD. @@ -90,10 +86,10 @@ void PlatformTSDDtor(void *tsd); void AppendToErrorMessageBuffer(const char *buffer); -void ParseExtraActivationFlags(); - void *AsanDlSymNext(const char *sym); +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name); + // Platform-specific options. #if SANITIZER_MAC bool PlatformHasDifferentMemcpyAndMemmove(); @@ -134,6 +130,8 @@ const int kAsanGlobalRedzoneMagic = 0xf9; const int kAsanInternalHeapMagic = 0xfe; const int kAsanArrayCookieMagic = 0xac; const int kAsanIntraObjectRedzone = 0xbb; +const int kAsanAllocaLeftMagic = 0xca; +const int kAsanAllocaRightMagic = 0xcb; static const uptr kCurrentStackFrameMagic = 0x41B58AB3; static const uptr kRetiredStackFrameMagic = 0x45E0360E; diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc index c504168b614..4e47d5a0496 100644 --- a/libsanitizer/asan/asan_linux.cc +++ b/libsanitizer/asan/asan_linux.cc @@ -66,6 +66,12 @@ asan_rt_version_t __asan_rt_version; namespace __asan { +void InitializePlatformInterceptors() {} + +void DisableReexec() { + // No need to re-exec on Linux. +} + void MaybeReexec() { // No need to re-exec on Linux. } @@ -105,8 +111,11 @@ static void ReportIncompatibleRT() { } void AsanCheckDynamicRTPrereqs() { + if (!ASAN_DYNAMIC) + return; + // Ensure that dynamic RT is the first DSO in the list - const char *first_dso_name = 0; + const char *first_dso_name = nullptr; dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name); if (first_dso_name && !IsDynamicRTName(first_dso_name)) { Report("ASan runtime does not come first in initial library list; " @@ -131,7 +140,8 @@ void AsanCheckIncompatibleRT() { // system libraries, causing crashes later in ASan initialization. 
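AsanCheckDynamicRTPrereqs above now returns early unless ASAN_DYNAMIC, and otherwise still insists that the dynamic runtime is the first DSO the loader reports. FindFirstDSOCallback itself is outside this hunk; a plausible shape for it, for illustration only (the real callback also has to deal with the main executable, whose dlpi_name is typically empty):

#include <link.h>

// Record the name of the first object dl_iterate_phdr reports, then stop.
static int FindFirstDSO(struct dl_phdr_info *info, size_t, void *data) {
  *static_cast<const char **>(data) = info->dlpi_name;
  return 1;  // any non-zero value stops the iteration
}

// Usage, mirroring AsanCheckDynamicRTPrereqs:
//   const char *first_dso_name = nullptr;
//   dl_iterate_phdr(FindFirstDSO, &first_dso_name);
//   if (first_dso_name && !IsDynamicRTName(first_dso_name)) { ... complain ... }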
MemoryMappingLayout proc_maps(/*cache_enabled*/true); char filename[128]; - while (proc_maps.Next(0, 0, 0, filename, sizeof(filename), 0)) { + while (proc_maps.Next(nullptr, nullptr, nullptr, filename, + sizeof(filename), nullptr)) { if (IsDynamicRTName(filename)) { Report("Your application is linked against " "incompatible ASan runtimes.\n"); @@ -144,87 +154,7 @@ void AsanCheckIncompatibleRT() { } } } -#endif // SANITIZER_ANDROID - -void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { -#if defined(__arm__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.arm_pc; - *bp = ucontext->uc_mcontext.arm_fp; - *sp = ucontext->uc_mcontext.arm_sp; -#elif defined(__aarch64__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.pc; - *bp = ucontext->uc_mcontext.regs[29]; - *sp = ucontext->uc_mcontext.sp; -#elif defined(__hppa__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.sc_iaoq[0]; - /* GCC uses %r3 whenever a frame pointer is needed. */ - *bp = ucontext->uc_mcontext.sc_gr[3]; - *sp = ucontext->uc_mcontext.sc_gr[30]; -#elif defined(__x86_64__) -# if SANITIZER_FREEBSD - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.mc_rip; - *bp = ucontext->uc_mcontext.mc_rbp; - *sp = ucontext->uc_mcontext.mc_rsp; -# else - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.gregs[REG_RIP]; - *bp = ucontext->uc_mcontext.gregs[REG_RBP]; - *sp = ucontext->uc_mcontext.gregs[REG_RSP]; -# endif -#elif defined(__i386__) -# if SANITIZER_FREEBSD - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.mc_eip; - *bp = ucontext->uc_mcontext.mc_ebp; - *sp = ucontext->uc_mcontext.mc_esp; -# else - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.gregs[REG_EIP]; - *bp = ucontext->uc_mcontext.gregs[REG_EBP]; - *sp = ucontext->uc_mcontext.gregs[REG_ESP]; -# endif -#elif defined(__powerpc__) || defined(__powerpc64__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.regs->nip; - *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; - // The powerpc{,64}-linux ABIs do not specify r31 as the frame - // pointer, but GCC always uses r31 when we need a frame pointer. - *bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; -#elif defined(__sparc__) - ucontext_t *ucontext = (ucontext_t*)context; - uptr *stk_ptr; -# if defined (__arch64__) - *pc = ucontext->uc_mcontext.mc_gregs[MC_PC]; - *sp = ucontext->uc_mcontext.mc_gregs[MC_O6]; - stk_ptr = (uptr *) (*sp + 2047); - *bp = stk_ptr[15]; -# else - *pc = ucontext->uc_mcontext.gregs[REG_PC]; - *sp = ucontext->uc_mcontext.gregs[REG_O6]; - stk_ptr = (uptr *) *sp; - *bp = stk_ptr[15]; -# endif -#elif defined(__mips__) - ucontext_t *ucontext = (ucontext_t*)context; - *pc = ucontext->uc_mcontext.gregs[31]; - *bp = ucontext->uc_mcontext.gregs[30]; - *sp = ucontext->uc_mcontext.gregs[29]; -#else -# error "Unsupported arch" -#endif -} - -bool AsanInterceptsSignal(int signum) { - return signum == SIGSEGV && common_flags()->handle_segv; -} - -void AsanPlatformThreadInit() { - // Nothing here for now. 
-} +#endif // SANITIZER_ANDROID #if !SANITIZER_ANDROID void ReadContextStack(void *context, uptr *stack, uptr *ssize) { @@ -242,6 +172,6 @@ void *AsanDlSymNext(const char *sym) { return dlsym(RTLD_NEXT, sym); } -} // namespace __asan +} // namespace __asan -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc index 70823bdef92..036d7ff65f1 100644 --- a/libsanitizer/asan/asan_mac.cc +++ b/libsanitizer/asan/asan_mac.cc @@ -22,7 +22,14 @@ #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_mac.h" -#include <crt_externs.h> // for _NSGetArgv +#if !SANITIZER_IOS +#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron +#else +extern "C" { + extern char ***_NSGetArgv(void); +} +#endif + #include <dlfcn.h> // for dladdr() #include <mach-o/dyld.h> #include <mach-o/loader.h> @@ -38,19 +45,7 @@ namespace __asan { -void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { - ucontext_t *ucontext = (ucontext_t*)context; -# if SANITIZER_WORDSIZE == 64 - *pc = ucontext->uc_mcontext->__ss.__rip; - *bp = ucontext->uc_mcontext->__ss.__rbp; - *sp = ucontext->uc_mcontext->__ss.__rsp; -# else - *pc = ucontext->uc_mcontext->__ss.__eip; - *bp = ucontext->uc_mcontext->__ss.__ebp; - *sp = ucontext->uc_mcontext->__ss.__esp; -# endif // SANITIZER_WORDSIZE -} - +void InitializePlatformInterceptors() {} bool PlatformHasDifferentMemcpyAndMemmove() { // On OS X 10.7 memcpy() and memmove() are both resolved @@ -72,35 +67,51 @@ LowLevelAllocator allocator_for_env; // otherwise the corresponding "NAME=value" string is replaced with // |name_value|. void LeakyResetEnv(const char *name, const char *name_value) { - char ***env_ptr = _NSGetEnviron(); - CHECK(env_ptr); - char **environ = *env_ptr; - CHECK(environ); + char **env = GetEnviron(); uptr name_len = internal_strlen(name); - while (*environ != 0) { - uptr len = internal_strlen(*environ); + while (*env != 0) { + uptr len = internal_strlen(*env); if (len > name_len) { - const char *p = *environ; + const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. if (name_value) { // Replace the old value with the new one. - *environ = const_cast<char*>(name_value); + *env = const_cast<char*>(name_value); } else { // Shift the subsequent pointers back. - char **del = environ; + char **del = env; do { del[0] = del[1]; } while (*del++); } } } - environ++; + env++; } } +static bool reexec_disabled = false; + +void DisableReexec() { + reexec_disabled = true; +} + +extern "C" double dyldVersionNumber; +static const double kMinDyldVersionWithAutoInterposition = 360.0; + +bool DyldNeedsEnvVariable() { + // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if + // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via + // GetMacosVersion() doesn't work for the simulator. Let's instead check + // `dyldVersionNumber`, which is exported by dyld, against a known version + // number from the first OS release where this appeared. + return dyldVersionNumber < kMinDyldVersionWithAutoInterposition; +} + void MaybeReexec() { - if (!flags()->allow_reexec) return; + if (reexec_disabled) return; + // Make sure the dynamic ASan runtime library is preloaded so that the // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec // ourselves. @@ -111,8 +122,12 @@ void MaybeReexec() { uptr old_env_len = dyld_insert_libraries ? 
internal_strlen(dyld_insert_libraries) : 0; uptr fname_len = internal_strlen(info.dli_fname); - if (!dyld_insert_libraries || - !REAL(strstr)(dyld_insert_libraries, info.dli_fname)) { + const char *dylib_name = StripModuleName(info.dli_fname); + uptr dylib_name_len = internal_strlen(dylib_name); + + bool lib_is_in_env = + dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name); + if (DyldNeedsEnvVariable() && !lib_is_in_env) { // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime // library. char program_name[1024]; @@ -138,58 +153,77 @@ void MaybeReexec() { VReport(1, "exec()-ing the program with\n"); VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env); VReport(1, "to enable ASan wrappers.\n"); - VReport(1, "Set ASAN_OPTIONS=allow_reexec=0 to disable this.\n"); execv(program_name, *_NSGetArgv()); - } else { - // DYLD_INSERT_LIBRARIES is set and contains the runtime library. - if (old_env_len == fname_len) { - // It's just the runtime library name - fine to unset the variable. - LeakyResetEnv(kDyldInsertLibraries, NULL); + + // We get here only if execv() failed. + Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, " + "which is required for ASan to work. ASan tried to set the " + "environment variable and re-execute itself, but execv() failed, " + "possibly because of sandbox restrictions. Make sure to launch the " + "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env); + CHECK("execv failed" && 0); + } + + if (!lib_is_in_env) + return; + + // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove + // the dylib from the environment variable, because interceptors are installed + // and we don't want our children to inherit the variable. + + uptr env_name_len = internal_strlen(kDyldInsertLibraries); + // Allocate memory to hold the previous env var name, its value, the '=' + // sign and the '\0' char. + char *new_env = (char*)allocator_for_env.Allocate( + old_env_len + 2 + env_name_len); + CHECK(new_env); + internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); + internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); + new_env[env_name_len] = '='; + char *new_env_pos = new_env + env_name_len + 1; + + // Iterate over colon-separated pieces of |dyld_insert_libraries|. + char *piece_start = dyld_insert_libraries; + char *piece_end = NULL; + char *old_env_end = dyld_insert_libraries + old_env_len; + do { + if (piece_start[0] == ':') piece_start++; + piece_end = REAL(strchr)(piece_start, ':'); + if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; + if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; + uptr piece_len = piece_end - piece_start; + + char *filename_start = + (char *)internal_memrchr(piece_start, '/', piece_len); + uptr filename_len = piece_len; + if (filename_start) { + filename_start += 1; + filename_len = piece_len - (filename_start - piece_start); } else { - uptr env_name_len = internal_strlen(kDyldInsertLibraries); - // Allocate memory to hold the previous env var name, its value, the '=' - // sign and the '\0' char. - char *new_env = (char*)allocator_for_env.Allocate( - old_env_len + 2 + env_name_len); - CHECK(new_env); - internal_memset(new_env, '\0', old_env_len + 2 + env_name_len); - internal_strncpy(new_env, kDyldInsertLibraries, env_name_len); - new_env[env_name_len] = '='; - char *new_env_pos = new_env + env_name_len + 1; - - // Iterate over colon-separated pieces of |dyld_insert_libraries|. 
- char *piece_start = dyld_insert_libraries; - char *piece_end = NULL; - char *old_env_end = dyld_insert_libraries + old_env_len; - do { - if (piece_start[0] == ':') piece_start++; - piece_end = REAL(strchr)(piece_start, ':'); - if (!piece_end) piece_end = dyld_insert_libraries + old_env_len; - if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break; - uptr piece_len = piece_end - piece_start; - - // If the current piece isn't the runtime library name, - // append it to new_env. - if ((piece_len != fname_len) || - (internal_strncmp(piece_start, info.dli_fname, fname_len) != 0)) { - if (new_env_pos != new_env + env_name_len + 1) { - new_env_pos[0] = ':'; - new_env_pos++; - } - internal_strncpy(new_env_pos, piece_start, piece_len); - } - // Move on to the next piece. - new_env_pos += piece_len; - piece_start = piece_end; - } while (piece_start < old_env_end); - - // Can't use setenv() here, because it requires the allocator to be - // initialized. - // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in - // a separate function called after InitializeAllocator(). - LeakyResetEnv(kDyldInsertLibraries, new_env); + filename_start = piece_start; } - } + + // If the current piece isn't the runtime library name, + // append it to new_env. + if ((dylib_name_len != filename_len) || + (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) { + if (new_env_pos != new_env + env_name_len + 1) { + new_env_pos[0] = ':'; + new_env_pos++; + } + internal_strncpy(new_env_pos, piece_start, piece_len); + new_env_pos += piece_len; + } + // Move on to the next piece. + piece_start = piece_end; + } while (piece_start < old_env_end); + + // Can't use setenv() here, because it requires the allocator to be + // initialized. + // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in + // a separate function called after InitializeAllocator(). + if (new_env_pos == new_env + env_name_len + 1) new_env = NULL; + LeakyResetEnv(kDyldInsertLibraries, new_env); } // No-op. Mac does not support static linkage anyway. @@ -203,14 +237,6 @@ void AsanCheckDynamicRTPrereqs() {} // No-op. Mac does not support static linkage anyway. 
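The rewritten MaybeReexec above no longer unsets DYLD_INSERT_LIBRARIES wholesale: once interceptors are installed it strips just the ASan dylib (compared by basename, via StripModuleName) from the colon-separated list so that child processes do not inherit it, and passes NULL to LeakyResetEnv only when nothing else was in the variable. A conceptual version of that filtering in ordinary C++ (the runtime cannot use std::string this early, so it does the same thing with internal_* routines and a LowLevelAllocator):

#include <sstream>
#include <string>

// Drop every colon-separated entry whose basename matches the ASan dylib;
// keep everything else, preserving order. An empty result means the variable
// can simply be removed.
static std::string FilterInsertLibraries(const std::string &env,
                                         const std::string &dylib_name) {
  std::string out;
  std::istringstream pieces(env);
  std::string piece;
  while (std::getline(pieces, piece, ':')) {
    std::string base = piece.substr(piece.find_last_of('/') + 1);
    if (base == dylib_name) continue;  // skip the ASan runtime itself
    if (!out.empty()) out += ':';
    out += piece;
  }
  return out;
}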
void AsanCheckIncompatibleRT() {} -bool AsanInterceptsSignal(int signum) { - return (signum == SIGSEGV || signum == SIGBUS) && - common_flags()->handle_segv; -} - -void AsanPlatformThreadInit() { -} - void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } @@ -262,9 +288,8 @@ ALWAYS_INLINE void asan_register_worker_thread(int parent_tid, StackTrace *stack) { AsanThread *t = GetCurrentThread(); if (!t) { - t = AsanThread::Create(0, 0); - CreateThreadContextArgs args = { t, stack }; - asanThreadRegistry().CreateThread(*(uptr*)t, true, parent_tid, &args); + t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr, + parent_tid, stack, /* detached */ true); t->Init(); asanThreadRegistry().StartThread(t->tid(), 0, 0); SetCurrentThread(t); @@ -311,7 +336,7 @@ asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func, dispatch_function_t func) { \ GET_STACK_TRACE_THREAD; \ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \ - if (common_flags()->verbosity >= 2) { \ + if (Verbosity() >= 2) { \ Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \ asan_ctxt, pthread_self()); \ PRINT_CURRENT_STACK(); \ @@ -329,7 +354,7 @@ INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_function_t func) { GET_STACK_TRACE_THREAD; asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); - if (common_flags()->verbosity >= 2) { + if (Verbosity() >= 2) { Report("dispatch_after_f: %p\n", asan_ctxt); PRINT_CURRENT_STACK(); } @@ -342,7 +367,7 @@ INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group, dispatch_function_t func) { GET_STACK_TRACE_THREAD; asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); - if (common_flags()->verbosity >= 2) { + if (Verbosity() >= 2) { Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n", asan_ctxt, pthread_self()); PRINT_CURRENT_STACK(); @@ -372,13 +397,6 @@ void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void)); work(); \ } -// Forces the compiler to generate a frame pointer in the function. -#define ENABLE_FRAME_POINTER \ - do { \ - volatile uptr enable_fp; \ - enable_fp = GET_CURRENT_FRAME(); \ - } while (0) - INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void(^work)(void)) { ENABLE_FRAME_POINTER; @@ -402,6 +420,10 @@ INTERCEPTOR(void, dispatch_after, INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds, void(^work)(void)) { + if (!work) { + REAL(dispatch_source_set_cancel_handler)(ds, work); + return; + } ENABLE_FRAME_POINTER; GET_ASAN_BLOCK(work); REAL(dispatch_source_set_cancel_handler)(ds, asan_block); diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc index 6a93ce1e808..fbe05490ce7 100644 --- a/libsanitizer/asan/asan_malloc_mac.cc +++ b/libsanitizer/asan/asan_malloc_mac.cc @@ -88,9 +88,9 @@ INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) { ENSURE_ASAN_INITED(); // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes. size_t buflen = 6 + (name ? 
internal_strlen(name) : 0); - InternalScopedBuffer<char> new_name(buflen); + InternalScopedString new_name(buflen); if (name && zone->introspect == asan_zone.introspect) { - internal_snprintf(new_name.data(), buflen, "asan-%s", name); + new_name.append("asan-%s", name); name = new_name.data(); } @@ -150,13 +150,17 @@ INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) { namespace { -// TODO(glider): the mz_* functions should be united with the Linux wrappers, -// as they are basically copied from there. -size_t mz_size(malloc_zone_t* zone, const void* ptr) { +// TODO(glider): the __asan_mz_* functions should be united with the Linux +// wrappers, as they are basically copied from there. +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +size_t __asan_mz_size(malloc_zone_t* zone, const void* ptr) { return asan_mz_size(ptr); } -void *mz_malloc(malloc_zone_t *zone, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_malloc(malloc_zone_t *zone, uptr size) { if (UNLIKELY(!asan_inited)) { CHECK(system_malloc_zone); return malloc_zone_malloc(system_malloc_zone, size); @@ -165,7 +169,9 @@ void *mz_malloc(malloc_zone_t *zone, size_t size) { return asan_malloc(size, &stack); } -void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { if (UNLIKELY(!asan_inited)) { // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. const size_t kCallocPoolSize = 1024; @@ -181,7 +187,9 @@ void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) { return asan_calloc(nmemb, size, &stack); } -void *mz_valloc(malloc_zone_t *zone, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_valloc(malloc_zone_t *zone, size_t size) { if (UNLIKELY(!asan_inited)) { CHECK(system_malloc_zone); return malloc_zone_valloc(system_malloc_zone, size); @@ -208,11 +216,15 @@ void ALWAYS_INLINE free_common(void *context, void *ptr) { } // TODO(glider): the allocation callbacks need to be refactored. -void mz_free(malloc_zone_t *zone, void *ptr) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_mz_free(malloc_zone_t *zone, void *ptr) { free_common(zone, ptr); } -void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { if (!ptr) { GET_STACK_TRACE_MALLOC; return asan_malloc(size, &stack); @@ -231,15 +243,16 @@ void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) { } } -void mz_destroy(malloc_zone_t* zone) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void __asan_mz_destroy(malloc_zone_t* zone) { // A no-op -- we will not be destroyed! - Report("mz_destroy() called -- ignoring\n"); + Report("__asan_mz_destroy() called -- ignoring\n"); } - // from AvailabilityMacros.h -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 -void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { +extern "C" +SANITIZER_INTERFACE_ATTRIBUTE +void *__asan_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { if (UNLIKELY(!asan_inited)) { CHECK(system_malloc_zone); return malloc_zone_memalign(system_malloc_zone, align, size); @@ -250,12 +263,12 @@ void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) { // This function is currently unused, and we build with -Werror. 
#if 0 -void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) { +void __asan_mz_free_definite_size( + malloc_zone_t* zone, void *ptr, size_t size) { // TODO(glider): check that |size| is valid. UNIMPLEMENTED(); } #endif -#endif kern_return_t mi_enumerator(task_t task, void *, unsigned type_mask, vm_address_t zone_address, @@ -297,13 +310,10 @@ void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) { internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t)); } -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 boolean_t mi_zone_locked(malloc_zone_t *zone) { // UNIMPLEMENTED(); return false; } -#endif } // unnamed namespace @@ -322,32 +332,25 @@ void ReplaceSystemMalloc() { asan_introspection.force_lock = &mi_force_lock; asan_introspection.force_unlock = &mi_force_unlock; asan_introspection.statistics = &mi_statistics; + asan_introspection.zone_locked = &mi_zone_locked; internal_memset(&asan_zone, 0, sizeof(malloc_zone_t)); - // Start with a version 4 zone which is used for OS X 10.4 and 10.5. - asan_zone.version = 4; + // Use version 6 for OSX >= 10.6. + asan_zone.version = 6; asan_zone.zone_name = "asan"; - asan_zone.size = &mz_size; - asan_zone.malloc = &mz_malloc; - asan_zone.calloc = &mz_calloc; - asan_zone.valloc = &mz_valloc; - asan_zone.free = &mz_free; - asan_zone.realloc = &mz_realloc; - asan_zone.destroy = &mz_destroy; + asan_zone.size = &__asan_mz_size; + asan_zone.malloc = &__asan_mz_malloc; + asan_zone.calloc = &__asan_mz_calloc; + asan_zone.valloc = &__asan_mz_valloc; + asan_zone.free = &__asan_mz_free; + asan_zone.realloc = &__asan_mz_realloc; + asan_zone.destroy = &__asan_mz_destroy; asan_zone.batch_malloc = 0; asan_zone.batch_free = 0; - asan_zone.introspect = &asan_introspection; - - // from AvailabilityMacros.h -#if defined(MAC_OS_X_VERSION_10_6) && \ - MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6 - // Switch to version 6 on OSX 10.6 to support memalign. - asan_zone.version = 6; asan_zone.free_definite_size = 0; - asan_zone.memalign = &mz_memalign; - asan_introspection.zone_locked = &mi_zone_locked; -#endif + asan_zone.memalign = &__asan_mz_memalign; + asan_zone.introspect = &asan_introspection; // Register the ASan zone. 
malloc_zone_register(&asan_zone); diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc index bbcf80e96ba..f2ab188bf5b 100644 --- a/libsanitizer/asan/asan_malloc_win.cc +++ b/libsanitizer/asan/asan_malloc_win.cc @@ -17,7 +17,7 @@ #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_stack.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" #include <stddef.h> diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h index 907704d7fc9..b158bd2979d 100644 --- a/libsanitizer/asan/asan_mapping.h +++ b/libsanitizer/asan/asan_mapping.h @@ -57,13 +57,34 @@ // || `[0x20000000, 0x23ffffff]` || LowShadow || // || `[0x00000000, 0x1fffffff]` || LowMem || // -// Default Linux/MIPS mapping: +// Default Linux/MIPS32 mapping: // || `[0x2aaa0000, 0xffffffff]` || HighMem || // || `[0x0fff4000, 0x2aa9ffff]` || HighShadow || // || `[0x0bff4000, 0x0fff3fff]` || ShadowGap || // || `[0x0aaa0000, 0x0bff3fff]` || LowShadow || // || `[0x00000000, 0x0aa9ffff]` || LowMem || // +// Default Linux/MIPS64 mapping: +// || `[0x4000000000, 0xffffffffff]` || HighMem || +// || `[0x2800000000, 0x3fffffffff]` || HighShadow || +// || `[0x2400000000, 0x27ffffffff]` || ShadowGap || +// || `[0x2000000000, 0x23ffffffff]` || LowShadow || +// || `[0x0000000000, 0x1fffffffff]` || LowMem || +// +// Default Linux/AArch64 (39-bit VMA) mapping: +// || `[0x2000000000, 0x7fffffffff]` || highmem || +// || `[0x1400000000, 0x1fffffffff]` || highshadow || +// || `[0x1200000000, 0x13ffffffff]` || shadowgap || +// || `[0x1000000000, 0x11ffffffff]` || lowshadow || +// || `[0x0000000000, 0x0fffffffff]` || lowmem || +// +// Default Linux/AArch64 (42-bit VMA) mapping: +// || `[0x10000000000, 0x3ffffffffff]` || highmem || +// || `[0x0a000000000, 0x0ffffffffff]` || highshadow || +// || `[0x09000000000, 0x09fffffffff]` || shadowgap || +// || `[0x08000000000, 0x08fffffffff]` || lowshadow || +// || `[0x00000000000, 0x07fffffffff]` || lowmem || +// // Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000: // || `[0x500000000000, 0x7fffffffffff]` || HighMem || // || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow || @@ -77,36 +98,56 @@ // || `[0x48000000, 0x4bffffff]` || ShadowGap || // || `[0x40000000, 0x47ffffff]` || LowShadow || // || `[0x00000000, 0x3fffffff]` || LowMem || +// +// Default Windows/i386 mapping: +// (the exact location of HighShadow/HighMem may vary depending +// on WoW64, /LARGEADDRESSAWARE, etc). +// || `[0x50000000, 0xffffffff]` || HighMem || +// || `[0x3a000000, 0x4fffffff]` || HighShadow || +// || `[0x36000000, 0x39ffffff]` || ShadowGap || +// || `[0x30000000, 0x35ffffff]` || LowShadow || +// || `[0x00000000, 0x2fffffff]` || LowMem || static const u64 kDefaultShadowScale = 3; static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000 -static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kDefaultShadowOffset64 = 1ULL << 44; static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G. 
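The mapping tables above all follow the same rule, MEM_TO_SHADOW, defined later in this header as (addr >> SHADOW_SCALE) + SHADOW_OFFSET, just with different offsets per platform and word size. A quick check of that arithmetic against the 32-bit default layout (kDefaultShadowScale = 3, kDefaultShadowOffset32 = 0x20000000):

#include <cstdint>

// shadow = (addr >> SHADOW_SCALE) + SHADOW_OFFSET
constexpr std::uint64_t kShadowScale = 3;            // kDefaultShadowScale
constexpr std::uint64_t kShadowOffset = 1ULL << 29;  // kDefaultShadowOffset32

constexpr std::uint64_t MemToShadow(std::uint64_t addr) {
  return (addr >> kShadowScale) + kShadowOffset;
}

// LowMem [0x00000000, 0x1fffffff] lands exactly on LowShadow
// [0x20000000, 0x23ffffff] from the 32-bit table above.
static_assert(MemToShadow(0x00000000) == 0x20000000, "LowMem start");
static_assert(MemToShadow(0x1fffffff) == 0x23ffffff, "LowMem end");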
+static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000 +static const u64 kIosShadowOffset64 = 0x130000000; +static const u64 kIosSimShadowOffset32 = 1ULL << 30; +static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64; +#if SANITIZER_AARCH64_VMA == 39 static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; +#elif SANITIZER_AARCH64_VMA == 42 +static const u64 kAArch64_ShadowOffset64 = 1ULL << 39; +#endif static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; -static const u64 kMIPS64_ShadowOffset64 = 1ULL << 36; +static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000 static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 +static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 #define SHADOW_SCALE kDefaultShadowScale -#if SANITIZER_ANDROID -# define SHADOW_OFFSET (0) -#else -# if SANITIZER_WORDSIZE == 32 -# if defined(__mips__) + + +#if SANITIZER_WORDSIZE == 32 +# if SANITIZER_ANDROID +# define SHADOW_OFFSET (0) +# elif defined(__mips__) # define SHADOW_OFFSET kMIPS32_ShadowOffset32 # elif SANITIZER_FREEBSD # define SHADOW_OFFSET kFreeBSD_ShadowOffset32 +# elif SANITIZER_WINDOWS +# define SHADOW_OFFSET kWindowsShadowOffset32 +# elif SANITIZER_IOSSIM +# define SHADOW_OFFSET kIosSimShadowOffset32 +# elif SANITIZER_IOS +# define SHADOW_OFFSET kIosShadowOffset32 # else -# if SANITIZER_IOS -# define SHADOW_OFFSET kIosShadowOffset32 -# else -# define SHADOW_OFFSET kDefaultShadowOffset32 -# endif +# define SHADOW_OFFSET kDefaultShadowOffset32 # endif -# else +#else # if defined(__aarch64__) # define SHADOW_OFFSET kAArch64_ShadowOffset64 # elif defined(__powerpc64__) @@ -117,10 +158,13 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 # define SHADOW_OFFSET kDefaultShadowOffset64 # elif defined(__mips64) # define SHADOW_OFFSET kMIPS64_ShadowOffset64 +# elif SANITIZER_IOSSIM +# define SHADOW_OFFSET kIosSimShadowOffset64 +# elif SANITIZER_IOS +# define SHADOW_OFFSET kIosShadowOffset64 # else # define SHADOW_OFFSET kDefaultShort64bitShadowOffset # endif -# endif #endif #define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE) @@ -143,7 +187,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000 // With the zero shadow base we can not actually map pages starting from 0. // This constant is somewhat arbitrary. -#define kZeroBaseShadowStart (1 << 18) +#define kZeroBaseShadowStart 0 +#define kZeroBaseMaxShadowStart (1 << 18) #define kShadowGapBeg (kLowShadowEnd ? 
kLowShadowEnd + 1 \ : kZeroBaseShadowStart) diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc index 9d6660ec7b4..719cdfa2bb9 100644 --- a/libsanitizer/asan/asan_new_delete.cc +++ b/libsanitizer/asan/asan_new_delete.cc @@ -14,7 +14,7 @@ #include "asan_internal.h" #include "asan_stack.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" #include <stddef.h> @@ -88,11 +88,11 @@ INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) { #if !SANITIZER_MAC CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr) throw() { +void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr) throw() { +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE @@ -104,12 +104,12 @@ void operator delete[](void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY(FROM_NEW_BR); } CXX_OPERATOR_ATTRIBUTE -void operator delete(void *ptr, size_t size) throw() { +void operator delete(void *ptr, size_t size) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW); } CXX_OPERATOR_ATTRIBUTE -void operator delete[](void *ptr, size_t size) throw() { +void operator delete[](void *ptr, size_t size) NOEXCEPT { GET_STACK_TRACE_FREE; asan_sized_free(ptr, size, &stack, FROM_NEW_BR); } diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc index c0f3991c1f3..78604310fcb 100644 --- a/libsanitizer/asan/asan_poisoning.cc +++ b/libsanitizer/asan/asan_poisoning.cc @@ -13,13 +13,24 @@ #include "asan_poisoning.h" #include "asan_report.h" #include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_flags.h" namespace __asan { +static atomic_uint8_t can_poison_memory; + +void SetCanPoisonMemory(bool value) { + atomic_store(&can_poison_memory, value, memory_order_release); +} + +bool CanPoisonMemory() { + return atomic_load(&can_poison_memory, memory_order_acquire); +} + void PoisonShadow(uptr addr, uptr size, u8 value) { - if (!flags()->poison_heap) return; + if (!CanPoisonMemory()) return; CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsInMem(addr)); CHECK(AddrIsAlignedByGranularity(addr + size)); @@ -32,7 +43,7 @@ void PoisonShadowPartialRightRedzone(uptr addr, uptr size, uptr redzone_size, u8 value) { - if (!flags()->poison_heap) return; + if (!CanPoisonMemory()) return; CHECK(AddrIsAlignedByGranularity(addr)); CHECK(AddrIsInMem(addr)); FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value); @@ -61,10 +72,10 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) { void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) { uptr end = ptr + size; - if (common_flags()->verbosity) { + if (Verbosity()) { Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n", poison ? 
"" : "un", ptr, end, size); - if (common_flags()->verbosity >= 2) + if (Verbosity() >= 2) PRINT_CURRENT_STACK(); } CHECK(size); @@ -99,7 +110,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) { if (!flags()->allow_user_poisoning || size == 0) return; uptr beg_addr = (uptr)addr; uptr end_addr = beg_addr + size; - VPrintf(1, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, + VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr, (void *)end_addr); ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint end(end_addr); @@ -139,7 +150,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) { if (!flags()->allow_user_poisoning || size == 0) return; uptr beg_addr = (uptr)addr; uptr end_addr = beg_addr + size; - VPrintf(1, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, + VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr, (void *)end_addr); ShadowSegmentEndpoint beg(beg_addr); ShadowSegmentEndpoint end(end_addr); @@ -205,7 +216,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) { __asan::AddressIsPoisoned(__p + __size - 1))) { \ GET_CURRENT_PC_BP_SP; \ uptr __bad = __asan_region_is_poisoned(__p, __size); \ - __asan_report_error(pc, bp, sp, __bad, isWrite, __size);\ + __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\ } \ } while (false); \ diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h index 9644b7d840e..30e39e9077c 100644 --- a/libsanitizer/asan/asan_poisoning.h +++ b/libsanitizer/asan/asan_poisoning.h @@ -17,6 +17,10 @@ namespace __asan { +// Enable/disable memory poisoning. +void SetCanPoisonMemory(bool value); +bool CanPoisonMemory(); + // Poisons the shadow memory for "size" bytes starting from "addr". void PoisonShadow(uptr addr, uptr size, u8 value); @@ -32,7 +36,7 @@ void PoisonShadowPartialRightRedzone(uptr addr, // performance-critical code with care. 
ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, u8 value) { - DCHECK(flags()->poison_heap); + DCHECK(CanPoisonMemory()); uptr shadow_beg = MEM_TO_SHADOW(aligned_beg); uptr shadow_end = MEM_TO_SHADOW( aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1; @@ -58,15 +62,14 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size, if (page_end != shadow_end) { REAL(memset)((void *)page_end, 0, shadow_end - page_end); } - void *res = MmapFixedNoReserve(page_beg, page_end - page_beg); - CHECK_EQ(page_beg, res); + ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr); } } } ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone( uptr aligned_addr, uptr size, uptr redzone_size, u8 value) { - DCHECK(flags()->poison_heap); + DCHECK(CanPoisonMemory()); bool poison_partial = flags()->poison_partial; u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr); for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) { diff --git a/libsanitizer/asan/asan_posix.cc b/libsanitizer/asan/asan_posix.cc index 06d24d4e0ee..5b532e950bb 100644 --- a/libsanitizer/asan/asan_posix.cc +++ b/libsanitizer/asan/asan_posix.cc @@ -19,6 +19,7 @@ #include "asan_report.h" #include "asan_stack.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include <pthread.h> @@ -30,26 +31,52 @@ namespace __asan { -void AsanOnSIGSEGV(int, void *siginfo, void *context) { +void AsanOnDeadlySignal(int signo, void *siginfo, void *context) { ScopedDeadlySignal signal_scope(GetCurrentThread()); - uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr; int code = (int)((siginfo_t*)siginfo)->si_code; // Write the first message using the bullet-proof write. - if (13 != internal_write(2, "ASAN:SIGSEGV\n", 13)) Die(); - uptr pc, sp, bp; - GetPcSpBp(context, &pc, &sp, &bp); + if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die(); + SignalContext sig = SignalContext::Create(siginfo, context); // Access at a reasonable offset above SP, or slightly below it (to account // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is // probably a stack overflow. + bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF; + +#if __powerpc__ + // Large stack frames can be allocated with e.g. + // lis r0,-10000 + // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000 + // If the store faults then sp will not have been updated, so test above + // will not work, becase the fault address will be more than just "slightly" + // below sp. + if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) { + u32 inst = *(unsigned *)sig.pc; + u32 ra = (inst >> 16) & 0x1F; + u32 opcd = inst >> 26; + u32 xo = (inst >> 1) & 0x3FF; + // Check for store-with-update to sp. The instructions we accept are: + // stbu rs,d(ra) stbux rs,ra,rb + // sthu rs,d(ra) sthux rs,ra,rb + // stwu rs,d(ra) stwux rs,ra,rb + // stdu rs,ds(ra) stdux rs,ra,rb + // where ra is r1 (the stack pointer). + if (ra == 1 && + (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 || + (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181)))) + IsStackAccess = true; + } +#endif // __powerpc__ + // We also check si_code to filter out SEGV caused by something else other // then hitting the guard page or unmapped memory, like, for example, // unaligned memory access. 
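The PowerPC block added to AsanOnDeadlySignal above covers frames so large that they are allocated with a store-with-update through r1 (e.g. stdux): the store faults before the stack pointer moves, so the fault address can be far below sp and the simple proximity test misses it. The heuristic therefore also accepts a fault whose instruction is a store-with-update based on r1; restated as a standalone predicate (same field extraction and opcode list as the hunk above):

#include <cstdint>

// True if |inst| is a PowerPC store-with-update using r1 (the stack pointer)
// as its base register: stbu/sthu/stwu/stdu and their indexed *ux forms.
static bool IsStoreWithUpdateToSp(std::uint32_t inst) {
  std::uint32_t ra = (inst >> 16) & 0x1F;   // base register field
  std::uint32_t opcd = inst >> 26;          // primary opcode
  std::uint32_t xo = (inst >> 1) & 0x3FF;   // extended opcode (indexed forms)
  return ra == 1 &&
         (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
          (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181)));
}

IsAccessibleMemoryRange guards the read of the faulting instruction, so the handler does not itself fault on a bad pc.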
- if (addr + 512 > sp && addr < sp + 0xFFFF && - (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) - ReportStackOverflow(pc, sp, bp, context, addr); + if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR)) + ReportStackOverflow(sig); + else if (signo == SIGFPE) + ReportDeadlySignal("FPE", sig); else - ReportSIGSEGV("SEGV", pc, sp, bp, context, addr); + ReportDeadlySignal("SEGV", sig); } // ---------------------- TSD ---------------- {{{1 diff --git a/libsanitizer/asan/asan_preinit.cc b/libsanitizer/asan/asan_preinit.cc index 2ce1fb9b666..6cb115bd369 100644 --- a/libsanitizer/asan/asan_preinit.cc +++ b/libsanitizer/asan/asan_preinit.cc @@ -11,9 +11,13 @@ //===----------------------------------------------------------------------===// #include "asan_internal.h" +using namespace __asan; + #if SANITIZER_CAN_USE_PREINIT_ARRAY // The symbol is called __local_asan_preinit, because it's not intended to be // exported. + // This code linked into the main executable when -fsanitize=address is in + // the link flags. It can only use exported interface functions. __attribute__((section(".preinit_array"), used)) void (*__local_asan_preinit)(void) = __asan_init; #endif diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc index fcccb70ff8b..cbfc6f548e4 100644 --- a/libsanitizer/asan/asan_report.cc +++ b/libsanitizer/asan/asan_report.cc @@ -9,6 +9,7 @@ // // This file contains error reporting code. //===----------------------------------------------------------------------===// + #include "asan_flags.h" #include "asan_internal.h" #include "asan_mapping.h" @@ -25,7 +26,7 @@ namespace __asan { // -------------------- User-specified callbacks ----------------- {{{1 static void (*error_report_callback)(const char*); -static char *error_message_buffer = 0; +static char *error_message_buffer = nullptr; static uptr error_message_buffer_pos = 0; static uptr error_message_buffer_size = 0; @@ -51,7 +52,7 @@ void AppendToErrorMessageBuffer(const char *buffer) { buffer, remaining); error_message_buffer[error_message_buffer_size - 1] = '\0'; // FIXME: reallocate the buffer instead of truncating the message. - error_message_buffer_pos += remaining > length ? 
length : remaining; + error_message_buffer_pos += Min(remaining, length); } } @@ -85,6 +86,8 @@ class Decorator: public __sanitizer::SanitizerCommonDecorator { return Cyan(); case kAsanUserPoisonedMemoryMagic: case kAsanContiguousContainerOOBMagic: + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: return Blue(); case kAsanStackUseAfterScopeMagic: return Magenta(); @@ -171,6 +174,8 @@ static void PrintLegend(InternalScopedString *str) { PrintShadowByte(str, " Intra object redzone: ", kAsanIntraObjectRedzone); PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic); + PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic); + PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic); } void MaybeDumpInstructionBytes(uptr pc) { @@ -275,9 +280,8 @@ static void PrintGlobalLocation(InternalScopedString *str, str->append(":%d", g.location->column_no); } -bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, - const __asan_global &g) { - if (!IsAddressNearGlobal(addr, g)) return false; +static void DescribeAddressRelativeToGlobal(uptr addr, uptr size, + const __asan_global &g) { InternalScopedString str(4096); Decorator d; str.append("%s", d.Location()); @@ -300,6 +304,26 @@ bool DescribeAddressRelativeToGlobal(uptr addr, uptr size, str.append("%s", d.EndLocation()); PrintGlobalNameIfASCII(&str, g); Printf("%s", str.data()); +} + +static bool DescribeAddressIfGlobal(uptr addr, uptr size, + const char *bug_type) { + // Assume address is close to at most four globals. + const int kMaxGlobalsInReport = 4; + __asan_global globals[kMaxGlobalsInReport]; + u32 reg_sites[kMaxGlobalsInReport]; + int globals_num = + GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals)); + if (globals_num == 0) + return false; + for (int i = 0; i < globals_num; i++) { + DescribeAddressRelativeToGlobal(addr, size, globals[i]); + if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") && + reg_sites[i]) { + Printf(" registered at:\n"); + StackDepotGet(reg_sites[i]).Print(); + } + } return true; } @@ -348,7 +372,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr, uptr next_var_beg) { uptr var_end = var.beg + var.size; uptr addr_end = addr + access_size; - const char *pos_descr = 0; + const char *pos_descr = nullptr; // If the variable [var.beg, var_end) is the nearest variable to the // current memory access, indicate it in the log. if (addr >= var.beg) { @@ -519,7 +543,7 @@ void DescribeHeapAddress(uptr addr, uptr access_size) { StackTrace alloc_stack = chunk.GetAllocStack(); char tname[128]; Decorator d; - AsanThreadContext *free_thread = 0; + AsanThreadContext *free_thread = nullptr; if (chunk.FreeTid() != kInvalidTid) { free_thread = GetThreadContextByTidLocked(chunk.FreeTid()); Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(), @@ -545,12 +569,12 @@ void DescribeHeapAddress(uptr addr, uptr access_size) { DescribeThread(alloc_thread); } -void DescribeAddress(uptr addr, uptr access_size) { +static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) { // Check if this is shadow or shadow gap. 
if (DescribeAddressIfShadow(addr)) return; CHECK(AddrIsInMem(addr)); - if (DescribeAddressIfGlobal(addr, access_size)) + if (DescribeAddressIfGlobal(addr, access_size, bug_type)) return; if (DescribeAddressIfStack(addr, access_size)) return; @@ -572,6 +596,11 @@ void DescribeThread(AsanThreadContext *context) { InternalScopedString str(1024); str.append("Thread T%d%s", context->tid, ThreadNameWithParenthesis(context->tid, tname, sizeof(tname))); + if (context->parent_tid == kInvalidTid) { + str.append(" created by unknown thread\n"); + Printf("%s", str.data()); + return; + } str.append( " created by T%d%s here:\n", context->parent_tid, ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname))); @@ -609,7 +638,7 @@ class ScopedInErrorReport { } // If we're still not dead for some reason, use raw _exit() instead of // Die() to bypass any additional checks. - internal__exit(flags()->exitcode); + internal__exit(common_flags()->exitcode); } if (report) report_data = *report; report_happened = true; @@ -641,40 +670,39 @@ class ScopedInErrorReport { } }; -void ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr) { +void ReportStackOverflow(const SignalContext &sig) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: stack-overflow on address %p" " (pc %p bp %p sp %p T%d)\n", - (void *)addr, (void *)pc, (void *)bp, (void *)sp, + (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp, GetCurrentTidOrInvalid()); Printf("%s", d.EndWarning()); - GET_STACK_TRACE_SIGNAL(pc, bp, context); + GET_STACK_TRACE_SIGNAL(sig); stack.Print(); ReportErrorSummary("stack-overflow", &stack); } -void ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, - void *context, uptr addr) { +void ReportDeadlySignal(const char *description, const SignalContext &sig) { ScopedInErrorReport in_report; Decorator d; Printf("%s", d.Warning()); Report( "ERROR: AddressSanitizer: %s on unknown address %p" " (pc %p bp %p sp %p T%d)\n", - description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, - GetCurrentTidOrInvalid()); - if (pc < GetPageSizeCached()) { + description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, + (void *)sig.sp, GetCurrentTidOrInvalid()); + if (sig.pc < GetPageSizeCached()) { Report("Hint: pc points to the zero page.\n"); } Printf("%s", d.EndWarning()); - GET_STACK_TRACE_SIGNAL(pc, bp, context); + GET_STACK_TRACE_SIGNAL(sig); stack.Print(); - MaybeDumpInstructionBytes(pc); + MaybeDumpInstructionBytes(sig.pc); Printf("AddressSanitizer can not provide additional info.\n"); - ReportErrorSummary("SEGV", &stack); + ReportErrorSummary(description, &stack); } void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) { @@ -800,8 +828,8 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function, bug_type, offset1, offset1 + length1, offset2, offset2 + length2); Printf("%s", d.EndWarning()); stack->Print(); - DescribeAddress((uptr)offset1, length1); - DescribeAddress((uptr)offset2, length2); + DescribeAddress((uptr)offset1, length1, bug_type); + DescribeAddress((uptr)offset2, length2, bug_type); ReportErrorSummary(bug_type, stack); } @@ -814,7 +842,7 @@ void ReportStringFunctionSizeOverflow(uptr offset, uptr size, Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size); Printf("%s", d.EndWarning()); stack->Print(); - DescribeAddress(offset, size); + DescribeAddress(offset, size, bug_type); ReportErrorSummary(bug_type, stack); } @@ -829,6 +857,9 @@ void 
ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end, " old_mid : %p\n" " new_mid : %p\n", beg, end, old_mid, new_mid); + uptr granularity = SHADOW_GRANULARITY; + if (!IsAligned(beg, granularity)) + Report("ERROR: beg is not aligned by %d\n", granularity); stack->Print(); ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack); } @@ -866,15 +897,16 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1, static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) { ScopedInErrorReport in_report; + const char *bug_type = "invalid-pointer-pair"; Decorator d; Printf("%s", d.Warning()); Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2); Printf("%s", d.EndWarning()); GET_STACK_TRACE_FATAL(pc, bp); stack.Print(); - DescribeAddress(a1, 1); - DescribeAddress(a2, 1); - ReportErrorSummary("invalid-pointer-pair", &stack); + DescribeAddress(a1, 1, bug_type); + DescribeAddress(a2, 1, bug_type); + ReportErrorSummary(bug_type, &stack); } static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) { @@ -925,13 +957,24 @@ void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name, DescribeHeapAddress(addr, 1); } -} // namespace __asan +} // namespace __asan // --------------------------- Interface --------------------- {{{1 using namespace __asan; // NOLINT void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, - uptr access_size) { + uptr access_size, u32 exp) { + ENABLE_FRAME_POINTER; + + // Optimization experiments. + // The experiments can be used to evaluate potential optimizations that remove + // instrumentation (assess false negatives). Instead of completely removing + // some instrumentation, compiler can emit special calls into runtime + // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass + // mask of experiments (exp). + // The reaction to a non-zero value of exp is to be defined. + (void)exp; + // Determine the error type. const char *bug_descr = "unknown-crash"; if (AddrIsInMem(addr)) { @@ -980,6 +1023,10 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, case kAsanIntraObjectRedzone: bug_descr = "intra-object-overflow"; break; + case kAsanAllocaLeftMagic: + case kAsanAllocaRightMagic: + bug_descr = "dynamic-stack-buffer-overflow"; + break; } } @@ -1006,7 +1053,7 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write, GET_STACK_TRACE_FATAL(pc, bp); stack.Print(); - DescribeAddress(addr, access_size); + DescribeAddress(addr, access_size, bug_descr); ReportErrorSummary(bug_descr, &stack); PrintShadowMemoryForAddress(addr); } @@ -1024,7 +1071,7 @@ void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) { void __asan_describe_address(uptr addr) { // Thread registry must be locked while we're describing an address. 
asanThreadRegistry().Lock(); - DescribeAddress(addr, 1); + DescribeAddress(addr, 1, ""); asanThreadRegistry().Unlock(); } @@ -1069,7 +1116,7 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_ptr_cmp(void *a, void *b) { CheckForInvalidPointerPair(a, b); } -} // extern "C" +} // extern "C" #if !SANITIZER_SUPPORTS_WEAK_HOOKS // Provide default implementation of __asan_on_error that does nothing diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h index b31b86dd08b..6214979cf4c 100644 --- a/libsanitizer/asan/asan_report.h +++ b/libsanitizer/asan/asan_report.h @@ -31,29 +31,25 @@ struct AddressDescription { const char *region_kind; }; +// Returns the number of globals close to the provided address and copies +// them to "globals" array. +int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites, + int max_globals); +bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr); // The following functions prints address description depending // on the memory type (shadow/heap/stack/global). void DescribeHeapAddress(uptr addr, uptr access_size); -bool DescribeAddressIfGlobal(uptr addr, uptr access_size); -bool DescribeAddressRelativeToGlobal(uptr addr, uptr access_size, - const __asan_global &g); -bool IsAddressNearGlobal(uptr addr, const __asan_global &g); -bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr); bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr, bool print = true); bool ParseFrameDescription(const char *frame_descr, InternalMmapVector<StackVarDescr> *vars); bool DescribeAddressIfStack(uptr addr, uptr access_size); -// Determines memory type on its own. -void DescribeAddress(uptr addr, uptr access_size); - void DescribeThread(AsanThreadContext *context); // Different kinds of error reports. -void NORETURN - ReportStackOverflow(uptr pc, uptr sp, uptr bp, void *context, uptr addr); -void NORETURN ReportSIGSEGV(const char *description, uptr pc, uptr sp, uptr bp, - void *context, uptr addr); +void NORETURN ReportStackOverflow(const SignalContext &sig); +void NORETURN ReportDeadlySignal(const char* description, + const SignalContext &sig); void NORETURN ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size, BufferedStackTrace *free_stack); void NORETURN ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack); diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc index 2c599047dac..551fea5b0c7 100644 --- a/libsanitizer/asan/asan_rtl.cc +++ b/libsanitizer/asan/asan_rtl.cc @@ -9,6 +9,7 @@ // // Main file of the ASan run-time library. //===----------------------------------------------------------------------===// + #include "asan_activation.h" #include "asan_allocator.h" #include "asan_interceptors.h" @@ -19,12 +20,15 @@ #include "asan_report.h" #include "asan_stack.h" #include "asan_stats.h" +#include "asan_suppressions.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_flags.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include "lsan/lsan_common.h" +#include "ubsan/ubsan_init.h" +#include "ubsan/ubsan_platform.h" int __asan_option_detect_stack_use_after_return; // Global interface symbol. uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan. 
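__asan_describe_address, whose implementation is adjusted at the top of this hunk, is also exported for ad-hoc debugging. A small helper of the kind one might write in a test harness; the helper itself is hypothetical, but both calls are public declarations from <sanitizer/asan_interface.h>.

#include <sanitizer/asan_interface.h>
#include <cstdio>

// Print what ASan knows about a pointer: its poisoning state plus the
// heap/stack/global description and, where available, allocation stacks.
void describe_for_debugging(void *p) {
  if (__asan_address_is_poisoned(p))
    std::printf("%p is currently poisoned\n", p);
  __asan_describe_address(p);
}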
@@ -51,13 +55,6 @@ static void AsanDie() { UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg); } } - if (common_flags()->coverage) - __sanitizer_cov_dump(); - if (death_callback) - death_callback(); - if (flags()->abort_on_error) - Abort(); - internal__exit(flags()->exitcode); } static void AsanCheckFailed(const char *file, int line, const char *cond, @@ -69,265 +66,9 @@ static void AsanCheckFailed(const char *file, int line, const char *cond, Die(); } -// -------------------------- Flags ------------------------- {{{1 -static const int kDefaultMallocContextSize = 30; - -Flags asan_flags_dont_use_directly; // use via flags(). - -static const char *MaybeCallAsanDefaultOptions() { - return (&__asan_default_options) ? __asan_default_options() : ""; -} - -static const char *MaybeUseAsanDefaultOptionsCompileDefinition() { -#ifdef ASAN_DEFAULT_OPTIONS -// Stringize the macro value. -# define ASAN_STRINGIZE(x) #x -# define ASAN_STRINGIZE_OPTIONS(options) ASAN_STRINGIZE(options) - return ASAN_STRINGIZE_OPTIONS(ASAN_DEFAULT_OPTIONS); -#else - return ""; -#endif -} - -static void ParseFlagsFromString(Flags *f, const char *str) { - CommonFlags *cf = common_flags(); - ParseCommonFlagsFromString(cf, str); - CHECK((uptr)cf->malloc_context_size <= kStackTraceMax); - // Please write meaningful flag descriptions when adding new flags. - ParseFlag(str, &f->quarantine_size, "quarantine_size", - "Size (in bytes) of quarantine used to detect use-after-free " - "errors. Lower value may reduce memory usage but increase the " - "chance of false negatives."); - ParseFlag(str, &f->redzone, "redzone", - "Minimal size (in bytes) of redzones around heap objects. " - "Requirement: redzone >= 16, is a power of two."); - ParseFlag(str, &f->max_redzone, "max_redzone", - "Maximal size (in bytes) of redzones around heap objects."); - CHECK_GE(f->redzone, 16); - CHECK_GE(f->max_redzone, f->redzone); - CHECK_LE(f->max_redzone, 2048); - CHECK(IsPowerOfTwo(f->redzone)); - CHECK(IsPowerOfTwo(f->max_redzone)); - - ParseFlag(str, &f->debug, "debug", - "If set, prints some debugging information and does additional checks."); - ParseFlag(str, &f->report_globals, "report_globals", - "Controls the way to handle globals (0 - don't detect buffer overflow on " - "globals, 1 - detect buffer overflow, 2 - print data about registered " - "globals)."); - - ParseFlag(str, &f->check_initialization_order, - "check_initialization_order", - "If set, attempts to catch initialization order issues."); - - ParseFlag(str, &f->replace_str, "replace_str", - "If set, uses custom wrappers and replacements for libc string functions " - "to find more errors."); - - ParseFlag(str, &f->replace_intrin, "replace_intrin", - "If set, uses custom wrappers for memset/memcpy/memmove intinsics."); - ParseFlag(str, &f->mac_ignore_invalid_free, "mac_ignore_invalid_free", - "Ignore invalid free() calls to work around some bugs. Used on OS X " - "only."); - ParseFlag(str, &f->detect_stack_use_after_return, - "detect_stack_use_after_return", - "Enables stack-use-after-return checking at run-time."); - ParseFlag(str, &f->min_uar_stack_size_log, "min_uar_stack_size_log", - "Minimum fake stack size log."); - ParseFlag(str, &f->max_uar_stack_size_log, "max_uar_stack_size_log", - "Maximum fake stack size log."); - ParseFlag(str, &f->uar_noreserve, "uar_noreserve", - "Use mmap with 'norserve' flag to allocate fake stack."); - ParseFlag(str, &f->max_malloc_fill_size, "max_malloc_fill_size", - "ASan allocator flag. 
max_malloc_fill_size is the maximal amount of " - "bytes that will be filled with malloc_fill_byte on malloc."); - ParseFlag(str, &f->malloc_fill_byte, "malloc_fill_byte", - "Value used to fill the newly allocated memory."); - ParseFlag(str, &f->exitcode, "exitcode", - "Override the program exit status if the tool found an error."); - ParseFlag(str, &f->allow_user_poisoning, "allow_user_poisoning", - "If set, user may manually mark memory regions as poisoned or " - "unpoisoned."); - ParseFlag(str, &f->sleep_before_dying, "sleep_before_dying", - "Number of seconds to sleep between printing an error report and " - "terminating the program. Useful for debugging purposes (e.g. when one " - "needs to attach gdb)."); - - ParseFlag(str, &f->check_malloc_usable_size, "check_malloc_usable_size", - "Allows the users to work around the bug in Nvidia drivers prior to " - "295.*."); - - ParseFlag(str, &f->unmap_shadow_on_exit, "unmap_shadow_on_exit", - "If set, explicitly unmaps the (huge) shadow at exit."); - ParseFlag(str, &f->abort_on_error, "abort_on_error", - "If set, the tool calls abort() instead of _exit() after printing the " - "error report."); - ParseFlag(str, &f->print_stats, "print_stats", - "Print various statistics after printing an error message or if " - "atexit=1."); - ParseFlag(str, &f->print_legend, "print_legend", - "Print the legend for the shadow bytes."); - ParseFlag(str, &f->atexit, "atexit", - "If set, prints ASan exit stats even after program terminates " - "successfully."); - - ParseFlag(str, &f->allow_reexec, "allow_reexec", - "Allow the tool to re-exec the program. This may interfere badly with " - "the debugger."); - - ParseFlag(str, &f->print_full_thread_history, - "print_full_thread_history", - "If set, prints thread creation stacks for the threads involved in the " - "report and their ancestors up to the main thread."); - - ParseFlag(str, &f->poison_heap, "poison_heap", - "Poison (or not) the heap memory on [de]allocation. Zero value is useful " - "for benchmarking the allocator or instrumentator."); - - ParseFlag(str, &f->poison_array_cookie, "poison_array_cookie", - "Poison (or not) the array cookie after operator new[]."); - - ParseFlag(str, &f->poison_partial, "poison_partial", - "If true, poison partially addressable 8-byte aligned words " - "(default=true). This flag affects heap and global buffers, but not " - "stack buffers."); - - ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch", - "Report errors on malloc/delete, new/free, new/delete[], etc."); - - ParseFlag(str, &f->new_delete_type_mismatch, "new_delete_type_mismatch", - "Report errors on mismatch betwen size of new and delete."); - - ParseFlag(str, &f->strict_memcmp, "strict_memcmp", - "If true, assume that memcmp(p1, p2, n) always reads n bytes before " - "comparing p1 and p2."); - - ParseFlag(str, &f->strict_init_order, "strict_init_order", - "If true, assume that dynamic initializers can never access globals from " - "other modules, even if the latter are already initialized."); - - ParseFlag(str, &f->start_deactivated, "start_deactivated", - "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap " - "poisoning) to reduce memory consumption as much as possible, and " - "restores them to original values when the first instrumented module is " - "loaded into the process. This is mainly intended to be used on " - "Android. 
"); - - ParseFlag(str, &f->detect_invalid_pointer_pairs, - "detect_invalid_pointer_pairs", - "If non-zero, try to detect operations like <, <=, >, >= and - on " - "invalid pointer pairs (e.g. when pointers belong to different objects). " - "The bigger the value the harder we try."); - - ParseFlag(str, &f->detect_container_overflow, - "detect_container_overflow", - "If true, honor the container overflow annotations. " - "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow"); - - ParseFlag(str, &f->detect_odr_violation, "detect_odr_violation", - "If >=2, detect violation of One-Definition-Rule (ODR); " - "If ==1, detect ODR-violation only if the two variables " - "have different sizes"); - - ParseFlag(str, &f->dump_instruction_bytes, "dump_instruction_bytes", - "If true, dump 16 bytes starting at the instruction that caused SEGV"); -} - -void InitializeFlags(Flags *f, const char *env) { - CommonFlags *cf = common_flags(); - SetCommonFlagsDefaults(cf); - cf->detect_leaks = CAN_SANITIZE_LEAKS; - cf->external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH"); - cf->malloc_context_size = kDefaultMallocContextSize; - cf->intercept_tls_get_addr = true; - cf->coverage = false; - - internal_memset(f, 0, sizeof(*f)); - f->quarantine_size = (ASAN_LOW_MEMORY) ? 1UL << 26 : 1UL << 28; - f->redzone = 16; - f->max_redzone = 2048; - f->debug = false; - f->report_globals = 1; - f->check_initialization_order = false; - f->replace_str = true; - f->replace_intrin = true; - f->mac_ignore_invalid_free = false; - f->detect_stack_use_after_return = false; // Also needs the compiler flag. - f->min_uar_stack_size_log = 16; // We can't do smaller anyway. - f->max_uar_stack_size_log = 20; // 1Mb per size class, i.e. ~11Mb per thread. - f->uar_noreserve = false; - f->max_malloc_fill_size = 0x1000; // By default, fill only the first 4K. - f->malloc_fill_byte = 0xbe; - f->exitcode = ASAN_DEFAULT_FAILURE_EXITCODE; - f->allow_user_poisoning = true; - f->sleep_before_dying = 0; - f->check_malloc_usable_size = true; - f->unmap_shadow_on_exit = false; - f->abort_on_error = false; - f->print_stats = false; - f->print_legend = true; - f->atexit = false; - f->allow_reexec = true; - f->print_full_thread_history = true; - f->poison_heap = true; - f->poison_array_cookie = true; - f->poison_partial = true; - // Turn off alloc/dealloc mismatch checker on Mac and Windows for now. - // https://code.google.com/p/address-sanitizer/issues/detail?id=131 - // https://code.google.com/p/address-sanitizer/issues/detail?id=309 - // TODO(glider,timurrrr): Fix known issues and enable this back. - f->alloc_dealloc_mismatch = (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0); - f->new_delete_type_mismatch = true; - f->strict_memcmp = true; - f->strict_init_order = false; - f->start_deactivated = false; - f->detect_invalid_pointer_pairs = 0; - f->detect_container_overflow = true; - f->detect_odr_violation = 2; - f->dump_instruction_bytes = false; - - // Override from compile definition. - ParseFlagsFromString(f, MaybeUseAsanDefaultOptionsCompileDefinition()); - - // Override from user-specified string. - ParseFlagsFromString(f, MaybeCallAsanDefaultOptions()); - VReport(1, "Using the defaults from __asan_default_options: %s\n", - MaybeCallAsanDefaultOptions()); - - // Override from command line. 
- ParseFlagsFromString(f, env); - if (common_flags()->help) { - PrintFlagDescriptions(); - } - - if (!CAN_SANITIZE_LEAKS && cf->detect_leaks) { - Report("%s: detect_leaks is not supported on this platform.\n", - SanitizerToolName); - cf->detect_leaks = false; - } - - // Make "strict_init_order" imply "check_initialization_order". - // TODO(samsonov): Use a single runtime flag for an init-order checker. - if (f->strict_init_order) { - f->check_initialization_order = true; - } -} - -// Parse flags that may change between startup and activation. -// On Android they come from a system property. -// On other platforms this is no-op. -void ParseExtraActivationFlags() { - char buf[100]; - GetExtraActivationFlags(buf, sizeof(buf)); - ParseFlagsFromString(flags(), buf); - if (buf[0] != '\0') - VReport(1, "Extra activation flags: %s\n", buf); -} - // -------------------------- Globals --------------------- {{{1 int asan_inited; bool asan_init_is_running; -void (*death_callback)(void); #if !ASAN_FIXED_MAPPING uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; @@ -341,17 +82,22 @@ void ShowStatsAndAbort() { // ---------------------- mmap -------------------- {{{1 // Reserve memory range [beg, end]. -static void ReserveShadowMemoryRange(uptr beg, uptr end) { +// We need to use inclusive range because end+1 may not be representable. +void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { CHECK_EQ((beg % GetPageSizeCached()), 0); CHECK_EQ(((end + 1) % GetPageSizeCached()), 0); uptr size = end - beg + 1; DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. - void *res = MmapFixedNoReserve(beg, size); + void *res = MmapFixedNoReserve(beg, size, name); if (res != (void*)beg) { Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. 
" "Perhaps you're using ulimit -v\n", size); Abort(); } + if (common_flags()->no_huge_pages_for_shadow) + NoHugePagesInRegion(beg, size); + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(beg, size); } // --------------- LowLevelAllocateCallbac ---------- {{{1 @@ -362,11 +108,15 @@ static void OnLowLevelAllocate(uptr ptr, uptr size) { // -------------------------- Run-time entry ------------------- {{{1 // exported functions #define ASAN_REPORT_ERROR(type, is_write, size) \ -extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## size(uptr addr); \ -void __asan_report_ ## type ## size(uptr addr) { \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_ ## type ## size(uptr addr) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size, 0); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \ GET_CALLER_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, addr, is_write, size); \ + __asan_report_error(pc, bp, sp, addr, is_write, size, exp); \ } ASAN_REPORT_ERROR(load, false, 1) @@ -382,18 +132,20 @@ ASAN_REPORT_ERROR(store, true, 16) #define ASAN_REPORT_ERROR_N(type, is_write) \ extern "C" NOINLINE INTERFACE_ATTRIBUTE \ -void __asan_report_ ## type ## _n(uptr addr, uptr size); \ void __asan_report_ ## type ## _n(uptr addr, uptr size) { \ GET_CALLER_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, addr, is_write, size); \ + __asan_report_error(pc, bp, sp, addr, is_write, size, 0); \ +} \ +extern "C" NOINLINE INTERFACE_ATTRIBUTE \ +void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \ + GET_CALLER_PC_BP_SP; \ + __asan_report_error(pc, bp, sp, addr, is_write, size, exp); \ } ASAN_REPORT_ERROR_N(load, false) ASAN_REPORT_ERROR_N(store, true) -#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ - extern "C" NOINLINE INTERFACE_ATTRIBUTE void __asan_##type##size(uptr addr); \ - void __asan_##type##size(uptr addr) { \ +#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg) \ uptr sp = MEM_TO_SHADOW(addr); \ uptr s = size <= SHADOW_GRANULARITY ? 
*reinterpret_cast<u8 *>(sp) \ : *reinterpret_cast<u16 *>(sp); \ @@ -405,10 +157,19 @@ ASAN_REPORT_ERROR_N(store, true) *__asan_test_only_reported_buggy_pointer = addr; \ } else { \ GET_CALLER_PC_BP_SP; \ - __asan_report_error(pc, bp, sp, addr, is_write, size); \ + __asan_report_error(pc, bp, sp, addr, is_write, size, exp_arg); \ } \ } \ - } \ + } + +#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_##type##size(uptr addr) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0) \ + } \ + extern "C" NOINLINE INTERFACE_ATTRIBUTE \ + void __asan_exp_##type##size(uptr addr, u32 exp) { \ + ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp) \ } ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1) @@ -423,18 +184,38 @@ ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8) ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16) extern "C" -NOINLINE INTERFACE_ATTRIBUTE void __asan_loadN(uptr addr, uptr size) { +NOINLINE INTERFACE_ATTRIBUTE +void __asan_loadN(uptr addr, uptr size) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; - __asan_report_error(pc, bp, sp, addr, false, size); + __asan_report_error(pc, bp, sp, addr, false, size, 0); } } extern "C" -NOINLINE INTERFACE_ATTRIBUTE void __asan_storeN(uptr addr, uptr size) { +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_loadN(uptr addr, uptr size, u32 exp) { if (__asan_region_is_poisoned(addr, size)) { GET_CALLER_PC_BP_SP; - __asan_report_error(pc, bp, sp, addr, true, size); + __asan_report_error(pc, bp, sp, addr, false, size, exp); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_storeN(uptr addr, uptr size) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + __asan_report_error(pc, bp, sp, addr, true, size, 0); + } +} + +extern "C" +NOINLINE INTERFACE_ATTRIBUTE +void __asan_exp_storeN(uptr addr, uptr size, u32 exp) { + if (__asan_region_is_poisoned(addr, size)) { + GET_CALLER_PC_BP_SP; + __asan_report_error(pc, bp, sp, addr, true, size, exp); } } @@ -453,26 +234,39 @@ static NOINLINE void force_interface_symbols() { case 3: __asan_report_load4(0); break; case 4: __asan_report_load8(0); break; case 5: __asan_report_load16(0); break; - case 6: __asan_report_store1(0); break; - case 7: __asan_report_store2(0); break; - case 8: __asan_report_store4(0); break; - case 9: __asan_report_store8(0); break; - case 10: __asan_report_store16(0); break; - case 12: __asan_register_globals(0, 0); break; - case 13: __asan_unregister_globals(0, 0); break; - case 14: __asan_set_death_callback(0); break; - case 15: __asan_set_error_report_callback(0); break; - case 16: __asan_handle_no_return(); break; - case 17: __asan_address_is_poisoned(0); break; - case 25: __asan_poison_memory_region(0, 0); break; - case 26: __asan_unpoison_memory_region(0, 0); break; - case 27: __asan_set_error_exit_code(0); break; - case 30: __asan_before_dynamic_init(0); break; - case 31: __asan_after_dynamic_init(); break; - case 32: __asan_poison_stack_memory(0, 0); break; - case 33: __asan_unpoison_stack_memory(0, 0); break; - case 34: __asan_region_is_poisoned(0, 0); break; - case 35: __asan_describe_address(0); break; + case 6: __asan_report_load_n(0, 0); break; + case 7: __asan_report_store1(0); break; + case 8: __asan_report_store2(0); break; + case 9: __asan_report_store4(0); break; + case 10: __asan_report_store8(0); break; + case 11: __asan_report_store16(0); break; + case 12: __asan_report_store_n(0, 0); break; + case 13: __asan_report_exp_load1(0, 0); break; + case 14: 
__asan_report_exp_load2(0, 0); break; + case 15: __asan_report_exp_load4(0, 0); break; + case 16: __asan_report_exp_load8(0, 0); break; + case 17: __asan_report_exp_load16(0, 0); break; + case 18: __asan_report_exp_load_n(0, 0, 0); break; + case 19: __asan_report_exp_store1(0, 0); break; + case 20: __asan_report_exp_store2(0, 0); break; + case 21: __asan_report_exp_store4(0, 0); break; + case 22: __asan_report_exp_store8(0, 0); break; + case 23: __asan_report_exp_store16(0, 0); break; + case 24: __asan_report_exp_store_n(0, 0, 0); break; + case 25: __asan_register_globals(nullptr, 0); break; + case 26: __asan_unregister_globals(nullptr, 0); break; + case 27: __asan_set_death_callback(nullptr); break; + case 28: __asan_set_error_report_callback(nullptr); break; + case 29: __asan_handle_no_return(); break; + case 30: __asan_address_is_poisoned(nullptr); break; + case 31: __asan_poison_memory_region(nullptr, 0); break; + case 32: __asan_unpoison_memory_region(nullptr, 0); break; + case 34: __asan_before_dynamic_init(nullptr); break; + case 35: __asan_after_dynamic_init(); break; + case 36: __asan_poison_stack_memory(0, 0); break; + case 37: __asan_unpoison_stack_memory(0, 0); break; + case 38: __asan_region_is_poisoned(0, 0); break; + case 39: __asan_describe_address(0); break; } } @@ -496,8 +290,28 @@ static void InitializeHighMemEnd() { CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0); } -static void ProtectGap(uptr a, uptr size) { - CHECK_EQ(a, (uptr)Mprotect(a, size)); +static void ProtectGap(uptr addr, uptr size) { + void *res = MmapNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) + return; + // A few pages at the start of the address space can not be protected. + // But we really want to protect as much as possible, to prevent this memory + // being returned as a result of a non-FIXED mmap(). + if (addr == kZeroBaseShadowStart) { + uptr step = GetPageSizeCached(); + while (size > step && addr < kZeroBaseMaxShadowStart) { + addr += step; + size -= step; + void *res = MmapNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) + return; + } + } + + Report("ERROR: Failed to protect the shadow gap. " + "ASan cannot proceed correctly. ABORTING.\n"); + DumpProcessMap(); + Die(); } static void PrintAddressSpaceLayout() { @@ -536,13 +350,13 @@ static void PrintAddressSpaceLayout() { Printf("\n"); Printf("redzone=%zu\n", (uptr)flags()->redzone); Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone); - Printf("quarantine_size=%zuM\n", (uptr)flags()->quarantine_size >> 20); + Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb); Printf("malloc_context_size=%zu\n", (uptr)common_flags()->malloc_context_size); - Printf("SHADOW_SCALE: %zx\n", (uptr)SHADOW_SCALE); - Printf("SHADOW_GRANULARITY: %zx\n", (uptr)SHADOW_GRANULARITY); - Printf("SHADOW_OFFSET: %zx\n", (uptr)SHADOW_OFFSET); + Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE); + Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY); + Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET); CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7); if (kMidMemBeg) CHECK(kMidShadowBeg > kLowShadowEnd && @@ -556,10 +370,19 @@ static void AsanInitInternal() { CHECK(!asan_init_is_running && "ASan init calls itself!"); asan_init_is_running = true; + CacheBinaryName(); + // Initialize flags. This must be done early, because most of the // initialization steps look at flags(). 
- const char *options = GetEnv("ASAN_OPTIONS"); - InitializeFlags(flags(), options); + InitializeFlags(); + + CheckVMASize(); + + AsanCheckIncompatibleRT(); + AsanCheckDynamicRTPrereqs(); + + SetCanPoisonMemory(flags()->poison_heap); + SetMallocContextSize(common_flags()->malloc_context_size); InitializeHighMemEnd(); @@ -567,24 +390,15 @@ static void AsanInitInternal() { AsanDoesNotSupportStaticLinkage(); // Install tool-specific callbacks in sanitizer_common. - SetDieCallback(AsanDie); + AddDieCallback(AsanDie); SetCheckFailedCallback(AsanCheckFailed); SetPrintfAndReportCallback(AppendToErrorMessageBuffer); - if (!flags()->start_deactivated) - ParseExtraActivationFlags(); - __sanitizer_set_report_path(common_flags()->log_path); + + // Enable UAR detection, if required. __asan_option_detect_stack_use_after_return = flags()->detect_stack_use_after_return; - CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log); - - if (options) { - VReport(1, "Parsed ASAN_OPTIONS: %s\n", options); - } - - if (flags()->start_deactivated) - AsanStartDeactivated(); // Re-exec ourselves if we need to set additional env or command line args. MaybeReexec(); @@ -615,17 +429,16 @@ static void AsanInitInternal() { } #endif - if (common_flags()->verbosity) - PrintAddressSpaceLayout(); + if (Verbosity()) PrintAddressSpaceLayout(); DisableCoreDumperIfNecessary(); if (full_shadow_is_available) { // mmap the low shadow plus at least one page at the left. if (kLowShadowBeg) - ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); // mmap the high shadow. - ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); // protect the gap. ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1); @@ -634,11 +447,11 @@ static void AsanInitInternal() { MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) { CHECK(kLowShadowBeg != kLowShadowEnd); // mmap the low shadow plus at least one page at the left. - ReserveShadowMemoryRange(shadow_start, kLowShadowEnd); + ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow"); // mmap the mid shadow. - ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd); + ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow"); // mmap the high shadow. - ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd); + ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow"); // protect the gaps. ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1); ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1); @@ -646,14 +459,21 @@ static void AsanInitInternal() { } else { Report("Shadow memory range interleaves with an existing memory mapping. " "ASan cannot proceed correctly. ABORTING.\n"); + Report("ASan shadow was supposed to be located in the [%p-%p] range.\n", + shadow_start, kHighShadowEnd); DumpProcessMap(); Die(); } AsanTSDInit(PlatformTSDDtor); - InstallDeadlySignalHandlers(AsanOnSIGSEGV); + InstallDeadlySignalHandlers(AsanOnDeadlySignal); + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + InitializeAllocator(allocator_options); - InitializeAllocator(); + MaybeStartBackgroudThread(); + SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback); // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited // should be set to 1 prior to initializing the threads. 
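Although the explicit GetEnv("ASAN_OPTIONS") call is dropped at the start of this hunk, the parameterless InitializeFlags() still reads the same environment variable through the shared flag machinery, so runtime options keep their colon-separated form, for example:

ASAN_OPTIONS=redzone=32:max_redzone=1024:detect_stack_use_after_return=1:verbosity=1 ./a.out

The flag names here are existing ASan/common flags; the values are arbitrary examples.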
@@ -663,32 +483,40 @@ static void AsanInitInternal() { if (flags()->atexit) Atexit(asan_atexit); - if (common_flags()->coverage) { - __sanitizer_cov_init(); - Atexit(__sanitizer_cov_dump); - } + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + + // Now that ASan runtime is (mostly) initialized, deactivate it if + // necessary, so that it can be re-activated when requested. + if (flags()->start_deactivated) + AsanDeactivate(); // interceptors InitTlsSize(); // Create main thread. - AsanThread *main_thread = AsanThread::Create(0, 0); - CreateThreadContextArgs create_main_args = { main_thread, 0 }; - u32 main_tid = asanThreadRegistry().CreateThread( - 0, true, 0, &create_main_args); - CHECK_EQ(0, main_tid); + AsanThread *main_thread = AsanThread::Create( + /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0, + /* stack */ nullptr, /* detached */ true); + CHECK_EQ(0, main_thread->tid()); SetCurrentThread(main_thread); - main_thread->ThreadStart(internal_getpid()); + main_thread->ThreadStart(internal_getpid(), + /* signal_thread_is_registered */ nullptr); force_interface_symbols(); // no-op. SanitizerInitializeUnwinder(); #if CAN_SANITIZE_LEAKS - __lsan::InitCommonLsan(false); + __lsan::InitCommonLsan(); if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) { Atexit(__lsan::DoLeakCheck); } #endif // CAN_SANITIZE_LEAKS +#if CAN_SANITIZE_UB + __ubsan::InitAsPlugin(); +#endif + + InitializeSuppressions(); + VReport(1, "AddressSanitizer Init done\n"); } @@ -700,46 +528,38 @@ void AsanInitFromRtl() { #if ASAN_DYNAMIC // Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable -// (and thus normal initializer from .preinit_array haven't run). +// (and thus normal initializers from .preinit_array or modules haven't run). class AsanInitializer { public: // NOLINT AsanInitializer() { - AsanCheckIncompatibleRT(); - AsanCheckDynamicRTPrereqs(); - if (UNLIKELY(!asan_inited)) - __asan_init(); + AsanInitFromRtl(); } }; static AsanInitializer asan_initializer; #endif // ASAN_DYNAMIC -} // namespace __asan +} // namespace __asan // ---------------------- Interface ---------------- {{{1 using namespace __asan; // NOLINT -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -const char* __asan_default_options() { return ""; } -} // extern "C" -#endif - -int NOINLINE __asan_set_error_exit_code(int exit_code) { - int old = flags()->exitcode; - flags()->exitcode = exit_code; - return old; -} - void NOINLINE __asan_handle_no_return() { int local_stack; AsanThread *curr_thread = GetCurrentThread(); - CHECK(curr_thread); uptr PageSize = GetPageSizeCached(); - uptr top = curr_thread->stack_top(); - uptr bottom = ((uptr)&local_stack - PageSize) & ~(PageSize-1); + uptr top, bottom; + if (curr_thread) { + top = curr_thread->stack_top(); + bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1); + } else { + // If we haven't seen this thread, try asking the OS for stack bounds. 
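__asan_handle_no_return(), whose fallback path is extended above for threads the runtime has not seen, is also a public entry point; code that abandons stack frames through a mechanism ASan's interceptors do not know about can call it before jumping. A sketch only: switch_to_scheduler_context() is a made-up, non-ASan-aware primitive.

#include <sanitizer/asan_interface.h>

extern void switch_to_scheduler_context();  // hypothetical context switch

void park_current_fiber() {
  // Clear shadow for this thread's stack above (roughly) the current frame,
  // so stale poison does not cause false positives when the stack is reused.
  __asan_handle_no_return();
  switch_to_scheduler_context();
}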
+ uptr tls_addr, tls_size, stack_size; + GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr, + &tls_size); + top = bottom + stack_size; + } static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M if (top - bottom > kMaxExpectedCleanupSize) { static bool reported_warning = false; @@ -755,18 +575,21 @@ void NOINLINE __asan_handle_no_return() { return; } PoisonShadow(bottom, top - bottom, 0); - if (curr_thread->has_fake_stack()) + if (curr_thread && curr_thread->has_fake_stack()) curr_thread->fake_stack()->HandleNoReturn(); } void NOINLINE __asan_set_death_callback(void (*callback)(void)) { - death_callback = callback; + SetUserDieCallback(callback); } // Initialize as requested from instrumented application code. // We use this call as a trigger to wake up ASan from deactivated state. void __asan_init() { - AsanCheckIncompatibleRT(); AsanActivate(); AsanInitInternal(); } + +void __asan_version_mismatch_check() { + // Do nothing. +} diff --git a/libsanitizer/asan/asan_stack.cc b/libsanitizer/asan/asan_stack.cc index 96178e8247b..973c5ce59ef 100644 --- a/libsanitizer/asan/asan_stack.cc +++ b/libsanitizer/asan/asan_stack.cc @@ -11,6 +11,21 @@ //===----------------------------------------------------------------------===// #include "asan_internal.h" #include "asan_stack.h" +#include "sanitizer_common/sanitizer_atomic.h" + +namespace __asan { + +static atomic_uint32_t malloc_context_size; + +void SetMallocContextSize(u32 size) { + atomic_store(&malloc_context_size, size, memory_order_release); +} + +u32 GetMallocContextSize() { + return atomic_load(&malloc_context_size, memory_order_acquire); +} + +} // namespace __asan // ------------------ Interface -------------- {{{1 diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h index 640c4cf4f7b..30dc5921303 100644 --- a/libsanitizer/asan/asan_stack.h +++ b/libsanitizer/asan/asan_stack.h @@ -9,6 +9,7 @@ // // ASan-private header for asan_stack.cc. //===----------------------------------------------------------------------===// + #ifndef ASAN_STACK_H #define ASAN_STACK_H @@ -19,6 +20,11 @@ namespace __asan { +static const u32 kDefaultMallocContextSize = 30; + +void SetMallocContextSize(u32 size); +u32 GetMallocContextSize(); + // Get the stack trace with the given pc and bp. // The pc will be in the position 0 of the resulting stack trace. // The bp may refer to the current frame or to the caller's frame. @@ -41,15 +47,15 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, uptr stack_bottom = t->stack_bottom(); ScopedUnwinding unwind_scope(t); stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast); - } else if (t == 0 && !fast) { + } else if (!t && !fast) { /* If GetCurrentThread() has failed, try to do slow unwind anyways. 
*/ stack->Unwind(max_depth, pc, bp, context, 0, 0, false); } } -#endif // SANITIZER_WINDOWS +#endif // SANITIZER_WINDOWS } -} // namespace __asan +} // namespace __asan // NOTE: A Rule of thumb is to retrieve stack trace in the interceptors // as early as possible (in functions exposed to the user), as we generally @@ -76,9 +82,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \ common_flags()->fast_unwind_on_fatal) -#define GET_STACK_TRACE_SIGNAL(pc, bp, context) \ +#define GET_STACK_TRACE_SIGNAL(sig) \ BufferedStackTrace stack; \ - GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context, \ + GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \ + (sig).pc, (sig).bp, (sig).context, \ common_flags()->fast_unwind_on_fatal) #define GET_STACK_TRACE_FATAL_HERE \ @@ -90,9 +97,8 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, #define GET_STACK_TRACE_THREAD \ GET_STACK_TRACE(kStackTraceMax, true) -#define GET_STACK_TRACE_MALLOC \ - GET_STACK_TRACE(common_flags()->malloc_context_size, \ - common_flags()->fast_unwind_on_malloc) +#define GET_STACK_TRACE_MALLOC \ + GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc) #define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC @@ -108,4 +114,4 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth, stack.Print(); \ } -#endif // ASAN_STACK_H +#endif // ASAN_STACK_H diff --git a/libsanitizer/asan/asan_stats.cc b/libsanitizer/asan/asan_stats.cc index fbd636ea558..64a788a6a5d 100644 --- a/libsanitizer/asan/asan_stats.cc +++ b/libsanitizer/asan/asan_stats.cc @@ -49,12 +49,8 @@ void AsanStats::Print() { (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20, mmaps, munmaps); - PrintMallocStatsArray(" mmaps by size class: ", mmaped_by_size); PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size); - PrintMallocStatsArray(" frees by size class: ", freed_by_size); - PrintMallocStatsArray(" rfrees by size class: ", really_freed_by_size); - Printf("Stats: malloc large: %zu small slow: %zu\n", - malloc_large, malloc_small_slow); + Printf("Stats: malloc large: %zu\n", malloc_large); } void AsanStats::MergeFrom(const AsanStats *stats) { @@ -159,8 +155,7 @@ uptr __sanitizer_get_free_bytes() { GetAccumulatedStats(&stats); uptr total_free = stats.mmaped - stats.munmaped - + stats.really_freed - + stats.really_freed_redzones; + + stats.really_freed; uptr total_used = stats.malloced + stats.malloced_redzones; // Return sane value if total_free < total_used due to racy diff --git a/libsanitizer/asan/asan_stats.h b/libsanitizer/asan/asan_stats.h index 5b04b1718eb..a48e3f916a9 100644 --- a/libsanitizer/asan/asan_stats.h +++ b/libsanitizer/asan/asan_stats.h @@ -30,20 +30,14 @@ struct AsanStats { uptr freed; uptr real_frees; uptr really_freed; - uptr really_freed_redzones; uptr reallocs; uptr realloced; uptr mmaps; uptr mmaped; uptr munmaps; uptr munmaped; - uptr mmaped_by_size[kNumberOfSizeClasses]; - uptr malloced_by_size[kNumberOfSizeClasses]; - uptr freed_by_size[kNumberOfSizeClasses]; - uptr really_freed_by_size[kNumberOfSizeClasses]; - uptr malloc_large; - uptr malloc_small_slow; + uptr malloced_by_size[kNumberOfSizeClasses]; // Ctor for global AsanStats (accumulated stats for dead threads). 
explicit AsanStats(LinkerInitialized) { } diff --git a/libsanitizer/asan/asan_suppressions.cc b/libsanitizer/asan/asan_suppressions.cc new file mode 100644 index 00000000000..c94fff0500a --- /dev/null +++ b/libsanitizer/asan/asan_suppressions.cc @@ -0,0 +1,108 @@ +//===-- asan_suppressions.cc ----------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// Issue suppression and suppression-related functions. +//===----------------------------------------------------------------------===// + +#include "asan_suppressions.h" + +#include "asan_stack.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +namespace __asan { + +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char kInterceptorName[] = "interceptor_name"; +static const char kInterceptorViaFunction[] = "interceptor_via_fun"; +static const char kInterceptorViaLibrary[] = "interceptor_via_lib"; +static const char kODRViolation[] = "odr_violation"; +static const char *kSuppressionTypes[] = { + kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary, + kODRViolation}; + +extern "C" { +#if SANITIZER_SUPPORTS_WEAK_HOOKS +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char *__asan_default_suppressions(); +#else +// No week hooks, provide empty implementation. +const char *__asan_default_suppressions() { return ""; } +#endif // SANITIZER_SUPPORTS_WEAK_HOOKS +} // extern "C" + +void InitializeSuppressions() { + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) // NOLINT + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); + if (&__asan_default_suppressions) + suppression_ctx->Parse(__asan_default_suppressions()); +} + +bool IsInterceptorSuppressed(const char *interceptor_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "interceptor_name" suppressions. + return suppression_ctx->Match(interceptor_name, kInterceptorName, &s); +} + +bool HaveStackTraceBasedSuppressions() { + CHECK(suppression_ctx); + return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) || + suppression_ctx->HasSuppressionType(kInterceptorViaLibrary); +} + +bool IsODRViolationSuppressed(const char *global_var_name) { + CHECK(suppression_ctx); + Suppression *s; + // Match "odr_violation" suppressions. + return suppression_ctx->Match(global_var_name, kODRViolation, &s); +} + +bool IsStackTraceSuppressed(const StackTrace *stack) { + if (!HaveStackTraceBasedSuppressions()) + return false; + + CHECK(suppression_ctx); + Symbolizer *symbolizer = Symbolizer::GetOrInit(); + Suppression *s; + for (uptr i = 0; i < stack->size && stack->trace[i]; i++) { + uptr addr = stack->trace[i]; + + if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) { + // Match "interceptor_via_lib" suppressions. 
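The suppression types registered above use the common sanitizer format: one "type:pattern" rule per line, '#' for comments, '*' as a wildcard. A plausible file (all names below are invented for illustration), passed via the suppressions flag:

# asan.supp
interceptor_via_fun:legacy_checksum_update
interceptor_via_lib:libvendor_blob.so
odr_violation:kGlobalDefinedTwice

ASAN_OPTIONS=suppressions=asan.supp ./app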
+ if (const char *module_name = symbolizer->GetModuleNameForPc(addr)) + if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s)) + return true; + } + + if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) { + SymbolizedStack *frames = symbolizer->SymbolizePC(addr); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + const char *function_name = cur->info.function; + if (!function_name) { + continue; + } + // Match "interceptor_via_fun" suppressions. + if (suppression_ctx->Match(function_name, kInterceptorViaFunction, + &s)) { + frames->ClearAll(); + return true; + } + } + frames->ClearAll(); + } + } + return false; +} + +} // namespace __asan diff --git a/libsanitizer/asan/asan_suppressions.h b/libsanitizer/asan/asan_suppressions.h new file mode 100644 index 00000000000..331d7224548 --- /dev/null +++ b/libsanitizer/asan/asan_suppressions.h @@ -0,0 +1,28 @@ +//===-- asan_suppressions.h -------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of AddressSanitizer, an address sanity checker. +// +// ASan-private header for asan_suppressions.cc. +//===----------------------------------------------------------------------===// +#ifndef ASAN_SUPPRESSIONS_H +#define ASAN_SUPPRESSIONS_H + +#include "asan_internal.h" +#include "sanitizer_common/sanitizer_stacktrace.h" + +namespace __asan { + +void InitializeSuppressions(); +bool IsInterceptorSuppressed(const char *interceptor_name); +bool HaveStackTraceBasedSuppressions(); +bool IsStackTraceSuppressed(const StackTrace *stack); +bool IsODRViolationSuppressed(const char *global_var_name); + +} // namespace __asan + +#endif // ASAN_SUPPRESSIONS_H diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc index 95ca6720090..92f968bdee4 100644 --- a/libsanitizer/asan/asan_thread.cc +++ b/libsanitizer/asan/asan_thread.cc @@ -25,6 +25,11 @@ namespace __asan { // AsanThreadContext implementation. +struct CreateThreadContextArgs { + AsanThread *thread; + StackTrace *stack; +}; + void AsanThreadContext::OnCreated(void *arg) { CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg); if (args->stack) @@ -35,7 +40,7 @@ void AsanThreadContext::OnCreated(void *arg) { void AsanThreadContext::OnFinished() { // Drop the link to the AsanThread object. - thread = 0; + thread = nullptr; } // MIPS requires aligned address @@ -73,13 +78,17 @@ AsanThreadContext *GetThreadContextByTidLocked(u32 tid) { // AsanThread implementation. -AsanThread *AsanThread::Create(thread_callback_t start_routine, - void *arg) { +AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, + bool detached) { uptr PageSize = GetPageSizeCached(); uptr size = RoundUpTo(sizeof(AsanThread), PageSize); AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__); thread->start_routine_ = start_routine; thread->arg_ = arg; + CreateThreadContextArgs args = { thread, stack }; + asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached, + parent_tid, &args); return thread; } @@ -114,7 +123,7 @@ void AsanThread::Destroy() { FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { uptr stack_size = this->stack_size(); if (stack_size == 0) // stack_size is not yet available, don't use FakeStack. 
- return 0; + return nullptr; uptr old_val = 0; // fake_stack_ has 3 states: // 0 -- not initialized @@ -135,11 +144,11 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() { SetTLSFakeStack(fake_stack_); return fake_stack_; } - return 0; + return nullptr; } void AsanThread::Init() { - fake_stack_ = 0; // Will be initialized lazily if needed. + fake_stack_ = nullptr; // Will be initialized lazily if needed. CHECK_EQ(this->stack_size(), 0U); SetThreadStackAndTls(); CHECK_GT(this->stack_size(), 0U); @@ -150,12 +159,15 @@ void AsanThread::Init() { VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(), (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_, &local); - AsanPlatformThreadInit(); } -thread_return_t AsanThread::ThreadStart(uptr os_id) { +thread_return_t AsanThread::ThreadStart( + uptr os_id, atomic_uintptr_t *signal_thread_is_registered) { Init(); - asanThreadRegistry().StartThread(tid(), os_id, 0); + asanThreadRegistry().StartThread(tid(), os_id, nullptr); + if (signal_thread_is_registered) + atomic_store(signal_thread_is_registered, 1, memory_order_release); + if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); if (!start_routine_) { @@ -262,7 +274,7 @@ AsanThread *GetCurrentThread() { return tctx->thread; } } - return 0; + return nullptr; } return context->thread; } @@ -287,7 +299,7 @@ AsanThread *FindThreadByStackAddress(uptr addr) { AsanThreadContext *tctx = static_cast<AsanThreadContext *>( asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress, (void *)addr)); - return tctx ? tctx->thread : 0; + return tctx ? tctx->thread : nullptr; } void EnsureMainThreadIDIsCorrect() { @@ -300,10 +312,10 @@ void EnsureMainThreadIDIsCorrect() { __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) { __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>( __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id)); - if (!context) return 0; + if (!context) return nullptr; return context->thread; } -} // namespace __asan +} // namespace __asan // --- Implementation of LSan-specific functions --- {{{1 namespace __lsan { @@ -340,4 +352,4 @@ void UnlockThreadRegistry() { void EnsureMainThreadIDIsCorrect() { __asan::EnsureMainThreadIDIsCorrect(); } -} // namespace __lsan +} // namespace __lsan diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h index f5ea53d051f..27a3367d8e7 100644 --- a/libsanitizer/asan/asan_thread.h +++ b/libsanitizer/asan/asan_thread.h @@ -9,6 +9,7 @@ // // ASan-private header for asan_thread.cc. //===----------------------------------------------------------------------===// + #ifndef ASAN_THREAD_H #define ASAN_THREAD_H @@ -32,19 +33,16 @@ class AsanThread; class AsanThreadContext : public ThreadContextBase { public: explicit AsanThreadContext(int tid) - : ThreadContextBase(tid), - announced(false), - destructor_iterations(kPthreadDestructorIterations), - stack_id(0), - thread(0) { - } + : ThreadContextBase(tid), announced(false), + destructor_iterations(GetPthreadDestructorIterations()), stack_id(0), + thread(nullptr) {} bool announced; u8 destructor_iterations; u32 stack_id; AsanThread *thread; - void OnCreated(void *arg); - void OnFinished(); + void OnCreated(void *arg) override; + void OnFinished() override; }; // AsanThreadContext objects are never freed, so we need many of them. @@ -53,12 +51,14 @@ COMPILER_CHECK(sizeof(AsanThreadContext) <= 256); // AsanThread are stored in TSD and destroyed when the thread dies. 
class AsanThread { public: - static AsanThread *Create(thread_callback_t start_routine, void *arg); + static AsanThread *Create(thread_callback_t start_routine, void *arg, + u32 parent_tid, StackTrace *stack, bool detached); static void TSDDtor(void *tsd); void Destroy(); void Init(); // Should be called from the thread itself. - thread_return_t ThreadStart(uptr os_id); + thread_return_t ThreadStart(uptr os_id, + atomic_uintptr_t *signal_thread_is_registered); uptr stack_top() { return stack_top_; } uptr stack_bottom() { return stack_bottom_; } @@ -83,8 +83,8 @@ class AsanThread { void DeleteFakeStack(int tid) { if (!fake_stack_) return; FakeStack *t = fake_stack_; - fake_stack_ = 0; - SetTLSFakeStack(0); + fake_stack_ = nullptr; + SetTLSFakeStack(nullptr); t->Destroy(tid); } @@ -94,7 +94,7 @@ class AsanThread { FakeStack *fake_stack() { if (!__asan_option_detect_stack_use_after_return) - return 0; + return nullptr; if (!has_fake_stack()) return AsyncSignalSafeLazyInitFakeStack(); return fake_stack_; @@ -164,11 +164,6 @@ class ScopedDeadlySignal { AsanThread *thread; }; -struct CreateThreadContextArgs { - AsanThread *thread; - StackTrace *stack; -}; - // Returns a single instance of registry. ThreadRegistry &asanThreadRegistry(); @@ -183,6 +178,6 @@ AsanThread *FindThreadByStackAddress(uptr addr); // Used to handle fork(). void EnsureMainThreadIDIsCorrect(); -} // namespace __asan +} // namespace __asan -#endif // ASAN_THREAD_H +#endif // ASAN_THREAD_H diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc index b0028763b11..caec9f83d68 100644 --- a/libsanitizer/asan/asan_win.cc +++ b/libsanitizer/asan/asan_win.cc @@ -14,27 +14,139 @@ #if SANITIZER_WINDOWS #include <windows.h> -#include <dbghelp.h> #include <stdlib.h> #include "asan_interceptors.h" #include "asan_internal.h" #include "asan_report.h" +#include "asan_stack.h" #include "asan_thread.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_mutex.h" +using namespace __asan; // NOLINT + extern "C" { - SANITIZER_INTERFACE_ATTRIBUTE - int __asan_should_detect_stack_use_after_return() { - __asan_init(); - return __asan_option_detect_stack_use_after_return; - } +SANITIZER_INTERFACE_ATTRIBUTE +int __asan_should_detect_stack_use_after_return() { + __asan_init(); + return __asan_option_detect_stack_use_after_return; +} + +// -------------------- A workaround for the abscence of weak symbols ----- {{{ +// We don't have a direct equivalent of weak symbols when using MSVC, but we can +// use the /alternatename directive to tell the linker to default a specific +// symbol to a specific value, which works nicely for allocator hooks and +// __asan_default_options(). 
+void __sanitizer_default_malloc_hook(void *ptr, uptr size) { } +void __sanitizer_default_free_hook(void *ptr) { } +const char* __asan_default_default_options() { return ""; } +const char* __asan_default_default_suppressions() { return ""; } +void __asan_default_on_error() {} +#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT +#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT +#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT +#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT +#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT +// }}} +} // extern "C" + +// ---------------------- Windows-specific inteceptors ---------------- {{{ +INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) { + CHECK(REAL(RaiseException)); + __asan_handle_no_return(); + REAL(RaiseException)(a, b, c, d); +} + +INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler3)); + __asan_handle_no_return(); + return REAL(_except_handler3)(a, b, c, d); +} + +#if ASAN_DYNAMIC +// This handler is named differently in -MT and -MD CRTs. +#define _except_handler4 _except_handler4_common +#endif +INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) { + CHECK(REAL(_except_handler4)); + __asan_handle_no_return(); + return REAL(_except_handler4)(a, b, c, d); +} + +static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) { + AsanThread *t = (AsanThread*)arg; + SetCurrentThread(t); + return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr); +} + +INTERCEPTOR_WINAPI(DWORD, CreateThread, + void* security, uptr stack_size, + DWORD (__stdcall *start_routine)(void*), void* arg, + DWORD thr_flags, void* tid) { + // Strict init-order checking is thread-hostile. + if (flags()->strict_init_order) + StopInitOrderChecking(); + GET_STACK_TRACE_THREAD; + // FIXME: The CreateThread interceptor is not the same as a pthread_create + // one. This is a bandaid fix for PR22025. + bool detached = false; // FIXME: how can we determine it on Windows? + u32 current_tid = GetCurrentTidOrInvalid(); + AsanThread *t = + AsanThread::Create(start_routine, arg, current_tid, &stack, detached); + return REAL(CreateThread)(security, stack_size, + asan_thread_start, t, thr_flags, tid); +} + +namespace { +BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED); + +void EnsureWorkerThreadRegistered() { + // FIXME: GetCurrentThread relies on TSD, which might not play well with + // system thread pools. We might want to use something like reference + // counting to zero out GetCurrentThread() underlying storage when the last + // work item finishes? Or can we disable reclaiming of threads in the pool? 
+ BlockingMutexLock l(&mu_for_thread_tracking); + if (__asan::GetCurrentThread()) + return; + + AsanThread *t = AsanThread::Create( + /* start_routine */ nullptr, /* arg */ nullptr, + /* parent_tid */ -1, /* stack */ nullptr, /* detached */ true); + t->Init(); + asanThreadRegistry().StartThread(t->tid(), 0, 0); + SetCurrentThread(t); +} +} // namespace + +INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) { + // NtWaitForWorkViaWorkerFactory is called from system worker pool threads to + // query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc. + // System worker pool threads are created at arbitraty point in time and + // without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory + // instead and don't register a specific parent_tid/stack. + EnsureWorkerThreadRegistered(); + return REAL(NtWaitForWorkViaWorkerFactory)(a, b); } +// }}} + namespace __asan { -// ---------------------- TSD ---------------- {{{1 +void InitializePlatformInterceptors() { + ASAN_INTERCEPT_FUNC(CreateThread); + ASAN_INTERCEPT_FUNC(RaiseException); + ASAN_INTERCEPT_FUNC(_except_handler3); + ASAN_INTERCEPT_FUNC(_except_handler4); + + // NtWaitForWorkViaWorkerFactory is always linked dynamically. + CHECK(::__interception::OverrideFunction( + "NtWaitForWorkViaWorkerFactory", + (uptr)WRAP(NtWaitForWorkViaWorkerFactory), + (uptr *)&REAL(NtWaitForWorkViaWorkerFactory))); +} + +// ---------------------- TSD ---------------- {{{ static bool tsd_key_inited = false; static __declspec(thread) void *fake_tsd = 0; @@ -57,7 +169,13 @@ void AsanTSDSet(void *tsd) { void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); } -// ---------------------- Various stuff ---------------- {{{1 +// }}} + +// ---------------------- Various stuff ---------------- {{{ +void DisableReexec() { + // No need to re-exec on Windows. +} + void MaybeReexec() { // No need to re-exec on Windows. } @@ -73,15 +191,11 @@ void AsanCheckDynamicRTPrereqs() {} void AsanCheckIncompatibleRT() {} -void AsanPlatformThreadInit() { - // Nothing here for now. -} - void ReadContextStack(void *context, uptr *stack, uptr *ssize) { UNIMPLEMENTED(); } -void AsanOnSIGSEGV(int, void *siginfo, void *context) { +void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); } @@ -90,12 +204,6 @@ static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler; static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { EXCEPTION_RECORD *exception_record = info->ExceptionRecord; CONTEXT *context = info->ContextRecord; - uptr pc = (uptr)exception_record->ExceptionAddress; -#ifdef _WIN64 - uptr bp = (uptr)context->Rbp, sp = (uptr)context->Rsp; -#else - uptr bp = (uptr)context->Ebp, sp = (uptr)context->Esp; -#endif if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION || exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) { @@ -103,8 +211,8 @@ static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) { (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) ? "access-violation" : "in-page-error"; - uptr access_addr = exception_record->ExceptionInformation[1]; - ReportSIGSEGV(description, pc, sp, bp, context, access_addr); + SignalContext sig = SignalContext::Create(exception_record, context); + ReportDeadlySignal(description, sig); } // FIXME: Handle EXCEPTION_STACK_OVERFLOW here. @@ -142,10 +250,10 @@ int __asan_set_seh_filter() { // Put a pointer to __asan_set_seh_filter at the end of the global list // of C initializers, after the default EH is set by the CRT. 
#pragma section(".CRT$XIZ", long, read) // NOLINT -static __declspec(allocate(".CRT$XIZ")) +__declspec(allocate(".CRT$XIZ")) int (*__intercept_seh)() = __asan_set_seh_filter; #endif - +// }}} } // namespace __asan #endif // _WIN32 diff --git a/libsanitizer/asan/asan_win_dll_thunk.cc b/libsanitizer/asan/asan_win_dll_thunk.cc index 6adb7d2e942..19d73f9f89e 100644 --- a/libsanitizer/asan/asan_win_dll_thunk.cc +++ b/libsanitizer/asan/asan_win_dll_thunk.cc @@ -19,7 +19,7 @@ // simplifies the build procedure. #ifdef ASAN_DLL_THUNK #include "asan_init_version.h" -#include "sanitizer_common/sanitizer_interception.h" +#include "interception/interception.h" // ---------- Function interception helper functions and macros ----------- {{{1 extern "C" { @@ -28,8 +28,9 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name); void abort(); } -static void *getRealProcAddressOrDie(const char *name) { - void *ret = GetProcAddress(GetModuleHandleA(0), name); +static uptr getRealProcAddressOrDie(const char *name) { + uptr ret = + __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name); if (!ret) abort(); return ret; @@ -60,13 +61,12 @@ struct FunctionInterceptor<0> { }; #define INTERCEPT_WHEN_POSSIBLE(main_function, dll_function) \ - template<> struct FunctionInterceptor<__LINE__> { \ + template <> struct FunctionInterceptor<__LINE__> { \ static void Execute() { \ - void *wrapper = getRealProcAddressOrDie(main_function); \ - if (!__interception::OverrideFunction((uptr)dll_function, \ - (uptr)wrapper, 0)) \ + uptr wrapper = getRealProcAddressOrDie(main_function); \ + if (!__interception::OverrideFunction((uptr)dll_function, wrapper, 0)) \ abort(); \ - FunctionInterceptor<__LINE__-1>::Execute(); \ + FunctionInterceptor<__LINE__ - 1>::Execute(); \ } \ }; @@ -208,7 +208,7 @@ extern "C" { // __asan_init is expected to be called by only one thread. if (fn) return; - fn = (fntype)getRealProcAddressOrDie(__asan_init_name); + fn = (fntype)getRealProcAddressOrDie("__asan_init"); fn(); __asan_option_detect_stack_use_after_return = (__asan_should_detect_stack_use_after_return() != 0); @@ -217,6 +217,10 @@ extern "C" { } } +extern "C" void __asan_version_mismatch_check() { + // Do nothing. +} + INTERFACE_FUNCTION(__asan_handle_no_return) INTERFACE_FUNCTION(__asan_report_store1) @@ -292,7 +296,45 @@ INTERFACE_FUNCTION(__asan_stack_free_8) INTERFACE_FUNCTION(__asan_stack_free_9) INTERFACE_FUNCTION(__asan_stack_free_10) +// FIXME: we might want to have a sanitizer_win_dll_thunk? 
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container) +INTERFACE_FUNCTION(__sanitizer_cov) +INTERFACE_FUNCTION(__sanitizer_cov_dump) +INTERFACE_FUNCTION(__sanitizer_cov_indir_call16) +INTERFACE_FUNCTION(__sanitizer_cov_init) INTERFACE_FUNCTION(__sanitizer_cov_module_init) +INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block) +INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter) +INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp) +INTERFACE_FUNCTION(__sanitizer_cov_trace_switch) +INTERFACE_FUNCTION(__sanitizer_cov_with_check) +INTERFACE_FUNCTION(__sanitizer_get_allocated_size) +INTERFACE_FUNCTION(__sanitizer_get_coverage_guards) +INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes) +INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size) +INTERFACE_FUNCTION(__sanitizer_get_free_bytes) +INTERFACE_FUNCTION(__sanitizer_get_heap_size) +INTERFACE_FUNCTION(__sanitizer_get_ownership) +INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage) +INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes) +INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file) +INTERFACE_FUNCTION(__sanitizer_print_stack_trace) +INTERFACE_FUNCTION(__sanitizer_ptr_cmp) +INTERFACE_FUNCTION(__sanitizer_ptr_sub) +INTERFACE_FUNCTION(__sanitizer_report_error_summary) +INTERFACE_FUNCTION(__sanitizer_reset_coverage) +INTERFACE_FUNCTION(__sanitizer_get_number_of_counters) +INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters) +INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify) +INTERFACE_FUNCTION(__sanitizer_set_death_callback) +INTERFACE_FUNCTION(__sanitizer_set_report_path) +INTERFACE_FUNCTION(__sanitizer_unaligned_load16) +INTERFACE_FUNCTION(__sanitizer_unaligned_load32) +INTERFACE_FUNCTION(__sanitizer_unaligned_load64) +INTERFACE_FUNCTION(__sanitizer_unaligned_store16) +INTERFACE_FUNCTION(__sanitizer_unaligned_store32) +INTERFACE_FUNCTION(__sanitizer_unaligned_store64) +INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container) // TODO(timurrrr): Add more interface functions on the as-needed basis. @@ -342,11 +384,15 @@ INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT INTERCEPT_LIBRARY_FUNCTION(strchr); INTERCEPT_LIBRARY_FUNCTION(strcmp); INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT +INTERCEPT_LIBRARY_FUNCTION(strcspn); INTERCEPT_LIBRARY_FUNCTION(strlen); INTERCEPT_LIBRARY_FUNCTION(strncat); INTERCEPT_LIBRARY_FUNCTION(strncmp); INTERCEPT_LIBRARY_FUNCTION(strncpy); INTERCEPT_LIBRARY_FUNCTION(strnlen); +INTERCEPT_LIBRARY_FUNCTION(strpbrk); +INTERCEPT_LIBRARY_FUNCTION(strspn); +INTERCEPT_LIBRARY_FUNCTION(strstr); INTERCEPT_LIBRARY_FUNCTION(strtol); INTERCEPT_LIBRARY_FUNCTION(wcslen); diff --git a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc index 1b59677eeff..69da2a83c47 100644 --- a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc +++ b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc @@ -13,7 +13,8 @@ // // This includes: // - forwarding the detect_stack_use_after_return runtime option -// - installing a custom SEH handler +// - working around deficiencies of the MD runtime +// - installing a custom SEH handlerx // //===----------------------------------------------------------------------===// @@ -21,10 +22,15 @@ // Using #ifdef rather than relying on Makefiles etc. // simplifies the build procedure. 
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK -extern "C" { -__declspec(dllimport) int __asan_set_seh_filter(); -__declspec(dllimport) int __asan_should_detect_stack_use_after_return(); +#include <windows.h> +// First, declare CRT sections we'll be using in this file +#pragma section(".CRT$XID", long, read) // NOLINT +#pragma section(".CRT$XIZ", long, read) // NOLINT +#pragma section(".CRT$XTW", long, read) // NOLINT +#pragma section(".CRT$XTY", long, read) // NOLINT + +//////////////////////////////////////////////////////////////////////////////// // Define a copy of __asan_option_detect_stack_use_after_return that should be // used when linking an MD runtime with a set of object files on Windows. // @@ -35,16 +41,55 @@ __declspec(dllimport) int __asan_should_detect_stack_use_after_return(); // with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows // just to work around this issue, let's clone the a variable that is // constant after initialization anyways. +extern "C" { +__declspec(dllimport) int __asan_should_detect_stack_use_after_return(); int __asan_option_detect_stack_use_after_return = __asan_should_detect_stack_use_after_return(); +} + +//////////////////////////////////////////////////////////////////////////////// +// For some reason, the MD CRT doesn't call the C/C++ terminators during on DLL +// unload or on exit. ASan relies on LLVM global_dtors to call +// __asan_unregister_globals on these events, which unfortunately doesn't work +// with the MD runtime, see PR22545 for the details. +// To work around this, for each DLL we schedule a call to UnregisterGlobals +// using atexit() that calls a small subset of C terminators +// where LLVM global_dtors is placed. Fingers crossed, no other C terminators +// are there. +extern "C" void __cdecl _initterm(void *a, void *b); + +namespace { +__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0; +__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0; + +void UnregisterGlobals() { + _initterm(&before_global_dtors, &after_global_dtors); +} + +int ScheduleUnregisterGlobals() { + return atexit(UnregisterGlobals); +} + +// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after +// atexit() is initialized (.CRT$XIC). As this is executed before C++ +// initializers (think ctors for globals), UnregisterGlobals gets executed after +// dtors for C++ globals. +__declspec(allocate(".CRT$XID")) +int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals; + +} // namespace + +//////////////////////////////////////////////////////////////////////////////// +// ASan SEH handling. +// We need to set the ASan-specific SEH handler at the end of CRT initialization +// of each module (see also asan_win.cc). +extern "C" { +__declspec(dllimport) int __asan_set_seh_filter(); +static int SetSEHFilter() { return __asan_set_seh_filter(); } -// Set the ASan-specific SEH handler at the end of CRT initialization of each -// module (see asan_win.cc for the details). -// // Unfortunately, putting a pointer to __asan_set_seh_filter into // __asan_intercept_seh gets optimized out, so we have to use an extra function. 
-static int SetSEHFilter() { return __asan_set_seh_filter(); } -#pragma section(".CRT$XIZ", long, read) // NOLINT __declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter; } + #endif // ASAN_DYNAMIC_RUNTIME_THUNK diff --git a/libsanitizer/asan/libtool-version b/libsanitizer/asan/libtool-version index f18f40737c7..7e838a5740b 100644 --- a/libsanitizer/asan/libtool-version +++ b/libsanitizer/asan/libtool-version @@ -3,4 +3,4 @@ # a separate file so that version updates don't involve re-running # automake. # CURRENT:REVISION:AGE -2:0:0 +3:0:0 diff --git a/libsanitizer/configure b/libsanitizer/configure index 809f0d76bd3..80655f05e97 100755 --- a/libsanitizer/configure +++ b/libsanitizer/configure @@ -616,6 +616,8 @@ BACKTRACE_SUPPORTED FORMAT_FILE SANITIZER_SUPPORTED_FALSE SANITIZER_SUPPORTED_TRUE +USE_CXX_ABI_FLAG_FALSE +USE_CXX_ABI_FLAG_TRUE USING_MAC_INTERPOSE_FALSE USING_MAC_INTERPOSE_TRUE link_liblsan @@ -12027,7 +12029,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 12030 "configure" +#line 12032 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -12133,7 +12135,7 @@ else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF -#line 12136 "configure" +#line 12138 "configure" #include "confdefs.h" #if HAVE_DLFCN_H @@ -15514,7 +15516,7 @@ done # Common libraries that we need to link against for all sanitizer libs. -link_sanitizer_common='-lpthread -ldl -lm' +link_sanitizer_common='-lrt -lpthread -ldl -lm' # Set up the set of additional libraries that we need to link against for libasan. link_libasan=$link_sanitizer_common @@ -15532,58 +15534,9 @@ link_libubsan=$link_sanitizer_common link_liblsan=$link_sanitizer_common -# At least for glibc, clock_gettime is in librt. But don't pull that -# in if it still doesn't give us the function we want. This -# test is copied from libgomp. -if test $ac_cv_func_clock_gettime = no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for clock_gettime in -lrt" >&5 -$as_echo_n "checking for clock_gettime in -lrt... " >&6; } -if test "${ac_cv_lib_rt_clock_gettime+set}" = set; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lrt $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char clock_gettime (); -int -main () -{ -return clock_gettime (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_rt_clock_gettime=yes -else - ac_cv_lib_rt_clock_gettime=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_rt_clock_gettime" >&5 -$as_echo "$ac_cv_lib_rt_clock_gettime" >&6; } -if test "x$ac_cv_lib_rt_clock_gettime" = x""yes; then : - link_libasan="-lrt $link_libasan" -link_libtsan="-lrt $link_libtsan" -# Other sanitizers do not override clock_* API - -fi - -fi - case "$host" in - *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;; - *) MAC_INTERPOSE=false ;; + *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;; + *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;; esac if $MAC_INTERPOSE; then USING_MAC_INTERPOSE_TRUE= @@ -15593,6 +15546,14 @@ else USING_MAC_INTERPOSE_FALSE= fi + if $CXX_ABI_NEEDED; then + USE_CXX_ABI_FLAG_TRUE= + USE_CXX_ABI_FLAG_FALSE='#' +else + USE_CXX_ABI_FLAG_TRUE='#' + USE_CXX_ABI_FLAG_FALSE= +fi + backtrace_supported=yes @@ -16550,6 +16511,10 @@ if test -z "${USING_MAC_INTERPOSE_TRUE}" && test -z "${USING_MAC_INTERPOSE_FALSE as_fn_error "conditional \"USING_MAC_INTERPOSE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${USE_CXX_ABI_FLAG_TRUE}" && test -z "${USE_CXX_ABI_FLAG_FALSE}"; then + as_fn_error "conditional \"USE_CXX_ABI_FLAG\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${SANITIZER_SUPPORTED_TRUE}" && test -z "${SANITIZER_SUPPORTED_FALSE}"; then as_fn_error "conditional \"SANITIZER_SUPPORTED\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 diff --git a/libsanitizer/configure.ac b/libsanitizer/configure.ac index 11e2d9961d6..ece15ed13dc 100644 --- a/libsanitizer/configure.ac +++ b/libsanitizer/configure.ac @@ -96,7 +96,7 @@ AM_CONDITIONAL(LSAN_SUPPORTED, [test "x$LSAN_SUPPORTED" = "xyes"]) AC_CHECK_FUNCS(clock_getres clock_gettime clock_settime) # Common libraries that we need to link against for all sanitizer libs. -link_sanitizer_common='-lpthread -ldl -lm' +link_sanitizer_common='-lrt -lpthread -ldl -lm' # Set up the set of additional libraries that we need to link against for libasan. link_libasan=$link_sanitizer_common @@ -114,22 +114,12 @@ AC_SUBST(link_libubsan) link_liblsan=$link_sanitizer_common AC_SUBST(link_liblsan) -# At least for glibc, clock_gettime is in librt. But don't pull that -# in if it still doesn't give us the function we want. This -# test is copied from libgomp. 
-if test $ac_cv_func_clock_gettime = no; then - AC_CHECK_LIB(rt, clock_gettime, - [link_libasan="-lrt $link_libasan" -link_libtsan="-lrt $link_libtsan" -# Other sanitizers do not override clock_* API -]) -fi - case "$host" in - *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ;; - *) MAC_INTERPOSE=false ;; + *-*-darwin*) MAC_INTERPOSE=true ; enable_static=no ; CXX_ABI_NEEDED=true ;; + *) MAC_INTERPOSE=false ; CXX_ABI_NEEDED=false ;; esac AM_CONDITIONAL(USING_MAC_INTERPOSE, $MAC_INTERPOSE) +AM_CONDITIONAL(USE_CXX_ABI_FLAG, $CXX_ABI_NEEDED) backtrace_supported=yes diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt index ac0eb7a4460..1ba3718cdbe 100644 --- a/libsanitizer/configure.tgt +++ b/libsanitizer/configure.tgt @@ -35,6 +35,9 @@ case "${target}" in arm*-*-linux*) ;; aarch64*-*-linux*) + if test x$ac_cv_sizeof_void_p = x8; then + TSAN_SUPPORTED=yes + fi ;; x86_64-*-darwin[1]* | i?86-*-darwin[1]*) TSAN_SUPPORTED=no diff --git a/libsanitizer/include/sanitizer/asan_interface.h b/libsanitizer/include/sanitizer/asan_interface.h index 023fa29c60f..448a0bcba01 100644 --- a/libsanitizer/include/sanitizer/asan_interface.h +++ b/libsanitizer/include/sanitizer/asan_interface.h @@ -108,12 +108,7 @@ extern "C" { void __asan_report_error(void *pc, void *bp, void *sp, void *addr, int is_write, size_t access_size); - // Sets the exit code to use when reporting an error. - // Returns the old value. - int __asan_set_error_exit_code(int exit_code); - - // Sets the callback to be called right before death on error. - // Passing 0 will unset the callback. + // Deprecated. Call __sanitizer_set_death_callback instead. void __asan_set_death_callback(void (*callback)(void)); void __asan_set_error_report_callback(void (*callback)(const char*)); diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h index 3aba519327e..6a97567fea6 100644 --- a/libsanitizer/include/sanitizer/common_interface_defs.h +++ b/libsanitizer/include/sanitizer/common_interface_defs.h @@ -60,15 +60,6 @@ extern "C" { void __sanitizer_unaligned_store32(void *p, uint32_t x); void __sanitizer_unaligned_store64(void *p, uint64_t x); - // Initialize coverage. - void __sanitizer_cov_init(); - // Record and dump coverage info. - void __sanitizer_cov_dump(); - // Open <name>.sancov.packed in the coverage directory and return the file - // descriptor. Returns -1 on failure, or if coverage dumping is disabled. - // This is intended for use by sandboxing code. - intptr_t __sanitizer_maybe_open_cov_file(const char *name); - // Annotate the current state of a contiguous container, such as // std::vector, std::string or similar. // A contiguous container is a container that keeps all of its elements @@ -115,6 +106,20 @@ extern "C" { // Print the stack trace leading to this call. Useful for debugging user code. void __sanitizer_print_stack_trace(); + // Sets the callback to be called right before death on error. + // Passing 0 will unset the callback. + void __sanitizer_set_death_callback(void (*callback)(void)); + + // Interceptor hooks. + // Whenever a libc function interceptor is called it checks if the + // corresponding weak hook is defined, and it so -- calls it. + // The primary use case is data-flow-guided fuzzing, where the fuzzer needs + // to know what is being passed to libc functions, e.g. memcmp. + // FIXME: implement more hooks. 
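The interceptor-hook comment above describes weak callbacks that the libc interceptors invoke when the user provides them; the exact declarations follow below. As a rough illustration of the intended use, a fuzzer could supply a strong definition along these lines (a sketch only: the body and the logging are invented, and it assumes a platform where the weak-hook mechanism is supported; only the signature comes from this header).

#include <sanitizer/common_interface_defs.h>
#include <stddef.h>
#include <stdio.h>

// Hypothetical user-side hook: observe memcmp operands for data-flow-guided
// fuzzing. Providing this strong definition overrides the weak default.
extern "C" void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
                                              const void *s2, size_t n) {
  fprintf(stderr, "memcmp(%p, %p, %zu) from %p\n", s1, s2, n, called_pc);
}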
+ void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1, + const void *s2, size_t n); + void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1, + const char *s2, size_t n); #ifdef __cplusplus } // extern "C" #endif diff --git a/libsanitizer/include/sanitizer/coverage_interface.h b/libsanitizer/include/sanitizer/coverage_interface.h new file mode 100644 index 00000000000..1b7e2a4cdd8 --- /dev/null +++ b/libsanitizer/include/sanitizer/coverage_interface.h @@ -0,0 +1,61 @@ +//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Public interface for sanitizer coverage. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_COVERAG_INTERFACE_H +#define SANITIZER_COVERAG_INTERFACE_H + +#include <sanitizer/common_interface_defs.h> + +#ifdef __cplusplus +extern "C" { +#endif + + // Initialize coverage. + void __sanitizer_cov_init(); + // Record and dump coverage info. + void __sanitizer_cov_dump(); + // Open <name>.sancov.packed in the coverage directory and return the file + // descriptor. Returns -1 on failure, or if coverage dumping is disabled. + // This is intended for use by sandboxing code. + intptr_t __sanitizer_maybe_open_cov_file(const char *name); + // Get the number of total unique covered entities (blocks, edges, calls). + // This can be useful for coverage-directed in-process fuzzers. + uintptr_t __sanitizer_get_total_unique_coverage(); + + // Reset the basic-block (edge) coverage to the initial state. + // Useful for in-process fuzzing to start collecting coverage from scratch. + // Experimental, will likely not work for multi-threaded process. + void __sanitizer_reset_coverage(); + // Set *data to the array of covered PCs and return the size of that array. + // Some of the entries in *data will be zero. + uintptr_t __sanitizer_get_coverage_guards(uintptr_t **data); + + // The coverage instrumentation may optionally provide imprecise counters. + // Rather than exposing the counter values to the user we instead map + // the counters to a bitset. + // Every counter is associated with 8 bits in the bitset. + // We define 8 value ranges: 1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+ + // The i-th bit is set to 1 if the counter value is in the i-th range. + // This counter-based coverage implementation is *not* thread-safe. + + // Returns the number of registered coverage counters. + uintptr_t __sanitizer_get_number_of_counters(); + // Updates the counter 'bitset', clears the counters and returns the number of + // new bits in 'bitset'. + // If 'bitset' is nullptr, only clears the counters. + // Otherwise 'bitset' should be at least + // __sanitizer_get_number_of_counters bytes long and 8-aligned. + uintptr_t + __sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset); +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // SANITIZER_COVERAG_INTERFACE_H diff --git a/libsanitizer/include/sanitizer/dfsan_interface.h b/libsanitizer/include/sanitizer/dfsan_interface.h index c1b160205a7..0cebccf945e 100644 --- a/libsanitizer/include/sanitizer/dfsan_interface.h +++ b/libsanitizer/include/sanitizer/dfsan_interface.h @@ -83,6 +83,24 @@ size_t dfsan_get_label_count(void); /// callback executes. Pass in NULL to remove any callback. 
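The counter-based coverage comment in the new coverage_interface.h above maps every counter to eight bits using the value ranges 1, 2, 3, 4-7, 8-15, 16-31, 32-127 and 128+. A hedged sketch of that mapping as described (the helper name is invented and this is not the runtime's code; presumably the runtime ORs such masks into the per-counter byte across calls):

#include <stdint.h>

// Hypothetical helper: bit i is set when the counter value falls into the
// i-th range listed in the header comment above.
static uint8_t CounterToBitset(uintptr_t v) {
  if (v == 0) return 0;         // untouched counter: no bits set
  if (v == 1) return 1 << 0;
  if (v == 2) return 1 << 1;
  if (v == 3) return 1 << 2;
  if (v < 8) return 1 << 3;     // 4-7
  if (v < 16) return 1 << 4;    // 8-15
  if (v < 32) return 1 << 5;    // 16-31
  if (v < 128) return 1 << 6;   // 32-127
  return 1 << 7;                // 128+
}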
void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback); +/// Writes the labels currently used by the program to the given file +/// descriptor. The lines of the output have the following format: +/// +/// <label> <parent label 1> <parent label 2> <label description if any> +void dfsan_dump_labels(int fd); + +/// Interceptor hooks. +/// Whenever a dfsan's custom function is called the corresponding +/// hook is called it non-zero. The hooks should be defined by the user. +/// The primary use case is taint-guided fuzzing, where the fuzzer +/// needs to see the parameters of the function and the labels. +/// FIXME: implement more hooks. +void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2, + size_t n, dfsan_label s1_label, + dfsan_label s2_label, dfsan_label n_label); +void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2, + size_t n, dfsan_label s1_label, + dfsan_label s2_label, dfsan_label n_label); #ifdef __cplusplus } // extern "C" diff --git a/libsanitizer/include/sanitizer/lsan_interface.h b/libsanitizer/include/sanitizer/lsan_interface.h index 95e79245ec0..bdbe39084f7 100644 --- a/libsanitizer/include/sanitizer/lsan_interface.h +++ b/libsanitizer/include/sanitizer/lsan_interface.h @@ -39,14 +39,25 @@ extern "C" { void __lsan_register_root_region(const void *p, size_t size); void __lsan_unregister_root_region(const void *p, size_t size); - // Calling this function makes LSan enter the leak checking phase immediately. - // Use this if normal end-of-process leak checking happens too late (e.g. if - // you have intentional memory leaks in your shutdown code). Calling this - // function overrides end-of-process leak checking; it must be called at - // most once per process. This function will terminate the process if there - // are memory leaks and the exit_code flag is non-zero. + // Check for leaks now. This function behaves identically to the default + // end-of-process leak check. In particular, it will terminate the process if + // leaks are found and the exitcode runtime flag is non-zero. + // Subsequent calls to this function will have no effect and end-of-process + // leak check will not run. Effectively, end-of-process leak check is moved to + // the time of first invocation of this function. + // By calling this function early during process shutdown, you can instruct + // LSan to ignore shutdown-only leaks which happen later on. void __lsan_do_leak_check(); + // Check for leaks now. Returns zero if no leaks have been found or if leak + // detection is disabled, non-zero otherwise. + // This function may be called repeatedly, e.g. to periodically check a + // long-running process. It prints a leak report if appropriate, but does not + // terminate the process. It does not affect the behavior of + // __lsan_do_leak_check() or the end-of-process leak check, and is not + // affected by them. + int __lsan_do_recoverable_leak_check(); + // The user may optionally provide this function to disallow leak checking // for the program it is linked into (if the return value is non-zero). This // function must be defined as returning a constant value; any behavior beyond diff --git a/libsanitizer/include/sanitizer/msan_interface.h b/libsanitizer/include/sanitizer/msan_interface.h index ea7ad1d7c9b..92793a19bde 100644 --- a/libsanitizer/include/sanitizer/msan_interface.h +++ b/libsanitizer/include/sanitizer/msan_interface.h @@ -23,6 +23,11 @@ extern "C" { /* Get raw origin for an address. 
*/ uint32_t __msan_get_origin(const volatile void *a); + /* Test that this_id is a descendant of prev_id (or they are simply equal). + * "descendant" here means they are part of the same chain, created with + * __msan_chain_origin. */ + int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id); + /* Returns non-zero if tracking origins. */ int __msan_get_track_origins(); @@ -36,7 +41,9 @@ extern "C" { contents). */ void __msan_unpoison_string(const volatile char *a); - /* Make memory region fully uninitialized (without changing its contents). */ + /* Make memory region fully uninitialized (without changing its contents). + This is a legacy interface that does not update origin information. Use + __msan_allocated_memory() instead. */ void __msan_poison(const volatile void *a, size_t size); /* Make memory region partially uninitialized (without changing its contents). @@ -52,10 +59,6 @@ extern "C" { * is not. */ void __msan_check_mem_is_initialized(const volatile void *x, size_t size); - /* Set exit code when error(s) were detected. - Value of 0 means don't change the program exit code. */ - void __msan_set_exit_code(int exit_code); - /* For testing: __msan_set_expect_umr(1); ... some buggy code ... @@ -83,14 +86,22 @@ extern "C" { Memory will be marked uninitialized, with origin at the call site. */ void __msan_allocated_memory(const volatile void* data, size_t size); + /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */ + void __sanitizer_dtor_callback(const volatile void* data, size_t size); + /* This function may be optionally provided by user and should return a string containing Msan runtime options. See msan_flags.h for details. */ const char* __msan_default_options(); - /* Sets the callback to be called right before death on error. - Passing 0 will unset the callback. */ + /* Deprecated. Call __sanitizer_set_death_callback instead. */ void __msan_set_death_callback(void (*callback)(void)); + /* Update shadow for the application copy of size bytes from src to dst. + Src and dst are application addresses. This function does not copy the + actual application memory, it only updates shadow and origin for such + copy. Source and destination regions can overlap. 
*/ + void __msan_copy_shadow(const volatile void *dst, const volatile void *src, + size_t size); #ifdef __cplusplus } // extern "C" #endif diff --git a/libsanitizer/interception/interception.h b/libsanitizer/interception/interception.h index 458928f14b1..f2d48c9332f 100644 --- a/libsanitizer/interception/interception.h +++ b/libsanitizer/interception/interception.h @@ -217,7 +217,6 @@ const interpose_substitution substitution_##func_name[] \ namespace __interception { \ FUNC_TYPE(func) PTR_TO_REAL(func); \ } \ - DECLARE_WRAPPER_WINAPI(ret_type, func, __VA_ARGS__) \ extern "C" \ INTERCEPTOR_ATTRIBUTE \ ret_type __stdcall WRAP(func)(__VA_ARGS__) diff --git a/libsanitizer/interception/interception_linux.h b/libsanitizer/interception/interception_linux.h index e2cc4c19dff..61bf48a7233 100644 --- a/libsanitizer/interception/interception_linux.h +++ b/libsanitizer/interception/interception_linux.h @@ -33,12 +33,12 @@ void *GetFuncAddrVer(const char *func_name, const char *ver); (::__interception::uptr) & WRAP(func)) #if !defined(__ANDROID__) // android does not have dlvsym -# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ - ::__interception::real_##func = (func##_f)(unsigned long) \ - ::__interception::GetFuncAddrVer(#func, symver) +#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ + (::__interception::real_##func = (func##_f)( \ + unsigned long)::__interception::GetFuncAddrVer(#func, symver)) #else -# define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ - INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) +#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \ + INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) #endif // !defined(__ANDROID__) #endif // INTERCEPTION_LINUX_H diff --git a/libsanitizer/interception/interception_win.cc b/libsanitizer/interception/interception_win.cc index f9c2e9b3ad8..24784c73ded 100644 --- a/libsanitizer/interception/interception_win.cc +++ b/libsanitizer/interception/interception_win.cc @@ -82,6 +82,7 @@ static size_t RoundUpToInstrBoundary(size_t size, char *code) { cursor += 2; continue; case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX + case '\xB8': // B8 XX YY ZZ WW = mov eax, WWZZYYXX cursor += 5; continue; } @@ -179,11 +180,15 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) { return true; } -static const void **InterestingDLLsAvailable() { - const char *InterestingDLLs[] = {"kernel32.dll", - "msvcr110.dll", // VS2012 - "msvcr120.dll", // VS2013 - NULL}; +static void **InterestingDLLsAvailable() { + const char *InterestingDLLs[] = { + "kernel32.dll", + "msvcr110.dll", // VS2012 + "msvcr120.dll", // VS2013 + // NTDLL should go last as it exports some functions that we should override + // in the CRT [presumably only used internally]. + "ntdll.dll", NULL + }; static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 }; if (!result[0]) { for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) { @@ -191,14 +196,65 @@ static const void **InterestingDLLsAvailable() { result[j++] = (void *)h; } } - return (const void **)&result[0]; + return &result[0]; +} + +namespace { +// Utility for reading loaded PE images. +template <typename T> class RVAPtr { + public: + RVAPtr(void *module, uptr rva) + : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {} + operator T *() { return ptr_; } + T *operator->() { return ptr_; } + T *operator++() { return ++ptr_; } + + private: + T *ptr_; +}; +} // namespace + +// Internal implementation of GetProcAddress. 
At least since Windows 8, +// GetProcAddress appears to initialize DLLs before returning function pointers +// into them. This is problematic for the sanitizers, because they typically +// want to intercept malloc *before* MSVCRT initializes. Our internal +// implementation walks the export list manually without doing initialization. +uptr InternalGetProcAddress(void *module, const char *func_name) { + // Check that the module header is full and present. + RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0); + RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew); + if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ" + headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0" + headers->FileHeader.SizeOfOptionalHeader < + sizeof(IMAGE_OPTIONAL_HEADER)) { + return 0; + } + + IMAGE_DATA_DIRECTORY *export_directory = + &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT]; + RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module, + export_directory->VirtualAddress); + RVAPtr<DWORD> functions(module, exports->AddressOfFunctions); + RVAPtr<DWORD> names(module, exports->AddressOfNames); + RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals); + + for (DWORD i = 0; i < exports->NumberOfNames; i++) { + RVAPtr<char> name(module, names[i]); + if (!strcmp(func_name, name)) { + DWORD index = ordinals[i]; + RVAPtr<char> func(module, functions[index]); + return (uptr)(char *)func; + } + } + + return 0; } static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) { *func_addr = 0; - const void **DLLs = InterestingDLLsAvailable(); + void **DLLs = InterestingDLLsAvailable(); for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i) - *func_addr = (uptr)GetProcAddress((HMODULE)DLLs[i], func_name); + *func_addr = InternalGetProcAddress(DLLs[i], func_name); return (*func_addr != 0); } diff --git a/libsanitizer/interception/interception_win.h b/libsanitizer/interception/interception_win.h index e9c6200d1b2..6388209dcc1 100644 --- a/libsanitizer/interception/interception_win.h +++ b/libsanitizer/interception/interception_win.h @@ -28,6 +28,10 @@ bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0); // Overrides a function in a system DLL or DLL CRT by its exported name. bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0); + +// Windows-only replacement for GetProcAddress. Useful for some sanitizers. +uptr InternalGetProcAddress(void *module, const char *func_name); + } // namespace __interception #if defined(INTERCEPTION_DYNAMIC_CRT) diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc index 61792d9c9ea..6e7429c95a5 100644 --- a/libsanitizer/lsan/lsan.cc +++ b/libsanitizer/lsan/lsan.cc @@ -13,6 +13,7 @@ #include "lsan.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "lsan_allocator.h" #include "lsan_common.h" @@ -32,13 +33,44 @@ bool WordIsPoisoned(uptr addr) { using namespace __lsan; // NOLINT +static void InitializeFlags() { + // Set all the default values. 
+ SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = 30; + cf.detect_leaks = true; + cf.exitcode = 23; + OverrideCommonFlags(cf); + } + + Flags *f = flags(); + f->SetDefaults(); + + FlagParser parser; + RegisterLsanFlags(&parser, f); + RegisterCommonFlags(&parser); + + parser.ParseString(GetEnv("LSAN_OPTIONS")); + + SetVerbosity(common_flags()->verbosity); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) parser.PrintFlagDescriptions(); +} + extern "C" void __lsan_init() { CHECK(!lsan_init_is_running); if (lsan_inited) return; lsan_init_is_running = true; SanitizerToolName = "LeakSanitizer"; - InitCommonLsan(true); + CacheBinaryName(); + InitializeFlags(); + InitCommonLsan(); InitializeAllocator(); InitTlsSize(); InitializeInterceptors(); @@ -50,6 +82,9 @@ extern "C" void __lsan_init() { if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) Atexit(DoLeakCheck); + + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + lsan_inited = true; lsan_init_is_running = false; } diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc index 2d406a0f852..486305bcc76 100644 --- a/libsanitizer/lsan/lsan_allocator.cc +++ b/libsanitizer/lsan/lsan_allocator.cc @@ -23,19 +23,29 @@ extern "C" void *memset(void *ptr, int value, uptr num); namespace __lsan { -static const uptr kMaxAllowedMallocSize = 8UL << 30; -static const uptr kAllocatorSpace = 0x600000000000ULL; -static const uptr kAllocatorSize = 0x40000000000ULL; // 4T. - struct ChunkMetadata { - bool allocated : 8; // Must be first. + u8 allocated : 8; // Must be first. ChunkTag tag : 2; uptr requested_size : 54; u32 stack_trace_id; }; +#if defined(__mips64) +static const uptr kMaxAllowedMallocSize = 4UL << 30; +static const uptr kRegionSizeLog = 20; +static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; +typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; +typedef CompactSizeClassMap SizeClassMap; +typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, + sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap> + PrimaryAllocator; +#else +static const uptr kMaxAllowedMallocSize = 8UL << 30; +static const uptr kAllocatorSpace = 0x600000000000ULL; +static const uptr kAllocatorSize = 0x40000000000ULL; // 4T. typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator; +#endif typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef LargeMmapAllocator<> SecondaryAllocator; typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, @@ -45,7 +55,7 @@ static Allocator allocator; static THREADLOCAL AllocatorCache cache; void InitializeAllocator() { - allocator.Init(); + allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null); } void AllocatorThreadFinish() { @@ -79,7 +89,7 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment, size = 1; if (size > kMaxAllowedMallocSize) { Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size); - return 0; + return nullptr; } void *p = allocator.Allocate(&cache, size, alignment, false); // Do not rely on the allocator to clear the memory (it's slow). 
@@ -102,7 +112,7 @@ void *Reallocate(const StackTrace &stack, void *p, uptr new_size, if (new_size > kMaxAllowedMallocSize) { Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size); allocator.Deallocate(&cache, p); - return 0; + return nullptr; } p = allocator.Reallocate(&cache, p, new_size, alignment); RegisterAllocation(stack, p, new_size); @@ -200,7 +210,7 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p) { return kIgnoreObjectInvalid; } } -} // namespace __lsan +} // namespace __lsan using namespace __lsan; @@ -229,10 +239,10 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } SANITIZER_INTERFACE_ATTRIBUTE -int __sanitizer_get_ownership(const void *p) { return Metadata(p) != 0; } +int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; } SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_allocated_size(const void *p) { return GetMallocUsableSize(p); } -} // extern "C" +} // extern "C" diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc index aa79a7e41c6..7efbf600da2 100644 --- a/libsanitizer/lsan/lsan_common.cc +++ b/libsanitizer/lsan/lsan_common.cc @@ -14,11 +14,11 @@ #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stackdepot.h" #include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_report_decorator.h" @@ -34,52 +34,17 @@ bool DisabledInThisThread() { return disable_counter > 0; } Flags lsan_flags; -static void InitializeFlags(bool standalone) { - Flags *f = flags(); - // Default values. - f->report_objects = false; - f->resolution = 0; - f->max_leaks = 0; - f->exitcode = 23; - f->use_registers = true; - f->use_globals = true; - f->use_stacks = true; - f->use_tls = true; - f->use_root_regions = true; - f->use_unaligned = false; - f->use_poisoned = false; - f->log_pointers = false; - f->log_threads = false; - - const char *options = GetEnv("LSAN_OPTIONS"); - if (options) { - ParseFlag(options, &f->use_registers, "use_registers", ""); - ParseFlag(options, &f->use_globals, "use_globals", ""); - ParseFlag(options, &f->use_stacks, "use_stacks", ""); - ParseFlag(options, &f->use_tls, "use_tls", ""); - ParseFlag(options, &f->use_root_regions, "use_root_regions", ""); - ParseFlag(options, &f->use_unaligned, "use_unaligned", ""); - ParseFlag(options, &f->use_poisoned, "use_poisoned", ""); - ParseFlag(options, &f->report_objects, "report_objects", ""); - ParseFlag(options, &f->resolution, "resolution", ""); - CHECK_GE(&f->resolution, 0); - ParseFlag(options, &f->max_leaks, "max_leaks", ""); - CHECK_GE(&f->max_leaks, 0); - ParseFlag(options, &f->log_pointers, "log_pointers", ""); - ParseFlag(options, &f->log_threads, "log_threads", ""); - ParseFlag(options, &f->exitcode, "exitcode", ""); - } +void Flags::SetDefaults() { +#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "lsan_flags.inc" +#undef LSAN_FLAG +} - // Set defaults for common flags (only in standalone mode) and parse - // them from LSAN_OPTIONS. 
- CommonFlags *cf = common_flags(); - if (standalone) { - SetCommonFlagsDefaults(cf); - cf->external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH"); - cf->malloc_context_size = 30; - cf->detect_leaks = true; - } - ParseCommonFlagsFromString(cf, options); +void RegisterLsanFlags(FlagParser *parser, Flags *f) { +#define LSAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "lsan_flags.inc" +#undef LSAN_FLAG } #define LOG_POINTERS(...) \ @@ -92,14 +57,23 @@ static void InitializeFlags(bool standalone) { if (flags()->log_threads) Report(__VA_ARGS__); \ } while (0); -static bool suppressions_inited = false; +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char kSuppressionLeak[] = "leak"; +static const char *kSuppressionTypes[] = { kSuppressionLeak }; void InitializeSuppressions() { - CHECK(!suppressions_inited); - SuppressionContext::InitIfNecessary(); + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) // NOLINT + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); if (&__lsan_default_suppressions) - SuppressionContext::Get()->Parse(__lsan_default_suppressions()); - suppressions_inited = true; + suppression_ctx->Parse(__lsan_default_suppressions()); +} + +static SuppressionContext *GetSuppressionContext() { + CHECK(suppression_ctx); + return suppression_ctx; } struct RootRegion { @@ -115,8 +89,7 @@ void InitializeRootRegions() { root_regions = new(placeholder) InternalMmapVector<RootRegion>(1); } -void InitCommonLsan(bool standalone) { - InitializeFlags(standalone); +void InitCommonLsan() { InitializeRootRegions(); if (common_flags()->detect_leaks) { // Initialization which can fail or print warnings should only be done if @@ -139,9 +112,11 @@ static inline bool CanBeAHeapPointer(uptr p) { // bound on heap addresses. const uptr kMinAddress = 4 * 4096; if (p < kMinAddress) return false; -#ifdef __x86_64__ +#if defined(__x86_64__) // Accept only canonical form user-space addresses. return ((p >> 47) == 0); +#elif defined(__mips64) + return ((p >> 40) == 0); #else return true; #endif @@ -149,13 +124,14 @@ static inline bool CanBeAHeapPointer(uptr p) { // Scans the memory range, looking for byte patterns that point into allocator // chunks. Marks those chunks with |tag| and adds them to |frontier|. -// There are two usage modes for this function: finding reachable or ignored -// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks +// There are two usage modes for this function: finding reachable chunks +// (|tag| = kReachable) and finding indirectly leaked chunks // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill, // so |frontier| = 0. void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, const char *region_type, ChunkTag tag) { + CHECK(tag == kReachable || tag == kIndirectlyLeaked); const uptr alignment = flags()->pointer_alignment(); LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end); uptr pp = begin; @@ -169,9 +145,7 @@ void ScanRangeForPointers(uptr begin, uptr end, // Pointers to self don't count. This matters when tag == kIndirectlyLeaked. if (chunk == begin) continue; LsanMetadata m(chunk); - // Reachable beats ignored beats leaked. 
- if (m.tag() == kReachable) continue; - if (m.tag() == kIgnored && tag != kReachable) continue; + if (m.tag() == kReachable || m.tag() == kIgnored) continue; // Do this check relatively late so we can log only the interesting cases. if (!flags()->use_poisoned && WordIsPoisoned(pp)) { @@ -267,8 +241,8 @@ static void ProcessRootRegion(Frontier *frontier, uptr root_begin, MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr begin, end, prot; while (proc_maps.Next(&begin, &end, - /*offset*/ 0, /*filename*/ 0, /*filename_size*/ 0, - &prot)) { + /*offset*/ nullptr, /*filename*/ nullptr, + /*filename_size*/ 0, &prot)) { uptr intersection_begin = Max(root_begin, begin); uptr intersection_end = Min(end, root_end); if (intersection_begin >= intersection_end) continue; @@ -310,7 +284,7 @@ static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) { LsanMetadata m(chunk); if (m.allocated() && m.tag() != kReachable) { ScanRangeForPointers(chunk, chunk + m.requested_size(), - /* frontier */ 0, "HEAP", kIndirectlyLeaked); + /* frontier */ nullptr, "HEAP", kIndirectlyLeaked); } } @@ -320,8 +294,11 @@ static void CollectIgnoredCb(uptr chunk, void *arg) { CHECK(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); - if (m.allocated() && m.tag() == kIgnored) + if (m.allocated() && m.tag() == kIgnored) { + LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", + chunk, chunk + m.requested_size(), m.requested_size()); reinterpret_cast<Frontier *>(arg)->push_back(chunk); + } } // Sets the appropriate tag on each chunk. @@ -329,26 +306,33 @@ static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) { // Holds the flood fill frontier. Frontier frontier(1); + ForEachChunk(CollectIgnoredCb, &frontier); ProcessGlobalRegions(&frontier); ProcessThreads(suspended_threads, &frontier); ProcessRootRegions(&frontier); FloodFillTag(&frontier, kReachable); + // The check here is relatively expensive, so we do this in a separate flood // fill. That way we can skip the check for chunks that are reachable // otherwise. LOG_POINTERS("Processing platform-specific allocations.\n"); + CHECK_EQ(0, frontier.size()); ProcessPlatformSpecificAllocations(&frontier); FloodFillTag(&frontier, kReachable); - LOG_POINTERS("Scanning ignored chunks.\n"); - CHECK_EQ(0, frontier.size()); - ForEachChunk(CollectIgnoredCb, &frontier); - FloodFillTag(&frontier, kIgnored); - // Iterate over leaked chunks and mark those that are reachable from other // leaked chunks. LOG_POINTERS("Scanning leaked chunks.\n"); - ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */); + ForEachChunk(MarkIndirectlyLeakedCb, nullptr); +} + +// ForEachChunk callback. Resets the tags to pre-leak-check state. 
+static void ResetTagsCb(uptr chunk, void *arg) { + (void)arg; + chunk = GetUserBegin(chunk); + LsanMetadata m(chunk); + if (m.allocated() && m.tag() != kIgnored) + m.set_tag(kDirectlyLeaked); } static void PrintStackTraceById(u32 stack_trace_id) { @@ -365,7 +349,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) { LsanMetadata m(chunk); if (!m.allocated()) return; if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) { - uptr resolution = flags()->resolution; + u32 resolution = flags()->resolution; u32 stack_trace_id = 0; if (resolution > 0) { StackTrace stack = StackDepotGet(m.stack_trace_id()); @@ -381,7 +365,7 @@ static void CollectLeaksCb(uptr chunk, void *arg) { static void PrintMatchedSuppressions() { InternalMmapVector<Suppression *> matched(1); - SuppressionContext::Get()->GetMatched(&matched); + GetSuppressionContext()->GetMatched(&matched); if (!matched.size()) return; const char *line = "-----------------------------------------------------"; @@ -389,40 +373,38 @@ static void PrintMatchedSuppressions() { Printf("Suppressions used:\n"); Printf(" count bytes template\n"); for (uptr i = 0; i < matched.size(); i++) - Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count), - matched[i]->weight, matched[i]->templ); + Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed( + &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ); Printf("%s\n\n", line); } -struct DoLeakCheckParam { +struct CheckForLeaksParam { bool success; LeakReport leak_report; }; -static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads, - void *arg) { - DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg); +static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads, + void *arg) { + CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg); CHECK(param); CHECK(!param->success); ClassifyAllChunks(suspended_threads); ForEachChunk(CollectLeaksCb, ¶m->leak_report); + // Clean up for subsequent leak checks. This assumes we did not overwrite any + // kIgnored tags. + ForEachChunk(ResetTagsCb, nullptr); param->success = true; } -void DoLeakCheck() { - EnsureMainThreadIDIsCorrect(); - BlockingMutexLock l(&global_mutex); - static bool already_done; - if (already_done) return; - already_done = true; +static bool CheckForLeaks() { if (&__lsan_is_turned_off && __lsan_is_turned_off()) - return; - - DoLeakCheckParam param; + return false; + EnsureMainThreadIDIsCorrect(); + CheckForLeaksParam param; param.success = false; LockThreadRegistry(); LockAllocator(); - StopTheWorld(DoLeakCheckCallback, ¶m); + DoStopTheWorld(CheckForLeaksCallback, ¶m); UnlockAllocator(); UnlockThreadRegistry(); @@ -446,39 +428,51 @@ void DoLeakCheck() { PrintMatchedSuppressions(); if (unsuppressed_count > 0) { param.leak_report.PrintSummary(); - if (flags()->exitcode) { - if (common_flags()->coverage) - __sanitizer_cov_dump(); - internal__exit(flags()->exitcode); - } + return true; } + return false; +} + +void DoLeakCheck() { + BlockingMutexLock l(&global_mutex); + static bool already_done; + if (already_done) return; + already_done = true; + bool have_leaks = CheckForLeaks(); + if (!have_leaks) { + return; + } + if (common_flags()->exitcode) { + Die(); + } +} + +static int DoRecoverableLeakCheck() { + BlockingMutexLock l(&global_mutex); + bool have_leaks = CheckForLeaks(); + return have_leaks ? 1 : 0; } static Suppression *GetSuppressionForAddr(uptr addr) { - Suppression *s; + Suppression *s = nullptr; // Suppress by module name. 
- const char *module_name; - uptr module_offset; - if (Symbolizer::GetOrInit() - ->GetModuleNameAndOffsetForPC(addr, &module_name, &module_offset) && - SuppressionContext::Get()->Match(module_name, SuppressionLeak, &s)) - return s; + SuppressionContext *suppressions = GetSuppressionContext(); + if (const char *module_name = + Symbolizer::GetOrInit()->GetModuleNameForPc(addr)) + if (suppressions->Match(module_name, kSuppressionLeak, &s)) + return s; // Suppress by file or function name. - static const uptr kMaxAddrFrames = 16; - InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); - for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo(); - uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( - addr, addr_frames.data(), kMaxAddrFrames); - for (uptr i = 0; i < addr_frames_num; i++) { - if (SuppressionContext::Get()->Match(addr_frames[i].function, - SuppressionLeak, &s) || - SuppressionContext::Get()->Match(addr_frames[i].file, SuppressionLeak, - &s)) - return s; + SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { + if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) || + suppressions->Match(cur->info.file, kSuppressionLeak, &s)) { + break; + } } - return 0; + frames->ClearAll(); + return s; } static Suppression *GetSuppressionForStack(u32 stack_trace_id) { @@ -488,7 +482,7 @@ static Suppression *GetSuppressionForStack(u32 stack_trace_id) { StackTrace::GetPreviousInstructionPc(stack.trace[i])); if (s) return s; } - return 0; + return nullptr; } ///// LeakReport implementation. ///// @@ -591,10 +585,9 @@ void LeakReport::PrintSummary() { bytes += leaks_[i].total_size; allocations += leaks_[i].hit_count; } - InternalScopedBuffer<char> summary(kMaxSummaryLength); - internal_snprintf(summary.data(), summary.size(), - "%zu byte(s) leaked in %zu allocation(s).", bytes, - allocations); + InternalScopedString summary(kMaxSummaryLength); + summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes, + allocations); ReportErrorSummary(summary.data()); } @@ -603,7 +596,8 @@ void LeakReport::ApplySuppressions() { Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id); if (s) { s->weight += leaks_[i].total_size; - s->hit_count += leaks_[i].hit_count; + atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) + + leaks_[i].hit_count); leaks_[i].is_suppressed = true; } } @@ -616,8 +610,8 @@ uptr LeakReport::UnsuppressedLeakCount() { return result; } -} // namespace __lsan -#endif // CAN_SANITIZE_LEAKS +} // namespace __lsan +#endif // CAN_SANITIZE_LEAKS using namespace __lsan; // NOLINT @@ -638,7 +632,7 @@ void __lsan_ignore_object(const void *p) { "heap object at %p is already being ignored\n", p); if (res == kIgnoreObjectSuccess) VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -649,7 +643,7 @@ void __lsan_register_root_region(const void *begin, uptr size) { RootRegion region = {begin, size}; root_regions->push_back(region); VReport(1, "Registered root region at %p of size %llu\n", begin, size); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -676,7 +670,7 @@ void __lsan_unregister_root_region(const void *begin, uptr size) { begin, size); Die(); } -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS } SANITIZER_INTERFACE_ATTRIBUTE @@ -702,7 +696,16 @@ void 
__lsan_do_leak_check() { #if CAN_SANITIZE_LEAKS if (common_flags()->detect_leaks) __lsan::DoLeakCheck(); -#endif // CAN_SANITIZE_LEAKS +#endif // CAN_SANITIZE_LEAKS +} + +SANITIZER_INTERFACE_ATTRIBUTE +int __lsan_do_recoverable_leak_check() { +#if CAN_SANITIZE_LEAKS + if (common_flags()->detect_leaks) + return __lsan::DoRecoverableLeakCheck(); +#endif // CAN_SANITIZE_LEAKS + return 0; } #if !SANITIZER_SUPPORTS_WEAK_HOOKS @@ -711,4 +714,4 @@ int __lsan_is_turned_off() { return 0; } #endif -} // extern "C" +} // extern "C" diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h index 72523d9ff5c..786a53b69f0 100644 --- a/libsanitizer/lsan/lsan_common.h +++ b/libsanitizer/lsan/lsan_common.h @@ -17,14 +17,20 @@ #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_platform.h" +#include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_symbolizer.h" -#if SANITIZER_LINUX && defined(__x86_64__) && (SANITIZER_WORDSIZE == 64) +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips64)) \ + && (SANITIZER_WORDSIZE == 64) #define CAN_SANITIZE_LEAKS 1 #else #define CAN_SANITIZE_LEAKS 0 #endif +namespace __sanitizer { +class FlagParser; +} + namespace __lsan { // Chunk tags. @@ -36,44 +42,19 @@ enum ChunkTag { }; struct Flags { +#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "lsan_flags.inc" +#undef LSAN_FLAG + + void SetDefaults(); uptr pointer_alignment() const { return use_unaligned ? 1 : sizeof(uptr); } - - // Print addresses of leaked objects after main leak report. - bool report_objects; - // Aggregate two objects into one leak if this many stack frames match. If - // zero, the entire stack trace must match. - int resolution; - // The number of leaks reported. - int max_leaks; - // If nonzero kill the process with this exit code upon finding leaks. - int exitcode; - - // Flags controlling the root set of reachable memory. - // Global variables (.data and .bss). - bool use_globals; - // Thread stacks. - bool use_stacks; - // Thread registers. - bool use_registers; - // TLS and thread-specific storage. - bool use_tls; - // Regions added via __lsan_register_root_region(). - bool use_root_regions; - - // Consider unaligned pointers valid. - bool use_unaligned; - // Consider pointers found in poisoned memory to be valid. - bool use_poisoned; - - // Debug logging. - bool log_pointers; - bool log_threads; }; extern Flags lsan_flags; inline Flags *flags() { return &lsan_flags; } +void RegisterLsanFlags(FlagParser *parser, Flags *f); struct Leak { u32 id; @@ -117,6 +98,8 @@ typedef InternalMmapVector<uptr> Frontier; void InitializePlatformSpecificModules(); void ProcessGlobalRegions(Frontier *frontier); void ProcessPlatformSpecificAllocations(Frontier *frontier); +// Run stoptheworld while holding any platform-specific locks. +void DoStopTheWorld(StopTheWorldCallback callback, void* argument); void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier, @@ -129,7 +112,7 @@ enum IgnoreObjectResult { }; // Functions called from the parent tool. 
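The hand-written flag fields are replaced by an X-macro: every LSAN_FLAG(Type, Name, DefaultValue, Description) entry in the new lsan_flags.inc expands into a struct member here, and the same list can be re-expanded elsewhere, for SetDefaults() and for FlagParser registration in RegisterLsanFlags(). A reduced, self-contained sketch of that pattern using two of the real flag names (in the real code the list lives in lsan_flags.inc and is #include'd under different LSAN_FLAG definitions; it is spelled out twice below only to keep the sketch compilable on its own):

struct MiniFlags {
#define MINI_FLAG(Type, Name, DefaultValue, Description) Type Name;
  MINI_FLAG(bool, report_objects, false, "Print addresses of leaked objects.")
  MINI_FLAG(int, max_leaks, 0, "The number of leaks reported.")
#undef MINI_FLAG
};

void SetMiniDefaults(MiniFlags *f) {
#define MINI_FLAG(Type, Name, DefaultValue, Description) f->Name = DefaultValue;
  MINI_FLAG(bool, report_objects, false, "Print addresses of leaked objects.")
  MINI_FLAG(int, max_leaks, 0, "The number of leaks reported.")
#undef MINI_FLAG
}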
-void InitCommonLsan(bool standalone); +void InitCommonLsan(); void DoLeakCheck(); bool DisabledInThisThread(); diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc index b17156ce6bb..0456dce890a 100644 --- a/libsanitizer/lsan/lsan_common_linux.cc +++ b/libsanitizer/lsan/lsan_common_linux.cc @@ -27,7 +27,7 @@ static const char kLinkerName[] = "ld"; // We request 2 modules matching "ld", so we can print a warning if there's more // than one match. But only the first one is actually used. static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64); -static LoadedModule *linker = 0; +static LoadedModule *linker = nullptr; static bool IsLinker(const char* full_name) { return LibraryNameIs(full_name, kLinkerName); @@ -47,7 +47,7 @@ void InitializePlatformSpecificModules() { else if (num_matches > 1) VReport(1, "LeakSanitizer: Multiple modules match \"%s\". " "TLS will not be handled correctly.\n", kLinkerName); - linker = 0; + linker = nullptr; } static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, @@ -83,10 +83,6 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size, // Scans global variables for heap pointers. void ProcessGlobalRegions(Frontier *frontier) { if (!flags()->use_globals) return; - // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of - // deadlocking by running this under StopTheWorld. However, the lock is - // reentrant, so we should be able to fix this by acquiring the lock before - // suspending threads. dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier); } @@ -112,7 +108,7 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) { reinterpret_cast<ProcessPlatformAllocParam *>(arg); chunk = GetUserBegin(chunk); LsanMetadata m(chunk); - if (m.allocated() && m.tag() != kReachable) { + if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) { u32 stack_id = m.stack_trace_id(); uptr caller_pc = 0; if (stack_id > 0) @@ -151,5 +147,31 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) { ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg); } -} // namespace __lsan -#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX +struct DoStopTheWorldParam { + StopTheWorldCallback callback; + void *argument; +}; + +static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size, + void *data) { + DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data); + StopTheWorld(param->callback, param->argument); + return 1; +} + +// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one +// of the threads is frozen while holding the libdl lock, the tracer will hang +// in dl_iterate_phdr() forever. +// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the +// tracer task and the thread that spawned it. Thus, if we run the tracer task +// while holding the libdl lock in the parent thread, we can safely reenter it +// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr() +// callback in the parent thread. 
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) { + DoStopTheWorldParam param = {callback, argument}; + dl_iterate_phdr(DoStopTheWorldCallback, &param); +} + +} // namespace __lsan + +#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX diff --git a/libsanitizer/lsan/lsan_flags.inc b/libsanitizer/lsan/lsan_flags.inc new file mode 100644 index 00000000000..73a980e1724 --- /dev/null +++ b/libsanitizer/lsan/lsan_flags.inc @@ -0,0 +1,41 @@ +//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// LSan runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef LSAN_FLAG +# error "Define LSAN_FLAG prior to including this file!" +#endif + +// LSAN_FLAG(Type, Name, DefaultValue, Description) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +LSAN_FLAG(bool, report_objects, false, + "Print addresses of leaked objects after main leak report.") +LSAN_FLAG( + int, resolution, 0, + "Aggregate two objects into one leak if this many stack frames match. If " + "zero, the entire stack trace must match.") +LSAN_FLAG(int, max_leaks, 0, "The number of leaks reported.") + +// Flags controlling the root set of reachable memory. +LSAN_FLAG(bool, use_globals, true, + "Root set: include global variables (.data and .bss)") +LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks") +LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers") +LSAN_FLAG(bool, use_tls, true, + "Root set: include TLS and thread-specific storage") +LSAN_FLAG(bool, use_root_regions, true, + "Root set: include regions added via __lsan_register_root_region().") + +LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.") +LSAN_FLAG(bool, use_poisoned, false, + "Consider pointers found in poisoned memory to be valid.") +LSAN_FLAG(bool, log_pointers, false, "Debug logging") +LSAN_FLAG(bool, log_threads, false, "Debug logging") +LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.") diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc index ac05dfaa54f..57581e855c4 100644 --- a/libsanitizer/lsan/lsan_interceptors.cc +++ b/libsanitizer/lsan/lsan_interceptors.cc @@ -10,11 +10,11 @@ // //===----------------------------------------------------------------------===// +#include "interception/interception.h" #include "sanitizer_common/sanitizer_allocator.h" #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_interception.h" #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_linux.h" #include "sanitizer_common/sanitizer_platform_limits_posix.h" @@ -69,7 +69,7 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { CHECK(allocated < kCallocPoolSize); return mem; } - if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return 0; + if (CallocShouldReturnNullDueToOverflow(size, nmemb)) return nullptr; ENSURE_LSAN_INITED; GET_STACK_TRACE_MALLOC; size *= nmemb; @@ -162,9 +162,9 @@ void *operator new[](uptr size, std::nothrow_t const&) { OPERATOR_NEW_BODY; } Deallocate(ptr); INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr) throw() { OPERATOR_DELETE_BODY; } +void operator delete(void 
*ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr) throw() { OPERATOR_DELETE_BODY; } +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } INTERCEPTOR_ATTRIBUTE @@ -206,16 +206,16 @@ extern "C" void *__lsan_thread_start_func(void *arg) { // Wait until the last iteration to maximize the chance that we are the last // destructor to run. if (pthread_setspecific(g_thread_finalize_key, - (void*)kPthreadDestructorIterations)) { + (void*)GetPthreadDestructorIterations())) { Report("LeakSanitizer: failed to set thread key.\n"); Die(); } int tid = 0; while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) internal_sched_yield(); - atomic_store(&p->tid, 0, memory_order_release); SetCurrentThread(tid); ThreadStart(tid, GetTid()); + atomic_store(&p->tid, 0, memory_order_release); return callback(param); } @@ -224,7 +224,7 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr, ENSURE_LSAN_INITED; EnsureMainThreadIDIsCorrect(); __sanitizer_pthread_attr_t myattr; - if (attr == 0) { + if (!attr) { pthread_attr_init(&myattr); attr = &myattr; } @@ -282,4 +282,4 @@ void InitializeInterceptors() { } } -} // namespace __lsan +} // namespace __lsan diff --git a/libsanitizer/lsan/lsan_thread.cc b/libsanitizer/lsan/lsan_thread.cc index 07f9d0ab439..1313ea2dce1 100644 --- a/libsanitizer/lsan/lsan_thread.cc +++ b/libsanitizer/lsan/lsan_thread.cc @@ -77,7 +77,7 @@ void ThreadContext::OnFinished() { u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) { return thread_registry->CreateThread(user_id, detached, parent_tid, - /* arg */ 0); + /* arg */ nullptr); } void ThreadStart(u32 tid, uptr os_id) { @@ -97,9 +97,9 @@ void ThreadFinish() { } ThreadContext *CurrentThreadContext() { - if (!thread_registry) return 0; + if (!thread_registry) return nullptr; if (GetCurrentThread() == kInvalidTid) - return 0; + return nullptr; // No lock needed when getting current thread. 
return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread()); } @@ -118,7 +118,7 @@ u32 ThreadTid(uptr uid) { void ThreadJoin(u32 tid) { CHECK_NE(tid, kInvalidTid); - thread_registry->JoinThread(tid, /* arg */0); + thread_registry->JoinThread(tid, /* arg */nullptr); } void EnsureMainThreadIDIsCorrect() { @@ -155,4 +155,4 @@ void UnlockThreadRegistry() { thread_registry->Unlock(); } -} // namespace __lsan +} // namespace __lsan diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h index cd13fdb5c52..70c3ff92616 100644 --- a/libsanitizer/lsan/lsan_thread.h +++ b/libsanitizer/lsan/lsan_thread.h @@ -20,8 +20,8 @@ namespace __lsan { class ThreadContext : public ThreadContextBase { public: explicit ThreadContext(int tid); - void OnStarted(void *arg); - void OnFinished(); + void OnStarted(void *arg) override; + void OnFinished() override; uptr stack_begin() { return stack_begin_; } uptr stack_end() { return stack_end_; } uptr tls_begin() { return tls_begin_; } diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am index d541717866b..ee7a3f1dd2c 100644 --- a/libsanitizer/sanitizer_common/Makefile.am +++ b/libsanitizer/sanitizer_common/Makefile.am @@ -27,6 +27,7 @@ sanitizer_common_files = \ sanitizer_deadlock_detector1.cc \ sanitizer_deadlock_detector2.cc \ sanitizer_flags.cc \ + sanitizer_flag_parser.cc \ sanitizer_libc.cc \ sanitizer_libignore.cc \ sanitizer_linux.cc \ @@ -45,6 +46,7 @@ sanitizer_common_files = \ sanitizer_stackdepot.cc \ sanitizer_stacktrace.cc \ sanitizer_stacktrace_libcdep.cc \ + sanitizer_symbolizer_mac.cc \ sanitizer_stacktrace_printer.cc \ sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_suppressions.cc \ @@ -55,7 +57,7 @@ sanitizer_common_files = \ sanitizer_symbolizer_win.cc \ sanitizer_thread_registry.cc \ sanitizer_tls_get_addr.cc \ - sanitizer_unwind_posix_libcdep.cc \ + sanitizer_unwind_linux_libcdep.cc \ sanitizer_win.cc diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in index 661452448b2..4b008ad7ae6 100644 --- a/libsanitizer/sanitizer_common/Makefile.in +++ b/libsanitizer/sanitizer_common/Makefile.in @@ -85,7 +85,8 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \ sanitizer_coverage_mapping_libcdep.lo \ sanitizer_deadlock_detector1.lo \ sanitizer_deadlock_detector2.lo sanitizer_flags.lo \ - sanitizer_libc.lo sanitizer_libignore.lo sanitizer_linux.lo \ + sanitizer_flag_parser.lo sanitizer_libc.lo \ + sanitizer_libignore.lo sanitizer_linux.lo \ sanitizer_linux_libcdep.lo sanitizer_mac.lo \ sanitizer_persistent_allocator.lo \ sanitizer_platform_limits_linux.lo \ @@ -94,7 +95,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \ sanitizer_procmaps_common.lo sanitizer_procmaps_freebsd.lo \ sanitizer_procmaps_linux.lo sanitizer_procmaps_mac.lo \ sanitizer_stackdepot.lo sanitizer_stacktrace.lo \ - sanitizer_stacktrace_libcdep.lo \ + sanitizer_stacktrace_libcdep.lo sanitizer_symbolizer_mac.lo \ sanitizer_stacktrace_printer.lo \ sanitizer_stoptheworld_linux_libcdep.lo \ sanitizer_suppressions.lo sanitizer_symbolizer.lo \ @@ -102,7 +103,7 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \ sanitizer_symbolizer_libcdep.lo \ sanitizer_symbolizer_posix_libcdep.lo \ sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \ - sanitizer_tls_get_addr.lo sanitizer_unwind_posix_libcdep.lo \ + sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \ sanitizer_win.lo am_libsanitizer_common_la_OBJECTS = 
$(am__objects_1) libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS) @@ -290,6 +291,7 @@ sanitizer_common_files = \ sanitizer_deadlock_detector1.cc \ sanitizer_deadlock_detector2.cc \ sanitizer_flags.cc \ + sanitizer_flag_parser.cc \ sanitizer_libc.cc \ sanitizer_libignore.cc \ sanitizer_linux.cc \ @@ -308,6 +310,7 @@ sanitizer_common_files = \ sanitizer_stackdepot.cc \ sanitizer_stacktrace.cc \ sanitizer_stacktrace_libcdep.cc \ + sanitizer_symbolizer_mac.cc \ sanitizer_stacktrace_printer.cc \ sanitizer_stoptheworld_linux_libcdep.cc \ sanitizer_suppressions.cc \ @@ -318,7 +321,7 @@ sanitizer_common_files = \ sanitizer_symbolizer_win.cc \ sanitizer_thread_registry.cc \ sanitizer_tls_get_addr.cc \ - sanitizer_unwind_posix_libcdep.cc \ + sanitizer_unwind_linux_libcdep.cc \ sanitizer_win.cc libsanitizer_common_la_SOURCES = $(sanitizer_common_files) @@ -421,6 +424,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_coverage_mapping_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector1.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_deadlock_detector2.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flag_parser.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_flags.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libc.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@ @@ -446,11 +450,12 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libbacktrace.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_libcdep.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@ -@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_posix_libcdep.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@ .cc.o: diff --git a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h index 9e15d51c66c..3bc40ef64f0 100644 --- a/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h +++ b/libsanitizer/sanitizer_common/sanitizer_addrhashmap.h @@ -141,7 +141,7 @@ bool AddrHashMap<T, kSize>::Handle::created() const { template<typename T, uptr kSize> bool AddrHashMap<T, kSize>::Handle::exists() const { - return cell_ != 0; + return cell_ != nullptr; } template<typename T, uptr kSize> @@ -158,7 +158,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) { h->created_ = false; h->addidx_ = -1U; h->bucket_ = b; - h->cell_ = 0; + h->cell_ = nullptr; // If we want to remove the element, we need exclusive access to the bucket, // so skip the lock-free phase. @@ -248,7 +248,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) { } // Store in the add cells. - if (add == 0) { + if (!add) { // Allocate a new add array. 
const uptr kInitSize = 64; add = (AddBucket*)InternalAlloc(kInitSize); @@ -280,7 +280,7 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) { template<typename T, uptr kSize> void AddrHashMap<T, kSize>::release(Handle *h) { - if (h->cell_ == 0) + if (!h->cell_) return; Bucket *b = h->bucket_; Cell *c = h->cell_; diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc index f4e3af1b06a..b5e0bab91f1 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc +++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc @@ -9,10 +9,10 @@ // run-time libraries. // This allocator is used inside run-times. //===----------------------------------------------------------------------===// + #include "sanitizer_allocator.h" #include "sanitizer_allocator_internal.h" #include "sanitizer_common.h" -#include "sanitizer_flags.h" namespace __sanitizer { @@ -43,7 +43,7 @@ InternalAllocator *internal_allocator() { return 0; } -#else // SANITIZER_GO +#else // SANITIZER_GO static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)]; static atomic_uint8_t internal_allocator_initialized; @@ -59,7 +59,7 @@ InternalAllocator *internal_allocator() { SpinMutexLock l(&internal_alloc_init_mu); if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) == 0) { - internal_allocator_instance->Init(); + internal_allocator_instance->Init(/* may_return_null*/ false); atomic_store(&internal_allocator_initialized, 1, memory_order_release); } } @@ -76,29 +76,29 @@ static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) { } static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) { - if (cache == 0) { + if (!cache) { SpinMutexLock l(&internal_allocator_cache_mu); return internal_allocator()->Deallocate(&internal_allocator_cache, ptr); } internal_allocator()->Deallocate(cache, ptr); } -#endif // SANITIZER_GO +#endif // SANITIZER_GO const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull; void *InternalAlloc(uptr size, InternalAllocatorCache *cache) { if (size + sizeof(u64) < size) - return 0; + return nullptr; void *p = RawInternalAlloc(size + sizeof(u64), cache); - if (p == 0) - return 0; + if (!p) + return nullptr; ((u64*)p)[0] = kBlockMagic; return (char*)p + sizeof(u64); } void InternalFree(void *addr, InternalAllocatorCache *cache) { - if (addr == 0) + if (!addr) return; addr = (char*)addr - sizeof(u64); CHECK_EQ(kBlockMagic, ((u64*)addr)[0]); @@ -138,14 +138,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) { return (max / size) < n; } -void *AllocatorReturnNull() { - if (common_flags()->allocator_may_return_null) - return 0; +void NORETURN ReportAllocatorCannotReturnNull() { Report("%s's allocator is terminating the process instead of returning 0\n", SanitizerToolName); Report("If you don't like this behavior set allocator_may_return_null=1\n"); CHECK(0); - return 0; + Die(); } -} // namespace __sanitizer +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h index dd5539a2087..d98528c722c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h @@ -21,8 +21,8 @@ namespace __sanitizer { -// Depending on allocator_may_return_null either return 0 or crash. -void *AllocatorReturnNull(); +// Prints error message and kills the program. 
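Failure handling changes shape here as well: the old AllocatorReturnNull(), which consulted common_flags()->allocator_may_return_null on every call, is replaced by the NORETURN reporting function declared just below, and each allocator now carries its own may_return_null state set at Init() time. A self-contained analogue of that plumbing (a toy class, not the real sanitizer allocator):

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

class ToyAllocator {
 public:
  void Init(bool may_return_null) {
    may_return_null_.store(may_return_null, std::memory_order_relaxed);
  }
  void *ReturnNullOrDie() {
    if (may_return_null_.load(std::memory_order_acquire))
      return nullptr;
    std::fprintf(stderr,
                 "allocator is terminating the process instead of returning 0\n");
    std::abort();  // stands in for ReportAllocatorCannotReturnNull()/Die()
  }
  void *Allocate(std::size_t size, std::size_t alignment) {
    if (size + alignment < size)   // overflow: let the policy decide
      return ReturnNullOrDie();
    return std::malloc(size);
  }
 private:
  std::atomic<bool> may_return_null_{false};
};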
+void NORETURN ReportAllocatorCannotReturnNull(); // SizeClassMap maps allocation sizes into size classes and back. // Class 0 corresponds to size 0. @@ -209,6 +209,7 @@ class AllocatorStats { void Init() { internal_memset(this, 0, sizeof(*this)); } + void InitLinkerInitialized() {} void Add(AllocatorStat i, uptr v) { v += atomic_load(&stats_[i], memory_order_relaxed); @@ -238,11 +239,14 @@ class AllocatorStats { // Global stats, used for aggregation and querying. class AllocatorGlobalStats : public AllocatorStats { public: - void Init() { - internal_memset(this, 0, sizeof(*this)); + void InitLinkerInitialized() { next_ = this; prev_ = this; } + void Init() { + internal_memset(this, 0, sizeof(*this)); + InitLinkerInitialized(); + } void Register(AllocatorStats *s) { SpinMutexLock l(&mu_); @@ -317,7 +321,7 @@ class SizeClassAllocator64 { void Init() { CHECK_EQ(kSpaceBeg, - reinterpret_cast<uptr>(Mprotect(kSpaceBeg, kSpaceSize))); + reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize))); MapWithCallback(kSpaceEnd, AdditionalSize()); } @@ -341,7 +345,7 @@ class SizeClassAllocator64 { CHECK_LT(class_id, kNumClasses); RegionInfo *region = GetRegionInfo(class_id); Batch *b = region->free_list.Pop(); - if (b == 0) + if (!b) b = PopulateFreeList(stat, c, class_id, region); region->n_allocated += b->count; return b; @@ -365,16 +369,16 @@ class SizeClassAllocator64 { void *GetBlockBegin(const void *p) { uptr class_id = GetSizeClass(p); uptr size = SizeClassMap::Size(class_id); - if (!size) return 0; + if (!size) return nullptr; uptr chunk_idx = GetChunkIdx((uptr)p, size); uptr reg_beg = (uptr)p & ~(kRegionSize - 1); uptr beg = chunk_idx * size; uptr next_beg = beg + size; - if (class_id >= kNumClasses) return 0; + if (class_id >= kNumClasses) return nullptr; RegionInfo *region = GetRegionInfo(class_id); if (region->mapped_user >= next_beg) return reinterpret_cast<void*>(reg_beg + beg); - return 0; + return nullptr; } static uptr GetActuallyAllocatedSize(void *p) { @@ -603,6 +607,7 @@ class TwoLevelByteMap { internal_memset(map1_, 0, sizeof(map1_)); mu_.Init(); } + void TestOnlyUnmap() { for (uptr i = 0; i < kSize1; i++) { u8 *p = Get(i); @@ -816,6 +821,10 @@ class SizeClassAllocator32 { void PrintStats() { } + static uptr AdditionalSize() { + return 0; + } + typedef SizeClassMap SizeClassMapT; static const uptr kNumClasses = SizeClassMap::kNumClasses; @@ -862,9 +871,9 @@ class SizeClassAllocator32 { uptr reg = AllocateRegion(stat, class_id); uptr n_chunks = kRegionSize / (size + kMetadataSize); uptr max_count = SizeClassMap::MaxCached(class_id); - Batch *b = 0; + Batch *b = nullptr; for (uptr i = reg; i < reg + n_chunks * size; i += size) { - if (b == 0) { + if (!b) { if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id)) b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch))); else @@ -875,7 +884,7 @@ class SizeClassAllocator32 { if (b->count == max_count) { CHECK_GT(b->count, 0); sci->free_list.push_back(b); - b = 0; + b = nullptr; } } if (b) { @@ -1000,9 +1009,14 @@ struct SizeClassAllocatorLocalCache { template <class MapUnmapCallback = NoOpMapUnmapCallback> class LargeMmapAllocator { public: - void Init() { - internal_memset(this, 0, sizeof(*this)); + void InitLinkerInitialized(bool may_return_null) { page_size_ = GetPageSizeCached(); + atomic_store(&may_return_null_, may_return_null, memory_order_relaxed); + } + + void Init(bool may_return_null) { + internal_memset(this, 0, sizeof(*this)); + InitLinkerInitialized(may_return_null); } void *Allocate(AllocatorStats 
*stat, uptr size, uptr alignment) { @@ -1010,7 +1024,9 @@ class LargeMmapAllocator { uptr map_size = RoundUpMapSize(size); if (alignment > page_size_) map_size += alignment; - if (map_size < size) return AllocatorReturnNull(); // Overflow. + // Overflow. + if (map_size < size) + return ReturnNullOrDie(); uptr map_beg = reinterpret_cast<uptr>( MmapOrDie(map_size, "LargeMmapAllocator")); CHECK(IsAligned(map_beg, page_size_)); @@ -1046,6 +1062,16 @@ class LargeMmapAllocator { return reinterpret_cast<void*>(res); } + void *ReturnNullOrDie() { + if (atomic_load(&may_return_null_, memory_order_acquire)) + return nullptr; + ReportAllocatorCannotReturnNull(); + } + + void SetMayReturnNull(bool may_return_null) { + atomic_store(&may_return_null_, may_return_null, memory_order_release); + } + void Deallocate(AllocatorStats *stat, void *p) { Header *h = GetHeader(p); { @@ -1078,7 +1104,7 @@ class LargeMmapAllocator { } bool PointerIsMine(const void *p) { - return GetBlockBegin(p) != 0; + return GetBlockBegin(p) != nullptr; } uptr GetActuallyAllocatedSize(void *p) { @@ -1107,13 +1133,13 @@ class LargeMmapAllocator { nearest_chunk = ch; } if (!nearest_chunk) - return 0; + return nullptr; Header *h = reinterpret_cast<Header *>(nearest_chunk); CHECK_GE(nearest_chunk, h->map_beg); CHECK_LT(nearest_chunk, h->map_beg + h->map_size); CHECK_LE(nearest_chunk, p); if (h->map_beg + h->map_size <= p) - return 0; + return nullptr; return GetUser(h); } @@ -1123,7 +1149,7 @@ class LargeMmapAllocator { mutex_.CheckLocked(); uptr p = reinterpret_cast<uptr>(ptr); uptr n = n_chunks_; - if (!n) return 0; + if (!n) return nullptr; if (!chunks_sorted_) { // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate. SortArray(reinterpret_cast<uptr*>(chunks_), n); @@ -1135,7 +1161,7 @@ class LargeMmapAllocator { chunks_[n - 1]->map_size; } if (p < min_mmap_ || p >= max_mmap_) - return 0; + return nullptr; uptr beg = 0, end = n - 1; // This loop is a log(n) lower_bound. It does not check for the exact match // to avoid expensive cache-thrashing loads. @@ -1156,7 +1182,7 @@ class LargeMmapAllocator { Header *h = chunks_[beg]; if (h->map_beg + h->map_size <= p || p < h->map_beg) - return 0; + return nullptr; return GetUser(h); } @@ -1224,6 +1250,7 @@ class LargeMmapAllocator { struct Stats { uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64]; } stats; + atomic_uint8_t may_return_null_; SpinMutex mutex_; }; @@ -1237,19 +1264,32 @@ template <class PrimaryAllocator, class AllocatorCache, class SecondaryAllocator> // NOLINT class CombinedAllocator { public: - void Init() { + void InitCommon(bool may_return_null) { primary_.Init(); - secondary_.Init(); + atomic_store(&may_return_null_, may_return_null, memory_order_relaxed); + } + + void InitLinkerInitialized(bool may_return_null) { + secondary_.InitLinkerInitialized(may_return_null); + stats_.InitLinkerInitialized(); + InitCommon(may_return_null); + } + + void Init(bool may_return_null) { + secondary_.Init(may_return_null); stats_.Init(); + InitCommon(may_return_null); } void *Allocate(AllocatorCache *cache, uptr size, uptr alignment, - bool cleared = false) { + bool cleared = false, bool check_rss_limit = false) { // Returning 0 on malloc(0) may break a lot of code. 
if (size == 0) size = 1; if (size + alignment < size) - return AllocatorReturnNull(); + return ReturnNullOrDie(); + if (check_rss_limit && RssLimitIsExceeded()) + return ReturnNullOrDie(); if (alignment > 8) size = RoundUpTo(size, alignment); void *res; @@ -1265,6 +1305,30 @@ class CombinedAllocator { return res; } + bool MayReturnNull() const { + return atomic_load(&may_return_null_, memory_order_acquire); + } + + void *ReturnNullOrDie() { + if (MayReturnNull()) + return nullptr; + ReportAllocatorCannotReturnNull(); + } + + void SetMayReturnNull(bool may_return_null) { + secondary_.SetMayReturnNull(may_return_null); + atomic_store(&may_return_null_, may_return_null, memory_order_release); + } + + bool RssLimitIsExceeded() { + return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire); + } + + void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) { + atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded, + memory_order_release); + } + void Deallocate(AllocatorCache *cache, void *p) { if (!p) return; if (primary_.PointerIsMine(p)) @@ -1279,7 +1343,7 @@ class CombinedAllocator { return Allocate(cache, new_size, alignment); if (!new_size) { Deallocate(cache, p); - return 0; + return nullptr; } CHECK(PointerIsMine(p)); uptr old_size = GetActuallyAllocatedSize(p); @@ -1377,11 +1441,13 @@ class CombinedAllocator { PrimaryAllocator primary_; SecondaryAllocator secondary_; AllocatorGlobalStats stats_; + atomic_uint8_t may_return_null_; + atomic_uint8_t rss_limit_is_exceeded_; }; // Returns true if calloc(size, n) should return 0 due to overflow in size*n. bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n); -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_ALLOCATOR_H +#endif // SANITIZER_ALLOCATOR_H diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h index 5e8cc107706..99d8516d8cf 100644 --- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h +++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h @@ -1,4 +1,4 @@ -//===-- sanitizer_allocator_internal.h -------------------------- C++ -----===// +//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===// // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. 
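The new check_rss_limit parameter and rss_limit_is_exceeded_ flag split responsibilities: allocation sites merely consult the flag, while something outside the allocator, presumably a periodic RSS monitor, is expected to flip it through SetRssLimitIsExceeded(). A sketch of that interaction with a stand-in atomic flag (the names and the monitor function are illustrative, not the real classes):

#include <atomic>
#include <cstddef>
#include <cstdlib>

static std::atomic<bool> rss_limit_is_exceeded{false};

// Hot path: opt-in check, as in Allocate(..., check_rss_limit = true).
void *AllocateWithRssCheck(std::size_t size) {
  if (rss_limit_is_exceeded.load(std::memory_order_acquire))
    return nullptr;  // or die, depending on the may_return_null policy
  return std::malloc(size);
}

// Cold path: meant to run periodically on a background thread, comparing the
// current resident set size against a configured limit.
void RssMonitorTick(std::size_t rss_mb, std::size_t limit_mb) {
  rss_limit_is_exceeded.store(limit_mb && rss_mb > limit_mb,
                              std::memory_order_release);
}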
@@ -43,10 +43,19 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator> typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache, LargeMmapAllocator<> > InternalAllocator; -void *InternalAlloc(uptr size, InternalAllocatorCache *cache = 0); -void InternalFree(void *p, InternalAllocatorCache *cache = 0); +void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr); +void InternalFree(void *p, InternalAllocatorCache *cache = nullptr); InternalAllocator *internal_allocator(); -} // namespace __sanitizer +enum InternalAllocEnum { + INTERNAL_ALLOC +}; -#endif // SANITIZER_ALLOCATOR_INTERNAL_H +} // namespace __sanitizer + +inline void *operator new(__sanitizer::operator_new_size_type size, + InternalAllocEnum) { + return InternalAlloc(size); +} + +#endif // SANITIZER_ALLOCATOR_INTERNAL_H diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic.h b/libsanitizer/sanitizer_common/sanitizer_atomic.h index 6387664f63b..4973b7d4e88 100644 --- a/libsanitizer/sanitizer_common/sanitizer_atomic.h +++ b/libsanitizer/sanitizer_common/sanitizer_atomic.h @@ -53,7 +53,7 @@ struct atomic_uintptr_t { } // namespace __sanitizer -#if defined(__GNUC__) +#if defined(__clang__) || defined(__GNUC__) # include "sanitizer_atomic_clang.h" #elif defined(_MSC_VER) # include "sanitizer_atomic_msvc.h" @@ -61,4 +61,20 @@ struct atomic_uintptr_t { # error "Unsupported compiler" #endif +namespace __sanitizer { + +// Clutter-reducing helpers. + +template<typename T> +INLINE typename T::Type atomic_load_relaxed(const volatile T *a) { + return atomic_load(a, memory_order_relaxed); +} + +template<typename T> +INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) { + atomic_store(a, v, memory_order_relaxed); +} + +} // namespace __sanitizer + #endif // SANITIZER_ATOMIC_H diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h index 1198dec3e97..4ac3b90769f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h +++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h @@ -19,6 +19,15 @@ extern "C" void _mm_mfence(); #pragma intrinsic(_mm_mfence) extern "C" void _mm_pause(); #pragma intrinsic(_mm_pause) +extern "C" char _InterlockedExchange8( // NOLINT + char volatile *Addend, char Value); // NOLINT +#pragma intrinsic(_InterlockedExchange8) +extern "C" short _InterlockedExchange16( // NOLINT + short volatile *Addend, short Value); // NOLINT +#pragma intrinsic(_InterlockedExchange16) +extern "C" long _InterlockedExchange( // NOLINT + long volatile *Addend, long Value); // NOLINT +#pragma intrinsic(_InterlockedExchange) extern "C" long _InterlockedExchangeAdd( // NOLINT long volatile * Addend, long Value); // NOLINT #pragma intrinsic(_InterlockedExchangeAdd) @@ -143,28 +152,25 @@ INLINE u8 atomic_exchange(volatile atomic_uint8_t *a, u8 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); - __asm { - mov eax, a - mov cl, v - xchg [eax], cl // NOLINT - mov v, cl - } - return v; + return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v); } INLINE u16 atomic_exchange(volatile atomic_uint16_t *a, u16 v, memory_order mo) { (void)mo; DCHECK(!((uptr)a % sizeof(*a))); - __asm { - mov eax, a - mov cx, v - xchg [eax], cx // NOLINT - mov v, cx - } - return v; + return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v); +} + +INLINE u32 atomic_exchange(volatile atomic_uint32_t *a, + u32 v, memory_order mo) { + (void)mo; + DCHECK(!((uptr)a % sizeof(*a))); + return 
(u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v); } +#ifndef _WIN64 + INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a, u8 *cmp, u8 xchgv, @@ -186,6 +192,8 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a, return false; } +#endif + INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a, uptr *cmp, uptr xchg, diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc index f06efffb325..a7b5efb6bb4 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common.cc +++ b/libsanitizer/sanitizer_common/sanitizer_common.cc @@ -10,13 +10,19 @@ //===----------------------------------------------------------------------===// #include "sanitizer_common.h" +#include "sanitizer_allocator_internal.h" #include "sanitizer_flags.h" #include "sanitizer_libc.h" +#include "sanitizer_placement_new.h" +#include "sanitizer_stacktrace_printer.h" +#include "sanitizer_symbolizer.h" namespace __sanitizer { const char *SanitizerToolName = "SanitizerTool"; +atomic_uint32_t current_verbosity; + uptr GetPageSizeCached() { static uptr PageSize; if (!PageSize) @@ -24,19 +30,71 @@ uptr GetPageSizeCached() { return PageSize; } +StaticSpinMutex report_file_mu; +ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0}; -// By default, dump to stderr. If |log_to_file| is true and |report_fd_pid| -// isn't equal to the current PID, try to obtain file descriptor by opening -// file "report_path_prefix.<PID>". -fd_t report_fd = kStderrFd; +void RawWrite(const char *buffer) { + report_file.Write(buffer, internal_strlen(buffer)); +} -// Set via __sanitizer_set_report_path. -bool log_to_file = false; -char report_path_prefix[sizeof(report_path_prefix)]; +void ReportFile::ReopenIfNecessary() { + mu->CheckLocked(); + if (fd == kStdoutFd || fd == kStderrFd) return; + + uptr pid = internal_getpid(); + // If in tracer, use the parent's file. + if (pid == stoptheworld_tracer_pid) + pid = stoptheworld_tracer_ppid; + if (fd != kInvalidFd) { + // If the report file is already opened by the current process, + // do nothing. Otherwise the report file was opened by the parent + // process, close it now. + if (fd_pid == pid) + return; + else + CloseFile(fd); + } -// PID of process that opened |report_fd|. If a fork() occurs, the PID of the -// child thread will be different from |report_fd_pid|. 
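ReportFile bundles what used to be four loose globals (report_fd, log_to_file, report_path_prefix, report_fd_pid): ReopenIfNecessary() lazily opens "<path_prefix>.<pid>" (or "<path_prefix>.<exe>.<pid>" when log_exe_name is set) and reopens after fork() because the cached fd_pid no longer matches. The prefix is still chosen through __sanitizer_set_report_path(), as in this small sketch (the path itself is arbitrary):

#include <sanitizer/common_interface_defs.h>

int main() {
  // Subsequent reports go to "/tmp/tool_report.<pid>"; the literal strings
  // "stdout" and "stderr" select those streams instead of opening a file.
  __sanitizer_set_report_path("/tmp/tool_report");
  return 0;
}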
-uptr report_fd_pid = 0; + const char *exe_name = GetProcessName(); + if (common_flags()->log_exe_name && exe_name) { + internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix, + exe_name, pid); + } else { + internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid); + } + fd = OpenFile(full_path, WrOnly); + if (fd == kInvalidFd) { + const char *ErrorMsgPrefix = "ERROR: Can't open file: "; + WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix)); + WriteToFile(kStderrFd, full_path, internal_strlen(full_path)); + Die(); + } + fd_pid = pid; +} + +void ReportFile::SetReportPath(const char *path) { + if (!path) + return; + uptr len = internal_strlen(path); + if (len > sizeof(path_prefix) - 100) { + Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", + path[0], path[1], path[2], path[3], + path[4], path[5], path[6], path[7]); + Die(); + } + + SpinMutexLock l(mu); + if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd) + CloseFile(fd); + fd = kInvalidFd; + if (internal_strcmp(path, "stdout") == 0) { + fd = kStdoutFd; + } else if (internal_strcmp(path, "stderr") == 0) { + fd = kStderrFd; + } else { + internal_snprintf(path_prefix, kMaxPathLength, "%s", path); + } +} // PID of the tracer task in StopTheWorld. It shares the address space with the // main process, but has a different PID and thus requires special handling. @@ -45,20 +103,47 @@ uptr stoptheworld_tracer_pid = 0; // writing to the same log file. uptr stoptheworld_tracer_ppid = 0; -static DieCallbackType DieCallback; -void SetDieCallback(DieCallbackType callback) { - DieCallback = callback; +static const int kMaxNumOfInternalDieCallbacks = 5; +static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks]; + +bool AddDieCallback(DieCallbackType callback) { + for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) { + if (InternalDieCallbacks[i] == nullptr) { + InternalDieCallbacks[i] = callback; + return true; + } + } + return false; } -DieCallbackType GetDieCallback() { - return DieCallback; +bool RemoveDieCallback(DieCallbackType callback) { + for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) { + if (InternalDieCallbacks[i] == callback) { + internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1], + sizeof(InternalDieCallbacks[0]) * + (kMaxNumOfInternalDieCallbacks - i - 1)); + InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr; + return true; + } + } + return false; +} + +static DieCallbackType UserDieCallback; +void SetUserDieCallback(DieCallbackType callback) { + UserDieCallback = callback; } void NORETURN Die() { - if (DieCallback) { - DieCallback(); + if (UserDieCallback) + UserDieCallback(); + for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) { + if (InternalDieCallbacks[i]) + InternalDieCallbacks[i](); } - internal__exit(1); + if (common_flags()->abort_on_error) + Abort(); + internal__exit(common_flags()->exitcode); } static CheckFailedCallbackType CheckFailedCallback; @@ -76,37 +161,57 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond, Die(); } -uptr ReadFileToBuffer(const char *file_name, char **buff, - uptr *buff_size, uptr max_len) { +void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type, + error_t err) { + static int recursion_count; + if (recursion_count) { + // The Report() and CHECK calls below may call mmap recursively and fail. + // If we went into recursion, just die. 
+ RawWrite("ERROR: Failed to mmap\n"); + Die(); + } + recursion_count++; + Report("ERROR: %s failed to " + "allocate 0x%zx (%zd) bytes of %s (error code: %d)\n", + SanitizerToolName, size, size, mem_type, err); + DumpProcessMap(); + UNREACHABLE("unable to mmap"); +} + +bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, + uptr *read_len, uptr max_len, error_t *errno_p) { uptr PageSize = GetPageSizeCached(); uptr kMinFileLen = PageSize; - uptr read_len = 0; - *buff = 0; + *buff = nullptr; *buff_size = 0; + *read_len = 0; // The files we usually open are not seekable, so try different buffer sizes. for (uptr size = kMinFileLen; size <= max_len; size *= 2) { - uptr openrv = OpenFile(file_name, /*write*/ false); - if (internal_iserror(openrv)) return 0; - fd_t fd = openrv; + fd_t fd = OpenFile(file_name, RdOnly, errno_p); + if (fd == kInvalidFd) return false; UnmapOrDie(*buff, *buff_size); *buff = (char*)MmapOrDie(size, __func__); *buff_size = size; + *read_len = 0; // Read up to one page at a time. - read_len = 0; bool reached_eof = false; - while (read_len + PageSize <= size) { - uptr just_read = internal_read(fd, *buff + read_len, PageSize); + while (*read_len + PageSize <= size) { + uptr just_read; + if (!ReadFromFile(fd, *buff + *read_len, PageSize, &just_read, errno_p)) { + UnmapOrDie(*buff, *buff_size); + return false; + } if (just_read == 0) { reached_eof = true; break; } - read_len += just_read; + *read_len += just_read; } - internal_close(fd); + CloseFile(fd); if (reached_eof) // We've read the whole file. break; } - return read_len; + return true; } typedef bool UptrComparisonFunction(const uptr &a, const uptr &b); @@ -143,62 +248,77 @@ void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) { const char *StripPathPrefix(const char *filepath, const char *strip_path_prefix) { - if (filepath == 0) return 0; - if (strip_path_prefix == 0) return filepath; - const char *pos = internal_strstr(filepath, strip_path_prefix); - if (pos == 0) return filepath; - pos += internal_strlen(strip_path_prefix); - if (pos[0] == '.' && pos[1] == '/') - pos += 2; - return pos; + if (!filepath) return nullptr; + if (!strip_path_prefix) return filepath; + const char *res = filepath; + if (const char *pos = internal_strstr(filepath, strip_path_prefix)) + res = pos + internal_strlen(strip_path_prefix); + if (res[0] == '.' && res[1] == '/') + res += 2; + return res; } const char *StripModuleName(const char *module) { - if (module == 0) - return 0; - if (const char *slash_pos = internal_strrchr(module, '/')) + if (!module) + return nullptr; + if (SANITIZER_WINDOWS) { + // On Windows, both slash and backslash are possible. + // Pick the one that goes last. 
+ if (const char *bslash_pos = internal_strrchr(module, '\\')) + return StripModuleName(bslash_pos + 1); + } + if (const char *slash_pos = internal_strrchr(module, '/')) { return slash_pos + 1; + } return module; } void ReportErrorSummary(const char *error_message) { if (!common_flags()->print_summary) return; - InternalScopedBuffer<char> buff(kMaxSummaryLength); - internal_snprintf(buff.data(), buff.size(), - "SUMMARY: %s: %s", SanitizerToolName, error_message); + InternalScopedString buff(kMaxSummaryLength); + buff.append("SUMMARY: %s: %s", SanitizerToolName, error_message); __sanitizer_report_error_summary(buff.data()); } -void ReportErrorSummary(const char *error_type, const char *file, - int line, const char *function) { +#ifndef SANITIZER_GO +void ReportErrorSummary(const char *error_type, const AddressInfo &info) { if (!common_flags()->print_summary) return; - InternalScopedBuffer<char> buff(kMaxSummaryLength); - internal_snprintf( - buff.data(), buff.size(), "%s %s:%d %s", error_type, - file ? StripPathPrefix(file, common_flags()->strip_path_prefix) : "??", - line, function ? function : "??"); + InternalScopedString buff(kMaxSummaryLength); + buff.append("%s ", error_type); + RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style, + common_flags()->strip_path_prefix); ReportErrorSummary(buff.data()); } +#endif -LoadedModule::LoadedModule(const char *module_name, uptr base_address) { +void LoadedModule::set(const char *module_name, uptr base_address) { + clear(); full_name_ = internal_strdup(module_name); base_address_ = base_address; - n_ranges_ = 0; +} + +void LoadedModule::clear() { + InternalFree(full_name_); + full_name_ = nullptr; + while (!ranges_.empty()) { + AddressRange *r = ranges_.front(); + ranges_.pop_front(); + InternalFree(r); + } } void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) { - CHECK_LT(n_ranges_, kMaxNumberOfAddressRanges); - ranges_[n_ranges_].beg = beg; - ranges_[n_ranges_].end = end; - exec_[n_ranges_] = executable; - n_ranges_++; + void *mem = InternalAlloc(sizeof(AddressRange)); + AddressRange *r = new(mem) AddressRange(beg, end, executable); + ranges_.push_back(r); } bool LoadedModule::containsAddress(uptr address) const { - for (uptr i = 0; i < n_ranges_; i++) { - if (ranges_[i].beg <= address && address < ranges_[i].end) + for (Iterator iter = ranges(); iter.hasNext();) { + const AddressRange *r = iter.next(); + if (r->beg <= address && address < r->end) return true; } return false; @@ -210,12 +330,9 @@ void IncreaseTotalMmap(uptr size) { if (!common_flags()->mmap_limit_mb) return; uptr total_mmaped = atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size; - if ((total_mmaped >> 20) > common_flags()->mmap_limit_mb) { - // Since for now mmap_limit_mb is not a user-facing flag, just CHECK. - uptr mmap_limit_mb = common_flags()->mmap_limit_mb; - common_flags()->mmap_limit_mb = 0; // Allow mmap in CHECK. - RAW_CHECK(total_mmaped >> 20 < mmap_limit_mb); - } + // Since for now mmap_limit_mb is not a user-facing flag, just kill + // a program. Use RAW_CHECK to avoid extra mmaps in reporting. 
+ RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb); } void DecreaseTotalMmap(uptr size) { @@ -223,39 +340,130 @@ void DecreaseTotalMmap(uptr size) { atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed); } -} // namespace __sanitizer +bool TemplateMatch(const char *templ, const char *str) { + if ((!str) || str[0] == 0) + return false; + bool start = false; + if (templ && templ[0] == '^') { + start = true; + templ++; + } + bool asterisk = false; + while (templ && templ[0]) { + if (templ[0] == '*') { + templ++; + start = false; + asterisk = true; + continue; + } + if (templ[0] == '$') + return str[0] == 0 || asterisk; + if (str[0] == 0) + return false; + char *tpos = (char*)internal_strchr(templ, '*'); + char *tpos1 = (char*)internal_strchr(templ, '$'); + if ((!tpos) || (tpos1 && tpos1 < tpos)) + tpos = tpos1; + if (tpos) + tpos[0] = 0; + const char *str0 = str; + const char *spos = internal_strstr(str, templ); + str = spos + internal_strlen(templ); + templ = tpos; + if (tpos) + tpos[0] = tpos == tpos1 ? '$' : '*'; + if (!spos) + return false; + if (start && spos != str0) + return false; + start = false; + asterisk = false; + } + return true; +} -using namespace __sanitizer; // NOLINT +static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':'; -extern "C" { -void __sanitizer_set_report_path(const char *path) { +char *FindPathToBinary(const char *name) { + const char *path = GetEnv("PATH"); if (!path) - return; - uptr len = internal_strlen(path); - if (len > sizeof(report_path_prefix) - 100) { - Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n", - path[0], path[1], path[2], path[3], - path[4], path[5], path[6], path[7]); - Die(); + return nullptr; + uptr name_len = internal_strlen(name); + InternalScopedBuffer<char> buffer(kMaxPathLength); + const char *beg = path; + while (true) { + const char *end = internal_strchrnul(beg, kPathSeparator); + uptr prefix_len = end - beg; + if (prefix_len + name_len + 2 <= kMaxPathLength) { + internal_memcpy(buffer.data(), beg, prefix_len); + buffer[prefix_len] = '/'; + internal_memcpy(&buffer[prefix_len + 1], name, name_len); + buffer[prefix_len + 1 + name_len] = '\0'; + if (FileExists(buffer.data())) + return internal_strdup(buffer.data()); + } + if (*end == '\0') break; + beg = end + 1; } - if (report_fd != kStdoutFd && - report_fd != kStderrFd && - report_fd != kInvalidFd) - internal_close(report_fd); - report_fd = kInvalidFd; - log_to_file = false; - if (internal_strcmp(path, "stdout") == 0) { - report_fd = kStdoutFd; - } else if (internal_strcmp(path, "stderr") == 0) { - report_fd = kStderrFd; - } else { - internal_strncpy(report_path_prefix, path, sizeof(report_path_prefix)); - report_path_prefix[len] = '\0'; - log_to_file = true; + return nullptr; +} + +static char binary_name_cache_str[kMaxPathLength]; +static char process_name_cache_str[kMaxPathLength]; + +const char *GetProcessName() { + return process_name_cache_str; +} + +static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) { + ReadLongProcessName(buf, buf_len); + char *s = const_cast<char *>(StripModuleName(buf)); + uptr len = internal_strlen(s); + if (s != buf) { + internal_memmove(buf, s, len); + buf[len] = '\0'; } + return len; +} + +void UpdateProcessName() { + ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str)); +} + +// Call once to make sure that binary_name_cache_str is initialized +void CacheBinaryName() { + if (binary_name_cache_str[0] != '\0') + return; + ReadBinaryName(binary_name_cache_str, 
sizeof(binary_name_cache_str)); + ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str)); +} + +uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) { + CacheBinaryName(); + uptr name_len = internal_strlen(binary_name_cache_str); + name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1; + if (buf_len == 0) + return 0; + internal_memcpy(buf, binary_name_cache_str, name_len); + buf[name_len] = '\0'; + return name_len; +} + +} // namespace __sanitizer + +using namespace __sanitizer; // NOLINT + +extern "C" { +void __sanitizer_set_report_path(const char *path) { + report_file.SetReportPath(path); } void __sanitizer_report_error_summary(const char *error_summary) { Printf("%s\n", error_summary); } -} // extern "C" + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_set_death_callback(void (*callback)(void)) { + SetUserDieCallback(callback); +} +} // extern "C" diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h index a8924913872..637e229742d 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common.h +++ b/libsanitizer/sanitizer_common/sanitizer_common.h @@ -5,8 +5,8 @@ // //===----------------------------------------------------------------------===// // -// This file is shared between AddressSanitizer and ThreadSanitizer -// run-time libraries. +// This file is shared between run-time libraries of sanitizers. +// // It declares common functions and classes that are used in both runtimes. // Implementation of some functions are provided in sanitizer_common, while // others must be defined by run-time library itself. @@ -14,13 +14,21 @@ #ifndef SANITIZER_COMMON_H #define SANITIZER_COMMON_H +#include "sanitizer_flags.h" +#include "sanitizer_interface_internal.h" #include "sanitizer_internal_defs.h" #include "sanitizer_libc.h" +#include "sanitizer_list.h" #include "sanitizer_mutex.h" -#include "sanitizer_flags.h" + +#ifdef _MSC_VER +extern "C" void _ReadWriteBarrier(); +#pragma intrinsic(_ReadWriteBarrier) +#endif namespace __sanitizer { struct StackTrace; +struct AddressInfo; // Constants. const uptr kWordSize = SANITIZER_WORDSIZE / 8; @@ -32,12 +40,27 @@ const uptr kWordSizeInBits = 8 * kWordSize; const uptr kCacheLineSize = 64; #endif -const uptr kMaxPathLength = 512; +const uptr kMaxPathLength = 4096; + +// 16K loaded modules should be enough for everyone. +static const uptr kMaxNumberOfModules = 1 << 14; const uptr kMaxThreadStackSize = 1 << 30; // 1Gb +// Denotes fake PC values that come from JIT/JAVA/etc. +// For such PC values __tsan_symbolize_external() will be called. +const u64 kExternalPCBit = 1ULL << 60; + extern const char *SanitizerToolName; // Can be changed by the tool. 
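TemplateMatch(), defined a little earlier in this file, is the pattern matcher behind suppressions: a template is a list of literal pieces separated by '*' that must occur in the subject in order, '^' pins the first piece to the start, and '$' requires the subject to be fully consumed (or to end right after a '*'). A few worked examples, written as a sketch against that signature rather than an existing test:

#include <cassert>
// Assumes: bool TemplateMatch(const char *templ, const char *str);

void TemplateMatchExamples() {
  // TemplateMatch temporarily writes into the template while scanning, so the
  // patterns are kept in writable buffers rather than string literals.
  char p1[] = "foo", p2[] = "^foo", p3[] = "^my*bar$", p4[] = "bar$";
  assert(TemplateMatch(p1, "myfoobar"));    // unanchored piece, substring hit
  assert(!TemplateMatch(p2, "myfoobar"));   // '^' pins "foo" to position 0
  assert(TemplateMatch(p3, "myfoobar"));    // pieces in order, both anchors
  assert(!TemplateMatch(p4, "myfoobar2"));  // '$' demands the end of input
  assert(!TemplateMatch(p1, ""));           // empty subjects never match
}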
+extern atomic_uint32_t current_verbosity; +INLINE void SetVerbosity(int verbosity) { + atomic_store(&current_verbosity, verbosity, memory_order_relaxed); +} +INLINE int Verbosity() { + return atomic_load(&current_verbosity, memory_order_relaxed); +} + uptr GetPageSize(); uptr GetPageSizeCached(); uptr GetMmapGranularity(); @@ -53,17 +76,27 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, // Memory management void *MmapOrDie(uptr size, const char *mem_type); void UnmapOrDie(void *addr, uptr size); -void *MmapFixedNoReserve(uptr fixed_addr, uptr size); +void *MmapFixedNoReserve(uptr fixed_addr, uptr size, + const char *name = nullptr); void *MmapNoReserveOrDie(uptr size, const char *mem_type); void *MmapFixedOrDie(uptr fixed_addr, uptr size); -void *Mprotect(uptr fixed_addr, uptr size); +void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr); // Map aligned chunk of address space; size and alignment are powers of two. void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type); +// Disallow access to a memory range. Use MmapNoAccess to allocate an +// unaccessible memory. +bool MprotectNoAccess(uptr addr, uptr size); + // Used to check if we can map shadow memory to a fixed location. bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); void FlushUnneededShadowMemory(uptr addr, uptr size); void IncreaseTotalMmap(uptr size); void DecreaseTotalMmap(uptr size); +uptr GetRSS(); +void NoHugePagesInRegion(uptr addr, uptr length); +void DontDumpShadowMemory(uptr addr, uptr length); +// Check if the built VMA size matches the runtime one. +void CheckVMASize(); // InternalScopedBuffer can be used instead of large stack arrays to // keep frame size low. @@ -126,44 +159,93 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback); // IO void RawWrite(const char *buffer); -bool PrintsToTty(); -// Caching version of PrintsToTty(). Not thread-safe. -bool PrintsToTtyCached(); bool ColorizeReports(); void Printf(const char *format, ...); void Report(const char *format, ...); void SetPrintfAndReportCallback(void (*callback)(const char *)); #define VReport(level, ...) \ do { \ - if ((uptr)common_flags()->verbosity >= (level)) Report(__VA_ARGS__); \ + if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \ } while (0) #define VPrintf(level, ...) \ do { \ - if ((uptr)common_flags()->verbosity >= (level)) Printf(__VA_ARGS__); \ + if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \ } while (0) // Can be used to prevent mixing error reports from different sanitizers. extern StaticSpinMutex CommonSanitizerReportMutex; -void MaybeOpenReportFile(); -extern fd_t report_fd; -extern bool log_to_file; -extern char report_path_prefix[4096]; -extern uptr report_fd_pid; + +struct ReportFile { + void Write(const char *buffer, uptr length); + bool SupportsColors(); + void SetReportPath(const char *path); + + // Don't use fields directly. They are only declared public to allow + // aggregate initialization. + + // Protects fields below. + StaticSpinMutex *mu; + // Opened file descriptor. Defaults to stderr. It may be equal to + // kInvalidFd, in which case new file will be opened when necessary. + fd_t fd; + // Path prefix of report file, set via __sanitizer_set_report_path. + char path_prefix[kMaxPathLength]; + // Full path to report, obtained as <path_prefix>.PID + char full_path[kMaxPathLength]; + // PID of the process that opened fd. If a fork() occurs, + // the PID of child will be different from fd_pid. 
+ uptr fd_pid; + + private: + void ReopenIfNecessary(); +}; +extern ReportFile report_file; + extern uptr stoptheworld_tracer_pid; extern uptr stoptheworld_tracer_ppid; -uptr OpenFile(const char *filename, bool write); +enum FileAccessMode { + RdOnly, + WrOnly, + RdWr +}; + +// Returns kInvalidFd on error. +fd_t OpenFile(const char *filename, FileAccessMode mode, + error_t *errno_p = nullptr); +void CloseFile(fd_t); + +// Return true on success, false on error. +bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, + uptr *bytes_read = nullptr, error_t *error_p = nullptr); +bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, + uptr *bytes_written = nullptr, error_t *error_p = nullptr); + +bool RenameFile(const char *oldpath, const char *newpath, + error_t *error_p = nullptr); + +// Scoped file handle closer. +struct FileCloser { + explicit FileCloser(fd_t fd) : fd(fd) {} + ~FileCloser() { CloseFile(fd); } + fd_t fd; +}; + +bool SupportsColoredOutput(fd_t fd); + // Opens the file 'file_name" and reads up to 'max_len' bytes. // The resulting buffer is mmaped and stored in '*buff'. -// The size of the mmaped region is stored in '*buff_size', -// Returns the number of read bytes or 0 if file can not be opened. -uptr ReadFileToBuffer(const char *file_name, char **buff, - uptr *buff_size, uptr max_len); +// The size of the mmaped region is stored in '*buff_size'. +// The total number of read bytes is stored in '*read_len'. +// Returns true if file was successfully opened and read. +bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size, + uptr *read_len, uptr max_len = 1 << 26, + error_t *errno_p = nullptr); // Maps given file to virtual memory, and returns pointer to it -// (or NULL if the mapping failes). Stores the size of mmaped region +// (or NULL if mapping fails). Stores the size of mmaped region // in '*buff_size'. 
void *MapFileToMemory(const char *file_name, uptr *buff_size); -void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset); +void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset); bool IsAccessibleMemoryRange(uptr beg, uptr size); @@ -174,6 +256,12 @@ const char *StripPathPrefix(const char *filepath, const char *StripModuleName(const char *module); // OS +uptr ReadBinaryName(/*out*/char *buf, uptr buf_len); +uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len); +uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len); +const char *GetProcessName(); +void UpdateProcessName(); +void CacheBinaryName(); void DisableCoreDumperIfNecessary(); void DumpProcessMap(); bool FileExists(const char *filename); @@ -181,6 +269,9 @@ const char *GetEnv(const char *name); bool SetEnv(const char *name, const char *value); const char *GetPwd(); char *FindPathToBinary(const char *name); +bool IsPathSeparator(const char c); +bool IsAbsolutePath(const char *path); + u32 GetUid(); void ReExec(); bool StackSizeIsUnlimited(); @@ -192,10 +283,13 @@ void PrepareForSandboxing(__sanitizer_sandbox_arguments *args); void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args); void SetSandboxingCallback(void (*f)()); -void CovUpdateMapping(uptr caller_pc = 0); +void CoverageUpdateMapping(); void CovBeforeFork(); void CovAfterFork(int child_pid); +void InitializeCoverage(bool enabled, const char *coverage_dir); +void ReInitializeCoverage(bool enabled, const char *coverage_dir); + void InitTlsSize(); uptr GetTlsSize(); @@ -205,12 +299,15 @@ void SleepForMillis(int millis); u64 NanoTime(); int Atexit(void (*function)(void)); void SortArray(uptr *array, uptr size); +bool TemplateMatch(const char *templ, const char *str); // Exit void NORETURN Abort(); void NORETURN Die(); void NORETURN CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); +void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type, + error_t err); // Set the name of the current thread to 'name', return true on succees. // The name may be truncated to a system-dependent limit. @@ -222,12 +319,26 @@ bool SanitizerGetThreadName(char *name, int max_len); // Specific tools may override behavior of "Die" and "CheckFailed" functions // to do tool-specific job. typedef void (*DieCallbackType)(void); -void SetDieCallback(DieCallbackType); -DieCallbackType GetDieCallback(); + +// It's possible to add several callbacks that would be run when "Die" is +// called. The callbacks will be run in the opposite order. The tools are +// strongly recommended to setup all callbacks during initialization, when there +// is only a single thread. +bool AddDieCallback(DieCallbackType callback); +bool RemoveDieCallback(DieCallbackType callback); + +void SetUserDieCallback(DieCallbackType callback); + typedef void (*CheckFailedCallbackType)(const char *, int, const char *, u64, u64); void SetCheckFailedCallback(CheckFailedCallbackType callback); +// Callback will be called if soft_rss_limit_mb is given and the limit is +// exceeded (exceeded==true) or if rss went down below the limit +// (exceeded==false). +// The callback should be registered once at the tool init time. +void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)); + // Functions related to signal handling. typedef void (*SignalHandlerType)(int, void *, void *); bool IsDeadlySignal(int signum); @@ -243,9 +354,9 @@ const int kMaxSummaryLength = 1024; // and pass it to __sanitizer_report_error_summary. 
void ReportErrorSummary(const char *error_message); // Same as above, but construct error_message as: -// error_type file:line function -void ReportErrorSummary(const char *error_type, const char *file, - int line, const char *function); +// error_type file:line[:column][ function] +void ReportErrorSummary(const char *error_type, const AddressInfo &info); +// Same as above, but obtains AddressInfo by symbolizing top stack trace frame. void ReportErrorSummary(const char *error_type, StackTrace *trace); // Math @@ -264,7 +375,11 @@ INLINE uptr MostSignificantSetBitIndex(uptr x) { CHECK_NE(x, 0U); unsigned long up; // NOLINT #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) +# ifdef _WIN64 + up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x); +# else up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x); +# endif #elif defined(_WIN64) _BitScanReverse64(&up, x); #else @@ -277,7 +392,11 @@ INLINE uptr LeastSignificantSetBitIndex(uptr x) { CHECK_NE(x, 0U); unsigned long up; // NOLINT #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) +# ifdef _WIN64 + up = __builtin_ctzll(x); +# else up = __builtin_ctzl(x); +# endif #elif defined(_WIN64) _BitScanForward64(&up, x); #else @@ -297,7 +416,7 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) { uptr up = MostSignificantSetBitIndex(size); CHECK(size < (1ULL << (up + 1))); CHECK(size > (1ULL << up)); - return 1UL << (up + 1); + return 1ULL << (up + 1); } INLINE uptr RoundUpTo(uptr size, uptr boundary) { @@ -315,17 +434,7 @@ INLINE bool IsAligned(uptr a, uptr alignment) { INLINE uptr Log2(uptr x) { CHECK(IsPowerOfTwo(x)); -#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) - return __builtin_ctzl(x); -#elif defined(_WIN64) - unsigned long ret; // NOLINT - _BitScanForward64(&ret, x); - return ret; -#else - unsigned long ret; // NOLINT - _BitScanForward(&ret, x); - return ret; -#endif + return LeastSignificantSetBitIndex(x); } // Don't use std::min, std::max or std::swap, to minimize dependency @@ -354,14 +463,14 @@ INLINE int ToLower(int c) { // small vectors. // WARNING: The current implementation supports only POD types. template<typename T> -class InternalMmapVector { +class InternalMmapVectorNoCtor { public: - explicit InternalMmapVector(uptr initial_capacity) { + void Initialize(uptr initial_capacity) { capacity_ = Max(initial_capacity, (uptr)1); size_ = 0; - data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVector"); + data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor"); } - ~InternalMmapVector() { + void Destroy() { UnmapOrDie(data_, capacity_ * sizeof(T)); } T &operator[](uptr i) { @@ -394,11 +503,15 @@ class InternalMmapVector { const T *data() const { return data_; } + T *data() { + return data_; + } uptr capacity() const { return capacity_; } void clear() { size_ = 0; } + bool empty() const { return size() == 0; } private: void Resize(uptr new_capacity) { @@ -412,15 +525,24 @@ class InternalMmapVector { UnmapOrDie(old_data, capacity_ * sizeof(T)); capacity_ = new_capacity; } - // Disallow evil constructors. - InternalMmapVector(const InternalMmapVector&); - void operator=(const InternalMmapVector&); T *data_; uptr capacity_; uptr size_; }; +template<typename T> +class InternalMmapVector : public InternalMmapVectorNoCtor<T> { + public: + explicit InternalMmapVector(uptr initial_capacity) { + InternalMmapVectorNoCtor<T>::Initialize(initial_capacity); + } + ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); } + // Disallow evil constructors. 
+ InternalMmapVector(const InternalMmapVector&); + void operator=(const InternalMmapVector&); +}; + // HeapSort for arrays and InternalMmapVector. template<class Container, class Compare> void InternalSort(Container *v, uptr size, Compare comp) { @@ -478,29 +600,32 @@ uptr InternalBinarySearch(const Container &v, uptr first, uptr last, // executable or a shared object). class LoadedModule { public: - LoadedModule(const char *module_name, uptr base_address); + LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); } + void set(const char *module_name, uptr base_address); + void clear(); void addAddressRange(uptr beg, uptr end, bool executable); bool containsAddress(uptr address) const; const char *full_name() const { return full_name_; } uptr base_address() const { return base_address_; } - uptr n_ranges() const { return n_ranges_; } - uptr address_range_start(int i) const { return ranges_[i].beg; } - uptr address_range_end(int i) const { return ranges_[i].end; } - bool address_range_executable(int i) const { return exec_[i]; } - - private: struct AddressRange { + AddressRange *next; uptr beg; uptr end; + bool executable; + + AddressRange(uptr beg, uptr end, bool executable) + : next(nullptr), beg(beg), end(end), executable(executable) {} }; - char *full_name_; + + typedef IntrusiveList<AddressRange>::ConstIterator Iterator; + Iterator ranges() const { return Iterator(&ranges_); } + + private: + char *full_name_; // Owned. uptr base_address_; - static const uptr kMaxNumberOfAddressRanges = 6; - AddressRange ranges_[kMaxNumberOfAddressRanges]; - bool exec_[kMaxNumberOfAddressRanges]; - uptr n_ranges_; + IntrusiveList<AddressRange> ranges_; }; // OS-dependent function that fills array with descriptions of at most @@ -511,45 +636,80 @@ typedef bool (*string_predicate_t)(const char *); uptr GetListOfModules(LoadedModule *modules, uptr max_modules, string_predicate_t filter); -#if SANITIZER_POSIX -const uptr kPthreadDestructorIterations = 4; -#else -// Unused on Windows. -const uptr kPthreadDestructorIterations = 0; -#endif - // Callback type for iterating over a set of memory ranges. typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg); -#if (SANITIZER_FREEBSD || SANITIZER_LINUX) && !defined(SANITIZER_GO) -extern uptr indirect_call_wrapper; -void SetIndirectCallWrapper(uptr wrapper); +enum AndroidApiLevel { + ANDROID_NOT_ANDROID = 0, + ANDROID_KITKAT = 19, + ANDROID_LOLLIPOP_MR1 = 22, + ANDROID_POST_LOLLIPOP = 23 +}; -template <typename F> -F IndirectExternCall(F f) { - typedef F (*WrapF)(F); - return indirect_call_wrapper ? ((WrapF)indirect_call_wrapper)(f) : f; -} +#if SANITIZER_LINUX +// Initialize Android logging. Any writes before this are silently lost. +void AndroidLogInit(); +void WriteToSyslog(const char *buffer); #else -INLINE void SetIndirectCallWrapper(uptr wrapper) {} -template <typename F> -F IndirectExternCall(F f) { - return f; -} +INLINE void AndroidLogInit() {} +INLINE void WriteToSyslog(const char *buffer) {} #endif #if SANITIZER_ANDROID -// Initialize Android logging. Any writes before this are silently lost. 
-void AndroidLogInit(); -void AndroidLogWrite(const char *buffer); void GetExtraActivationFlags(char *buf, uptr size); void SanitizerInitializeUnwinder(); +AndroidApiLevel AndroidGetApiLevel(); #else -INLINE void AndroidLogInit() {} INLINE void AndroidLogWrite(const char *buffer_unused) {} INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; } INLINE void SanitizerInitializeUnwinder() {} +INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; } +#endif + +INLINE uptr GetPthreadDestructorIterations() { +#if SANITIZER_ANDROID + return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4; +#elif SANITIZER_POSIX + return 4; +#else +// Unused on Windows. + return 0; +#endif +} + +void *internal_start_thread(void(*func)(void*), void *arg); +void internal_join_thread(void *th); +void MaybeStartBackgroudThread(); + +// Make the compiler think that something is going on there. +// Use this inside a loop that looks like memset/memcpy/etc to prevent the +// compiler from recognising it and turning it into an actual call to +// memset/memcpy/etc. +static inline void SanitizerBreakOptimization(void *arg) { +#if _MSC_VER && !defined(__clang__) + _ReadWriteBarrier(); +#else + __asm__ __volatile__("" : : "r" (arg) : "memory"); #endif +} + +struct SignalContext { + void *context; + uptr addr; + uptr pc; + uptr sp; + uptr bp; + + SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) : + context(context), addr(addr), pc(pc), sp(sp), bp(bp) { + } + + // Creates signal context in a platform-specific manner. + static SignalContext Create(void *siginfo, void *context); +}; + +void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp); + } // namespace __sanitizer inline void *operator new(__sanitizer::operator_new_size_type size, diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc index 10f321838e8..9b8c77e6901 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc @@ -15,10 +15,12 @@ // COMMON_INTERCEPTOR_READ_RANGE // COMMON_INTERCEPTOR_WRITE_RANGE // COMMON_INTERCEPTOR_INITIALIZE_RANGE +// COMMON_INTERCEPTOR_DIR_ACQUIRE // COMMON_INTERCEPTOR_FD_ACQUIRE // COMMON_INTERCEPTOR_FD_RELEASE // COMMON_INTERCEPTOR_FD_ACCESS // COMMON_INTERCEPTOR_SET_THREAD_NAME +// COMMON_INTERCEPTOR_ON_DLOPEN // COMMON_INTERCEPTOR_ON_EXIT // COMMON_INTERCEPTOR_MUTEX_LOCK // COMMON_INTERCEPTOR_MUTEX_UNLOCK @@ -27,6 +29,7 @@ // COMMON_INTERCEPTOR_HANDLE_RECVMSG // COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED //===----------------------------------------------------------------------===// + #include "interception/interception.h" #include "sanitizer_addrhashmap.h" #include "sanitizer_placement_new.h" @@ -35,12 +38,31 @@ #include <stdarg.h> +#if SANITIZER_INTERCEPTOR_HOOKS +#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \ + do { \ + if (f) \ + f(__VA_ARGS__); \ + } while (false); +#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \ + extern "C" { \ + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__); \ + } // extern "C" +#else +#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) +#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) 
+ +#endif // SANITIZER_INTERCEPTOR_HOOKS + #if SANITIZER_WINDOWS && !defined(va_copy) #define va_copy(dst, src) ((dst) = (src)) #endif // _WIN32 #if SANITIZER_FREEBSD #define pthread_setname_np pthread_set_name_np +#define inet_aton __inet_aton +#define inet_pton __inet_pton +#define iconv __bsd_iconv #endif #ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE @@ -80,7 +102,7 @@ #endif #ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED -#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, map) {} +#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {} #endif #ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED @@ -96,6 +118,29 @@ #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0) #endif +#define COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n) \ + COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \ + common_flags()->strict_string_checks ? (len) + 1 : (n) ) + +#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \ + COMMON_INTERCEPTOR_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n)) + +#ifndef COMMON_INTERCEPTOR_ON_DLOPEN +#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) {} +#endif + +#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE +#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) *begin = *end = 0; +#endif + +#ifndef COMMON_INTERCEPTOR_ACQUIRE +#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) {} +#endif + +#ifndef COMMON_INTERCEPTOR_RELEASE +#define COMMON_INTERCEPTOR_RELEASE(ctx, u) {} +#endif + struct FileMetadata { // For open_memstream(). char **addr; @@ -149,7 +194,8 @@ UNUSED static void DeleteInterceptorMetadata(void *addr) { INTERCEPTOR(char*, textdomain, const char *domainname) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname); - char* domain = REAL(textdomain)(domainname); + COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0); + char *domain = REAL(textdomain)(domainname); if (domain) { COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1); } @@ -165,9 +211,14 @@ static inline int CharCmpX(unsigned char c1, unsigned char c2) { return (c1 == c2) ? 0 : (c1 < c2) ? 
-1 : 1; } +DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc, + const char *s1, const char *s2) + INTERCEPTOR(int, strcmp, const char *s1, const char *s2) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2); + CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1, + s2); unsigned char c1, c2; uptr i; for (i = 0;; i++) { @@ -175,16 +226,21 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) { c2 = (unsigned char)s2[i]; if (c1 != c2 || c1 == '\0') break; } - COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, i + 1); - COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, i + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1); return CharCmpX(c1, c2); } +DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc, + const char *s1, const char *s2, uptr n) + INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) { if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) return internal_strncmp(s1, s2, size); void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size); + CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1, + s2, size); unsigned char c1 = 0, c2 = 0; uptr i; for (i = 0; i < size; i++) { @@ -221,8 +277,8 @@ INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) { c2 = (unsigned char)s2[i]; if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break; } - COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, i + 1); - COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, i + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1); return CharCaseCmp(c1, c2); } @@ -248,12 +304,145 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) { #define INIT_STRNCASECMP #endif +#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR +static inline void StrstrCheck(void *ctx, char *r, const char *s1, + const char *s2) { + uptr len1 = REAL(strlen)(s1); + uptr len2 = REAL(strlen)(s2); + COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s1, len1, + r ? 
r - s1 + len2 : len1 + 1); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1); +} +#endif + +#if SANITIZER_INTERCEPT_STRSTR +INTERCEPTOR(char*, strstr, const char *s1, const char *s2) { + if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) + return internal_strstr(s1, s2); + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strstr, s1, s2); + char *r = REAL(strstr)(s1, s2); + if (common_flags()->intercept_strstr) + StrstrCheck(ctx, r, s1, s2); + return r; +} + +#define INIT_STRSTR COMMON_INTERCEPT_FUNCTION(strstr); +#else +#define INIT_STRSTR +#endif + +#if SANITIZER_INTERCEPT_STRCASESTR +INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2); + char *r = REAL(strcasestr)(s1, s2); + if (common_flags()->intercept_strstr) + StrstrCheck(ctx, r, s1, s2); + return r; +} + +#define INIT_STRCASESTR COMMON_INTERCEPT_FUNCTION(strcasestr); +#else +#define INIT_STRCASESTR +#endif + +#if SANITIZER_INTERCEPT_STRSPN +INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2); + SIZE_T r = REAL(strspn)(s1, s2); + if (common_flags()->intercept_strspn) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1); + } + return r; +} + +INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2); + SIZE_T r = REAL(strcspn)(s1, s2); + if (common_flags()->intercept_strspn) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1); + } + return r; +} + +#define INIT_STRSPN \ + COMMON_INTERCEPT_FUNCTION(strspn); \ + COMMON_INTERCEPT_FUNCTION(strcspn); +#else +#define INIT_STRSPN +#endif + +#if SANITIZER_INTERCEPT_STRPBRK +INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2); + char *r = REAL(strpbrk)(s1, s2); + if (common_flags()->intercept_strpbrk) { + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1); + COMMON_INTERCEPTOR_READ_STRING(ctx, s1, + r ? r - s1 + 1 : REAL(strlen)(s1) + 1); + } + return r; +} + +#define INIT_STRPBRK COMMON_INTERCEPT_FUNCTION(strpbrk); +#else +#define INIT_STRPBRK +#endif + +#if SANITIZER_INTERCEPT_MEMCMP + +DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc, + const void *s1, const void *s2, uptr n) + +INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size); + if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) + return internal_memcmp(a1, a2, size); + CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1, + a2, size); + if (common_flags()->intercept_memcmp) { + if (common_flags()->strict_memcmp) { + // Check the entire regions even if the first bytes of the buffers are + // different. + COMMON_INTERCEPTOR_READ_RANGE(ctx, a1, size); + COMMON_INTERCEPTOR_READ_RANGE(ctx, a2, size); + // Fallthrough to REAL(memcmp) below. 
+ } else { + unsigned char c1 = 0, c2 = 0; + const unsigned char *s1 = (const unsigned char*)a1; + const unsigned char *s2 = (const unsigned char*)a2; + uptr i; + for (i = 0; i < size; i++) { + c1 = s1[i]; + c2 = s2[i]; + if (c1 != c2) break; + } + COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size)); + COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size)); + return CharCmpX(c1, c2); + } + } + return REAL(memcmp(a1, a2, size)); +} + +#define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp) +#else +#define INIT_MEMCMP +#endif + #if SANITIZER_INTERCEPT_MEMCHR INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n); void *res = REAL(memchr)(s, c, n); - uptr len = res ? (char*)res - (char*)s + 1 : n; + uptr len = res ? (char *)res - (const char *)s + 1 : n; COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len); return res; } @@ -717,12 +906,12 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) { // its metadata. See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. char *res = REAL(strptime)(s, format, tm); - if (res) { - COMMON_INTERCEPTOR_READ_RANGE(ctx, s, res - s); + COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0); + if (res && tm) { // Do not call unpoison_tm here, because strptime does not, in fact, // initialize the entire struct tm. For example, tm_zone pointer is left // uninitialized. - if (tm) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm)); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm)); } return res; } @@ -913,6 +1102,16 @@ INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format, va_list ap) VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap) +#if SANITIZER_INTERCEPT_PRINTF_L +INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc, + const char *format, va_list ap) +VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap) + +INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc, + const char *format, ...) +FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format) +#endif // SANITIZER_INTERCEPT_PRINTF_L + INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap) VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap) @@ -989,6 +1188,14 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size, #define INIT_PRINTF #endif +#if SANITIZER_INTERCEPT_PRINTF_L +#define INIT_PRINTF_L \ + COMMON_INTERCEPT_FUNCTION(snprintf_l); \ + COMMON_INTERCEPT_FUNCTION(vsnprintf_l); +#else +#define INIT_PRINTF_L +#endif + #if SANITIZER_INTERCEPT_ISOC99_PRINTF #define INIT_ISOC99_PRINTF \ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \ @@ -1005,8 +1212,18 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size, #if SANITIZER_INTERCEPT_IOCTL #include "sanitizer_common_interceptors_ioctl.inc" -INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) { - void *ctx; +INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) { + // We need a frame pointer, because we call into ioctl_common_[pre|post] which + // can trigger a report and we need to be able to unwind through this + // function. On Mac in debug mode we might not have a frame pointer, because + // ioctl_common_[pre|post] doesn't get inlined here. 
+ ENABLE_FRAME_POINTER; + + void *ctx; + va_list ap; + va_start(ap, request); + void *arg = va_arg(ap, void *); + va_end(ap); COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg); CHECK(ioctl_initialized); @@ -1015,6 +1232,10 @@ INTERCEPTOR(int, ioctl, int d, unsigned request, void *arg) { // This effectively disables ioctl handling in TSan. if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg); + // Although request is unsigned long, the rest of the interceptor uses it + // as just "unsigned" to save space, because we know that all values fit in + // "unsigned" - they are compile-time constants. + const ioctl_desc *desc = ioctl_lookup(request); ioctl_desc decoded_desc; if (!desc) { @@ -1097,14 +1318,14 @@ INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) { COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name); COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); __sanitizer_passwd *res = REAL(getpwnam)(name); - if (res != 0) unpoison_passwd(ctx, res); + if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid); __sanitizer_passwd *res = REAL(getpwuid)(uid); - if (res != 0) unpoison_passwd(ctx, res); + if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) { @@ -1112,14 +1333,14 @@ INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) { COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name); COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1); __sanitizer_group *res = REAL(getgrnam)(name); - if (res != 0) unpoison_group(ctx, res); + if (res) unpoison_group(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid); __sanitizer_group *res = REAL(getgrgid)(gid); - if (res != 0) unpoison_group(ctx, res); + if (res) unpoison_group(ctx, res); return res; } #define INIT_GETPWNAM_AND_FRIENDS \ @@ -1208,14 +1429,14 @@ INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy); __sanitizer_passwd *res = REAL(getpwent)(dummy); - if (res != 0) unpoison_passwd(ctx, res); + if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy); __sanitizer_group *res = REAL(getgrent)(dummy); - if (res != 0) unpoison_group(ctx, res);; + if (res) unpoison_group(ctx, res);; return res; } #define INIT_GETPWENT \ @@ -1230,14 +1451,14 @@ INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp); __sanitizer_passwd *res = REAL(fgetpwent)(fp); - if (res != 0) unpoison_passwd(ctx, res); + if (res) unpoison_passwd(ctx, res); return res; } INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp); __sanitizer_group *res = REAL(fgetgrent)(fp); - if (res != 0) unpoison_group(ctx, res); + if (res) unpoison_group(ctx, res); return res; } #define INIT_FGETPWENT \ @@ -1440,30 +1661,30 @@ static THREADLOCAL __sanitizer_glob_t *pglob_copy; static void wrapped_gl_closedir(void *dir) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); - IndirectExternCall(pglob_copy->gl_closedir)(dir); + pglob_copy->gl_closedir(dir); } static void *wrapped_gl_readdir(void *dir) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); - return IndirectExternCall(pglob_copy->gl_readdir)(dir); + return pglob_copy->gl_readdir(dir); } static 
void *wrapped_gl_opendir(const char *s) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1); - return IndirectExternCall(pglob_copy->gl_opendir)(s); + return pglob_copy->gl_opendir(s); } static int wrapped_gl_lstat(const char *s, void *st) { COMMON_INTERCEPTOR_UNPOISON_PARAM(2); COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1); - return IndirectExternCall(pglob_copy->gl_lstat)(s, st); + return pglob_copy->gl_lstat(s, st); } static int wrapped_gl_stat(const char *s, void *st) { COMMON_INTERCEPTOR_UNPOISON_PARAM(2); COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1); - return IndirectExternCall(pglob_copy->gl_stat)(s, st); + return pglob_copy->gl_stat(s, st); } INTERCEPTOR(int, glob, const char *pattern, int flags, @@ -1471,6 +1692,7 @@ INTERCEPTOR(int, glob, const char *pattern, int flags, __sanitizer_glob_t *pglob) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob); + COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0); __sanitizer_glob_t glob_copy = { 0, 0, 0, 0, wrapped_gl_closedir, wrapped_gl_readdir, @@ -1501,6 +1723,7 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags, __sanitizer_glob_t *pglob) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob); + COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0); __sanitizer_glob_t glob_copy = { 0, 0, 0, 0, wrapped_gl_closedir, wrapped_gl_readdir, @@ -1647,6 +1870,7 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) { INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst); + COMMON_INTERCEPTOR_READ_STRING(ctx, src, 0); // FIXME: figure out read size based on the address family. // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See @@ -2137,6 +2361,16 @@ INTERCEPTOR(int, sysinfo, void *info) { #endif #if SANITIZER_INTERCEPT_READDIR +INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, opendir, path); + COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + __sanitizer_dirent *res = REAL(opendir)(path); + if (res) + COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path); + return res; +} + INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp); @@ -2165,6 +2399,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry, } #define INIT_READDIR \ + COMMON_INTERCEPT_FUNCTION(opendir); \ COMMON_INTERCEPT_FUNCTION(readdir); \ COMMON_INTERCEPT_FUNCTION(readdir_r); #else @@ -2306,6 +2541,37 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) { #define INIT_GET_CURRENT_DIR_NAME #endif +UNUSED static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) { + CHECK(endptr); + if (nptr == *endptr) { + // No digits were found at strtol call, we need to find out the last + // symbol accessed by strtoll on our own. + // We get this symbol by skipping leading blanks and optional +/- sign. 
+ while (IsSpace(*nptr)) nptr++; + if (*nptr == '+' || *nptr == '-') nptr++; + *endptr = const_cast<char *>(nptr); + } + CHECK(*endptr >= nptr); +} + +UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr, + char **endptr, char *real_endptr, int base) { + if (endptr) { + *endptr = real_endptr; + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr)); + } + // If base has unsupported value, strtol can exit with EINVAL + // without reading any characters. So do additional checks only + // if base is valid. + bool is_valid_base = (base == 0) || (2 <= base && base <= 36); + if (is_valid_base) { + FixRealStrtolEndptr(nptr, &real_endptr); + } + COMMON_INTERCEPTOR_READ_STRING(ctx, nptr, is_valid_base ? + (real_endptr - nptr) + 1 : 0); +} + + #if SANITIZER_INTERCEPT_STRTOIMAX INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) { void *ctx; @@ -2313,8 +2579,9 @@ INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) { // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. - INTMAX_T res = REAL(strtoimax)(nptr, endptr, base); - if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr)); + char *real_endptr; + INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base); + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return res; } @@ -2324,8 +2591,9 @@ INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) { // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. - INTMAX_T res = REAL(strtoumax)(nptr, endptr, base); - if (endptr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr)); + char *real_endptr; + INTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base); + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); return res; } @@ -2456,7 +2724,7 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms, // its metadata. See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps); - if (res != (SIZE_T) - 1 && dest && src) { + if (res != ((SIZE_T)-1) && dest && src) { SIZE_T write_cnt = res + !*src; COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt); } @@ -2468,6 +2736,28 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms, #define INIT_WCSNRTOMBS #endif + +#if SANITIZER_INTERCEPT_WCRTOMB +INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, wcrtomb, dest, src, ps); + if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz); + // FIXME: under ASan the call below may write to freed memory and corrupt + // its metadata. See + // https://code.google.com/p/address-sanitizer/issues/detail?id=321. + SIZE_T res = REAL(wcrtomb)(dest, src, ps); + if (res != ((SIZE_T)-1) && dest) { + SIZE_T write_cnt = res; + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt); + } + return res; +} + +#define INIT_WCRTOMB COMMON_INTERCEPT_FUNCTION(wcrtomb); +#else +#define INIT_WCRTOMB +#endif + #if SANITIZER_INTERCEPT_TCGETATTR INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) { void *ctx; @@ -2496,7 +2786,7 @@ INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) { // version of a versioned symbol. 
For realpath(), this gives us something // (called __old_realpath) that does not handle NULL in the second argument. // Handle it as part of the interceptor. - char *allocated_path = 0; + char *allocated_path = nullptr; if (!resolved_path) allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1); @@ -2558,6 +2848,19 @@ INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) { #define INIT_SCHED_GETAFFINITY #endif +#if SANITIZER_INTERCEPT_SCHED_GETPARAM +INTERCEPTOR(int, sched_getparam, int pid, void *param) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param); + int res = REAL(sched_getparam)(pid, param); + if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz); + return res; +} +#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam); +#else +#define INIT_SCHED_GETPARAM +#endif + #if SANITIZER_INTERCEPT_STRERROR INTERCEPTOR(char *, strerror, int errnum) { void *ctx; @@ -2631,7 +2934,7 @@ static THREADLOCAL scandir_compar_f scandir_compar; static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen); - return IndirectExternCall(scandir_filter)(dir); + return scandir_filter(dir); } static int wrapped_scandir_compar(const struct __sanitizer_dirent **a, @@ -2641,7 +2944,7 @@ static int wrapped_scandir_compar(const struct __sanitizer_dirent **a, COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen); COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b)); COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen); - return IndirectExternCall(scandir_compar)(a, b); + return scandir_compar(a, b); } INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist, @@ -2654,10 +2957,11 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist, // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. - int res = REAL(scandir)(dirp, namelist, filter ? wrapped_scandir_filter : 0, - compar ? wrapped_scandir_compar : 0); - scandir_filter = 0; - scandir_compar = 0; + int res = REAL(scandir)(dirp, namelist, + filter ? wrapped_scandir_filter : nullptr, + compar ? wrapped_scandir_compar : nullptr); + scandir_filter = nullptr; + scandir_compar = nullptr; if (namelist && res > 0) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res); @@ -2683,7 +2987,7 @@ static THREADLOCAL scandir64_compar_f scandir64_compar; static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) { COMMON_INTERCEPTOR_UNPOISON_PARAM(1); COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen); - return IndirectExternCall(scandir64_filter)(dir); + return scandir64_filter(dir); } static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a, @@ -2693,7 +2997,7 @@ static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a, COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen); COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b)); COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen); - return IndirectExternCall(scandir64_compar)(a, b); + return scandir64_compar(a, b); } INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist, @@ -2707,10 +3011,11 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist, // its metadata. 
See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. int res = - REAL(scandir64)(dirp, namelist, filter ? wrapped_scandir64_filter : 0, - compar ? wrapped_scandir64_compar : 0); - scandir64_filter = 0; - scandir64_compar = 0; + REAL(scandir64)(dirp, namelist, + filter ? wrapped_scandir64_filter : nullptr, + compar ? wrapped_scandir64_compar : nullptr); + scandir64_filter = nullptr; + scandir64_compar = nullptr; if (namelist && res > 0) { COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist)); COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res); @@ -3576,6 +3881,7 @@ INTERCEPTOR(char *, tempnam, char *dir, char *pfx) { INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name); + COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0); COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name); return REAL(pthread_setname_np)(thread, name); } @@ -3792,25 +4098,33 @@ INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) { } return res; } -INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim, - void *stream) { - void *ctx; - COMMON_INTERCEPTOR_ENTER(ctx, __getdelim, lineptr, n, delim, stream); - // FIXME: under ASan the call below may write to freed memory and corrupt - // its metadata. See - // https://code.google.com/p/address-sanitizer/issues/detail?id=321. - SSIZE_T res = REAL(__getdelim)(lineptr, n, delim, stream); - if (res > 0) { - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); + +// FIXME: under ASan the call below may write to freed memory and corrupt its +// metadata. See +// https://code.google.com/p/address-sanitizer/issues/detail?id=321. +#define GETDELIM_INTERCEPTOR_IMPL(vname) \ + { \ + void *ctx; \ + COMMON_INTERCEPTOR_ENTER(ctx, vname, lineptr, n, delim, stream); \ + SSIZE_T res = REAL(vname)(lineptr, n, delim, stream); \ + if (res > 0) { \ + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); \ + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); \ + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); \ + } \ + return res; \ } - return res; -} + +INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim, + void *stream) +GETDELIM_INTERCEPTOR_IMPL(__getdelim) + +// There's no __getdelim() on FreeBSD so we supply the getdelim() interceptor +// with its own body. INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim, - void *stream) { - return __getdelim(lineptr, n, delim, stream); -} + void *stream) +GETDELIM_INTERCEPTOR_IMPL(getdelim) + #define INIT_GETLINE \ COMMON_INTERCEPT_FUNCTION(getline); \ COMMON_INTERCEPT_FUNCTION(__getdelim); \ @@ -3831,7 +4145,7 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft, COMMON_INTERCEPTOR_READ_RANGE(ctx, *inbuf, *inbytesleft); if (outbytesleft) COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft)); - void *outbuf_orig = outbuf ? *outbuf : 0; + void *outbuf_orig = outbuf ? *outbuf : nullptr; // FIXME: under ASan the call below may write to freed memory and corrupt // its metadata. See // https://code.google.com/p/address-sanitizer/issues/detail?id=321. 
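The glob and scandir/scandir64 hunks above share one technique: the user-supplied callback is stashed in a THREADLOCAL pointer and the real library call receives a small wrapper that can unpoison or validate the arguments before forwarding. Below is a self-contained sketch of that pattern in plain C++; it is not sanitizer code, the names are made up, and std::sort stands in for the intercepted library routine.
#include <algorithm>
#include <cassert>
#include <vector>

using compar_fn = bool (*)(int, int);
static thread_local compar_fn real_compar = nullptr;

static bool wrapped_compar(int a, int b) {
  // An interceptor would first check/unpoison the arguments here,
  // then forward to the user-supplied callback.
  return real_compar(a, b);
}

static void sort_with_interception(std::vector<int> &v, compar_fn compar) {
  real_compar = compar;                            // stash the real callback
  std::sort(v.begin(), v.end(), wrapped_compar);   // hand the wrapper to the "library"
  real_compar = nullptr;
}

int main() {
  std::vector<int> v = {3, 1, 2};
  sort_with_interception(v, [](int a, int b) { return a < b; });
  assert(v.front() == 1 && v.back() == 3);
  return 0;
}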
@@ -3866,11 +4180,19 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) { #if SANITIZER_INTERCEPT_TLS_GET_ADDR #define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr) +// If you see any crashes around this functions, there are 2 known issues with +// it: 1. __tls_get_addr can be called with mis-aligned stack due to: +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066 +// 2. It can be called recursively if sanitizer code uses __tls_get_addr +// to access thread local variables (it should not happen normally, +// because sanitizers use initial-exec tls model). INTERCEPTOR(void *, __tls_get_addr, void *arg) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg); void *res = REAL(__tls_get_addr)(arg); - DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res); + uptr tls_begin, tls_end; + COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end); + DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end); if (dtv) { // New DTLS block has been allocated. COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size); @@ -4106,9 +4428,9 @@ INTERCEPTOR(int, capset, void *hdrp, const void *datap) { #endif #if SANITIZER_INTERCEPT_AEABI_MEM -DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr); -DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr); -DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr); +DECLARE_REAL_AND_INTERCEPTOR(void *, memmove, void *, const void *, uptr) +DECLARE_REAL_AND_INTERCEPTOR(void *, memcpy, void *, const void *, uptr) +DECLARE_REAL_AND_INTERCEPTOR(void *, memset, void *, int, uptr) INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) { return WRAP(memmove)(to, from, size); @@ -4448,7 +4770,7 @@ INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp); - COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); __sanitizer_FILE *res = REAL(freopen)(path, mode, fp); @@ -4479,7 +4801,7 @@ INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp); - COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); + if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1); COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1); COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp); @@ -4608,15 +4930,14 @@ INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) { INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) { void *ctx; COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp); - if (fp) { - COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); - const FileMetadata *m = GetInterceptorMetadata(fp); - if (m) { - COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size); - DeleteInterceptorMetadata(fp); - } + COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp); + const FileMetadata *m = GetInterceptorMetadata(fp); + int res = REAL(fclose)(fp); + if (m) { + COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size); + DeleteInterceptorMetadata(fp); } - return REAL(fclose)(fp); + return res; } #define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose); #else @@ -4627,6 +4948,8 @@ INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) { INTERCEPTOR(void*, dlopen, const 
char *filename, int flag) { void *ctx; COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag); + if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0); + COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag); void *res = REAL(dlopen)(filename, flag); COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res); return res; @@ -4697,7 +5020,7 @@ static void MlockIsUnsupported() { static atomic_uint8_t printed; if (atomic_exchange(&printed, 1, memory_order_relaxed)) return; - VPrintf(1, "INFO: %s ignores mlock/mlockall/munlock/munlockall\n", + VPrintf(1, "%s ignores mlock/mlockall/munlock/munlockall\n", SanitizerToolName); } @@ -4731,6 +5054,186 @@ INTERCEPTOR(int, munlockall, void) { #define INIT_MLOCKX #endif // SANITIZER_INTERCEPT_MLOCKX +#if SANITIZER_INTERCEPT_FOPENCOOKIE +struct WrappedCookie { + void *real_cookie; + __sanitizer_cookie_io_functions_t real_io_funcs; +}; + +static uptr wrapped_read(void *cookie, char *buf, uptr size) { + COMMON_INTERCEPTOR_UNPOISON_PARAM(3); + WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; + __sanitizer_cookie_io_read real_read = wrapped_cookie->real_io_funcs.read; + return real_read ? real_read(wrapped_cookie->real_cookie, buf, size) : 0; +} + +static uptr wrapped_write(void *cookie, const char *buf, uptr size) { + COMMON_INTERCEPTOR_UNPOISON_PARAM(3); + WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; + __sanitizer_cookie_io_write real_write = wrapped_cookie->real_io_funcs.write; + return real_write ? real_write(wrapped_cookie->real_cookie, buf, size) : size; +} + +static int wrapped_seek(void *cookie, u64 *offset, int whence) { + COMMON_INTERCEPTOR_UNPOISON_PARAM(3); + COMMON_INTERCEPTOR_INITIALIZE_RANGE(offset, sizeof(*offset)); + WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; + __sanitizer_cookie_io_seek real_seek = wrapped_cookie->real_io_funcs.seek; + return real_seek ? real_seek(wrapped_cookie->real_cookie, offset, whence) + : -1; +} + +static int wrapped_close(void *cookie) { + COMMON_INTERCEPTOR_UNPOISON_PARAM(1); + WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie; + __sanitizer_cookie_io_close real_close = wrapped_cookie->real_io_funcs.close; + int res = real_close ? real_close(wrapped_cookie->real_cookie) : 0; + InternalFree(wrapped_cookie); + return res; +} + +INTERCEPTOR(__sanitizer_FILE *, fopencookie, void *cookie, const char *mode, + __sanitizer_cookie_io_functions_t io_funcs) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, fopencookie, cookie, mode, io_funcs); + WrappedCookie *wrapped_cookie = + (WrappedCookie *)InternalAlloc(sizeof(WrappedCookie)); + wrapped_cookie->real_cookie = cookie; + wrapped_cookie->real_io_funcs = io_funcs; + __sanitizer_FILE *res = + REAL(fopencookie)(wrapped_cookie, mode, {wrapped_read, wrapped_write, + wrapped_seek, wrapped_close}); + return res; +} + +#define INIT_FOPENCOOKIE COMMON_INTERCEPT_FUNCTION(fopencookie); +#else +#define INIT_FOPENCOOKIE +#endif // SANITIZER_INTERCEPT_FOPENCOOKIE + +#if SANITIZER_INTERCEPT_SEM +INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value); + // Workaround a bug in glibc's "old" semaphore implementation by + // zero-initializing the sem_t contents. This has to be done here because + // interceptors bind to the lowest symbols version by default, hitting the + // buggy code path while the non-sanitized build of the same code works fine. 
+ REAL(memset)(s, 0, sizeof(*s)); + int res = REAL(sem_init)(s, pshared, value); + return res; +} + +INTERCEPTOR(int, sem_destroy, __sanitizer_sem_t *s) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_destroy, s); + int res = REAL(sem_destroy)(s); + return res; +} + +INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_wait, s); + int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_wait)(s); + if (res == 0) { + COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); + } + return res; +} + +INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s); + int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s); + if (res == 0) { + COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); + } + return res; +} + +INTERCEPTOR(int, sem_timedwait, __sanitizer_sem_t *s, void *abstime) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_timedwait, s, abstime); + COMMON_INTERCEPTOR_READ_RANGE(ctx, abstime, struct_timespec_sz); + int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_timedwait)(s, abstime); + if (res == 0) { + COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); + } + return res; +} + +INTERCEPTOR(int, sem_post, __sanitizer_sem_t *s) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_post, s); + COMMON_INTERCEPTOR_RELEASE(ctx, (uptr)s); + int res = REAL(sem_post)(s); + return res; +} + +INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, sem_getvalue, s, sval); + int res = REAL(sem_getvalue)(s, sval); + if (res == 0) { + COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s); + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sval, sizeof(*sval)); + } + return res; +} +#define INIT_SEM \ + COMMON_INTERCEPT_FUNCTION(sem_init); \ + COMMON_INTERCEPT_FUNCTION(sem_destroy); \ + COMMON_INTERCEPT_FUNCTION(sem_wait); \ + COMMON_INTERCEPT_FUNCTION(sem_trywait); \ + COMMON_INTERCEPT_FUNCTION(sem_timedwait); \ + COMMON_INTERCEPT_FUNCTION(sem_post); \ + COMMON_INTERCEPT_FUNCTION(sem_getvalue); +#else +#define INIT_SEM +#endif // SANITIZER_INTERCEPT_SEM + +#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL +INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcancelstate, state, oldstate); + int res = REAL(pthread_setcancelstate)(state, oldstate); + if (res == 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldstate, sizeof(*oldstate)); + return res; +} + +INTERCEPTOR(int, pthread_setcanceltype, int type, int *oldtype) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcanceltype, type, oldtype); + int res = REAL(pthread_setcanceltype)(type, oldtype); + if (res == 0) + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldtype, sizeof(*oldtype)); + return res; +} +#define INIT_PTHREAD_SETCANCEL \ + COMMON_INTERCEPT_FUNCTION(pthread_setcancelstate); \ + COMMON_INTERCEPT_FUNCTION(pthread_setcanceltype); +#else +#define INIT_PTHREAD_SETCANCEL +#endif + +#if SANITIZER_INTERCEPT_MINCORE +INTERCEPTOR(int, mincore, void *addr, uptr length, unsigned char *vec) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, mincore, addr, length, vec); + int res = REAL(mincore)(addr, length, vec); + if (res == 0) { + uptr page_size = GetPageSizeCached(); + uptr vec_size = ((length + page_size - 1) & (~(page_size - 1))) / page_size; + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, vec, vec_size); + } + return res; +} +#define INIT_MINCORE COMMON_INTERCEPT_FUNCTION(mincore); +#else +#define INIT_MINCORE +#endif + static void InitializeCommonInterceptors() { static u64 metadata_mem[sizeof(MetadataHashMap) / 
sizeof(u64) + 1]; interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap(); @@ -4740,7 +5243,12 @@ static void InitializeCommonInterceptors() { INIT_STRNCMP; INIT_STRCASECMP; INIT_STRNCASECMP; + INIT_STRSTR; + INIT_STRCASESTR; + INIT_STRSPN; + INIT_STRPBRK; INIT_MEMCHR; + INIT_MEMCMP; INIT_MEMRCHR; INIT_READ; INIT_PREAD; @@ -4760,6 +5268,7 @@ static void InitializeCommonInterceptors() { INIT_SCANF; INIT_ISOC99_SCANF; INIT_PRINTF; + INIT_PRINTF_L; INIT_ISOC99_PRINTF; INIT_FREXP; INIT_FREXPF_FREXPL; @@ -4805,11 +5314,13 @@ static void InitializeCommonInterceptors() { INIT_MBSNRTOWCS; INIT_WCSTOMBS; INIT_WCSNRTOMBS; + INIT_WCRTOMB; INIT_TCGETATTR; INIT_REALPATH; INIT_CANONICALIZE_FILE_NAME; INIT_CONFSTR; INIT_SCHED_GETAFFINITY; + INIT_SCHED_GETPARAM; INIT_STRERROR; INIT_STRERROR_R; INIT_XPG_STRERROR_R; @@ -4892,4 +5403,8 @@ static void InitializeCommonInterceptors() { INIT_GETPASS; INIT_TIMERFD; INIT_MLOCKX; + INIT_FOPENCOOKIE; + INIT_SEM; + INIT_PTHREAD_SETCANCEL; + INIT_MINCORE; } diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc index 26cbe68dc99..9133be7097d 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_format.inc @@ -11,6 +11,7 @@ // with a few common GNU extensions. // //===----------------------------------------------------------------------===// + #include <stdarg.h> static const char *parse_number(const char *p, int *out) { @@ -189,7 +190,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc, continue; } if (*p == '\0') { - return 0; + return nullptr; } // %n$ p = maybe_parse_param_index(p, &dir->argIdx); @@ -204,7 +205,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc, p = parse_number(p, &dir->fieldWidth); CHECK(p); if (dir->fieldWidth <= 0) // Width if at all must be non-zero - return 0; + return nullptr; } // m if (*p == 'm') { @@ -224,8 +225,8 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc, while (*p && *p != ']') ++p; if (*p == 0) - return 0; // unexpected end of string - // Consume the closing ']'. + return nullptr; // unexpected end of string + // Consume the closing ']'. ++p; } // This is unfortunately ambiguous between old GNU extension @@ -249,7 +250,7 @@ static const char *scanf_parse_next(const char *p, bool allowGnuMalloc, while (*q && *q != ']' && *q != '%') ++q; if (*q == 0 || *q == '%') - return 0; + return nullptr; p = q + 1; // Consume the closing ']'. 
dir->maybeGnuMalloc = true; } @@ -393,7 +394,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) { continue; } if (*p == '\0') { - return 0; + return nullptr; } // %n$ p = maybe_parse_param_index(p, &dir->precisionIdx); @@ -406,7 +407,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) { p = maybe_parse_number_or_star(p, &dir->fieldWidth, &dir->starredWidth); if (!p) - return 0; + return nullptr; // Precision if (*p == '.') { ++p; @@ -414,7 +415,7 @@ static const char *printf_parse_next(const char *p, PrintfDirective *dir) { p = maybe_parse_number_or_star(p, &dir->fieldPrecision, &dir->starredPrecision); if (!p) - return 0; + return nullptr; // m$ if (dir->starredPrecision) { p = maybe_parse_param_index(p, &dir->precisionIdx); @@ -554,4 +555,4 @@ static void printf_common(void *ctx, const char *format, va_list aq) { } } -#endif // SANITIZER_INTERCEPT_PRINTF +#endif // SANITIZER_INTERCEPT_PRINTF diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc index ca264d899a7..6c5fda09fbf 100755 --- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc @@ -518,7 +518,7 @@ static const ioctl_desc *ioctl_table_lookup(unsigned req) { if (left == right && ioctl_table[left].req == req) return ioctl_table + left; else - return 0; + return nullptr; } static bool ioctl_decode(unsigned req, ioctl_desc *desc) { @@ -565,7 +565,7 @@ static const ioctl_desc *ioctl_lookup(unsigned req) { (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READ)) return desc; - return 0; + return nullptr; } static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d, @@ -576,14 +576,10 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d, } if (desc->type != ioctl_desc::CUSTOM) return; - switch (request) { - case 0x00008912: { // SIOCGIFCONF - struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; - COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len)); - break; - } + if (request == IOCTL_SIOCGIFCONF) { + struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; + COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len)); } - return; } static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d, @@ -595,12 +591,8 @@ static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d, } if (desc->type != ioctl_desc::CUSTOM) return; - switch (request) { - case 0x00008912: { // SIOCGIFCONF - struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; - COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len); - break; - } + if (request == IOCTL_SIOCGIFCONF) { + struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg; + COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len); } - return; } diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc index b5251444f0f..5a76c4ebd8b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc @@ -11,35 +11,31 @@ #include "sanitizer_common.h" #include "sanitizer_flags.h" +#include "sanitizer_stackdepot.h" #include "sanitizer_stacktrace.h" #include "sanitizer_symbolizer.h" +#if SANITIZER_POSIX +#include 
"sanitizer_posix.h" +#endif + namespace __sanitizer { -bool PrintsToTty() { - MaybeOpenReportFile(); - return internal_isatty(report_fd) != 0; +bool ReportFile::SupportsColors() { + SpinMutexLock l(mu); + ReopenIfNecessary(); + return SupportsColoredOutput(fd); } -bool PrintsToTtyCached() { +bool ColorizeReports() { // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color // printing on Windows. if (SANITIZER_WINDOWS) - return 0; - - static int cached = 0; - static bool prints_to_tty; - if (!cached) { // Not thread-safe. - prints_to_tty = PrintsToTty(); - cached = 1; - } - return prints_to_tty; -} + return false; -bool ColorizeReports() { const char *flag = common_flags()->color; return internal_strcmp(flag, "always") == 0 || - (internal_strcmp(flag, "auto") == 0 && PrintsToTtyCached()); + (internal_strcmp(flag, "auto") == 0 && report_file.SupportsColors()); } static void (*sandboxing_callback)(); @@ -50,16 +46,82 @@ void SetSandboxingCallback(void (*f)()) { void ReportErrorSummary(const char *error_type, StackTrace *stack) { if (!common_flags()->print_summary) return; - AddressInfo ai; -#if !SANITIZER_GO - if (stack->size > 0 && Symbolizer::GetOrInit()->CanReturnFileLineInfo()) { - // Currently, we include the first stack frame into the report summary. - // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc). - uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]); - Symbolizer::GetOrInit()->SymbolizePC(pc, &ai, 1); + if (stack->size == 0) { + ReportErrorSummary(error_type); + return; } + // Currently, we include the first stack frame into the report summary. + // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc). + uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]); + SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc); + ReportErrorSummary(error_type, frame->info); + frame->ClearAll(); +} + +static void (*SoftRssLimitExceededCallback)(bool exceeded); +void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) { + CHECK_EQ(SoftRssLimitExceededCallback, nullptr); + SoftRssLimitExceededCallback = Callback; +} + +void BackgroundThread(void *arg) { + uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb; + uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb; + uptr prev_reported_rss = 0; + uptr prev_reported_stack_depot_size = 0; + bool reached_soft_rss_limit = false; + while (true) { + SleepForMillis(100); + uptr current_rss_mb = GetRSS() >> 20; + if (Verbosity()) { + // If RSS has grown 10% since last time, print some information. + if (prev_reported_rss * 11 / 10 < current_rss_mb) { + Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb); + prev_reported_rss = current_rss_mb; + } + // If stack depot has grown 10% since last time, print it too. + StackDepotStats *stack_depot_stats = StackDepotGetStats(); + if (prev_reported_stack_depot_size * 11 / 10 < + stack_depot_stats->allocated) { + Printf("%s: StackDepot: %zd ids; %zdM allocated\n", + SanitizerToolName, + stack_depot_stats->n_uniq_ids, + stack_depot_stats->allocated >> 20); + prev_reported_stack_depot_size = stack_depot_stats->allocated; + } + } + // Check RSS against the limit. 
+ if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) { + Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n", + SanitizerToolName, hard_rss_limit_mb, current_rss_mb); + DumpProcessMap(); + Die(); + } + if (soft_rss_limit_mb) { + if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) { + reached_soft_rss_limit = true; + Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n", + SanitizerToolName, soft_rss_limit_mb, current_rss_mb); + if (SoftRssLimitExceededCallback) + SoftRssLimitExceededCallback(true); + } else if (soft_rss_limit_mb >= current_rss_mb && + reached_soft_rss_limit) { + reached_soft_rss_limit = false; + if (SoftRssLimitExceededCallback) + SoftRssLimitExceededCallback(false); + } + } + } +} + +void MaybeStartBackgroudThread() { +#if SANITIZER_LINUX // Need to implement/test on other platforms. + // Start the background thread if one of the rss limits is given. + if (!common_flags()->hard_rss_limit_mb && + !common_flags()->soft_rss_limit_mb) return; + if (!&real_pthread_create) return; // Can't spawn the thread anyway. + internal_start_thread(BackgroundThread, nullptr); #endif - ReportErrorSummary(error_type, ai.file, ai.line, ai.function); } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc index 104c27cd5d2..5e83ce9786c 100644 --- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc +++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc @@ -2297,7 +2297,9 @@ PRE_SYSCALL(ni_syscall)() {} POST_SYSCALL(ni_syscall)(long res) {} PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { -#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64)) +#if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__)) if (data) { if (request == ptrace_setregs) { PRE_READ((void *)data, struct_user_regs_struct_sz); @@ -2316,7 +2318,9 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { } POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { -#if !SANITIZER_ANDROID && (defined(__i386) || defined (__x86_64)) +#if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__)) if (res >= 0 && data) { // Note that this is different from the interceptor in // sanitizer_common_interceptors.inc. diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc index 214e8a98d0f..108a6163d19 100644 --- a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc @@ -10,18 +10,24 @@ // // Compiler instrumentation: // For every interesting basic block the compiler injects the following code: -// if (*Guard) { -// __sanitizer_cov(); -// *Guard = 1; +// if (Guard < 0) { +// __sanitizer_cov(&Guard); // } +// At the module start up time __sanitizer_cov_module_init sets the guards +// to consecutive negative numbers (-1, -2, -3, ...). // It's fine to call __sanitizer_cov more than once for a given block. // // Run-time: // - __sanitizer_cov(): record that we've executed the PC (GET_CALLER_PC). +// and atomically set Guard to -Guard. // - __sanitizer_cov_dump: dump the coverage data to disk. 
// For every module of the current process that has coverage data -// this will create a file module_name.PID.sancov. The file format is simple: -// it's just a sorted sequence of 4-byte offsets in the module. +// this will create a file module_name.PID.sancov. +// +// The file format is simple: the first 8 bytes is the magic, +// one of 0xC0BFFFFFFFFFFF64 and 0xC0BFFFFFFFFFFF32. The last byte of the +// magic defines the size of the following offsets. +// The rest of the data is the offsets in the module. // // Eventually, this coverage implementation should be obsoleted by a more // powerful general purpose Clang/LLVM coverage instrumentation. @@ -39,7 +45,12 @@ #include "sanitizer_symbolizer.h" #include "sanitizer_flags.h" -atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once. +static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL; +static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL; + +static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once. + +static atomic_uintptr_t coverage_counter; // pc_array is the array containing the covered PCs. // To make the pc_array thread- and async-signal-safe it has to be large enough. @@ -50,29 +61,55 @@ atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once. // dump current memory layout to another file. static bool cov_sandboxed = false; -static int cov_fd = kInvalidFd; +static fd_t cov_fd = kInvalidFd; static unsigned int cov_max_block_size = 0; +static bool coverage_enabled = false; +static const char *coverage_dir; namespace __sanitizer { class CoverageData { public: void Init(); + void Enable(); + void Disable(); + void ReInit(); void BeforeFork(); void AfterFork(int child_pid); void Extend(uptr npcs); - void Add(uptr pc); + void Add(uptr pc, u32 *guard); void IndirCall(uptr caller, uptr callee, uptr callee_cache[], uptr cache_size); void DumpCallerCalleePairs(); + void DumpTrace(); + void DumpAsBitSet(); + void DumpCounters(); + void DumpOffsets(); + void DumpAll(); + + ALWAYS_INLINE + void TraceBasicBlock(s32 *id); + + void InitializeGuardArray(s32 *guards); + void InitializeGuards(s32 *guards, uptr n, const char *module_name, + uptr caller_pc); + void InitializeCounters(u8 *counters, uptr n); + void ReinitializeGuards(); + uptr GetNumberOf8bitCounters(); + uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset); uptr *data(); uptr size(); private: + void DirectOpen(); + void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end); + // Maximal size pc array may ever grow. // We MmapNoReserve this space to ensure that the array is contiguous. - static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27); + static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64( + 1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)), + 1 << 27); // The amount file mapping for the pc array is grown by. static const uptr kPcArrayMmapSize = 64 * 1024; @@ -86,7 +123,27 @@ class CoverageData { // Current file mapped size of the pc array. uptr pc_array_mapped_size; // Descriptor of the file mapped pc array. - int pc_fd; + fd_t pc_fd; + + // Vector of coverage guard arrays, protected by mu. + InternalMmapVectorNoCtor<s32*> guard_array_vec; + + struct NamedPcRange { + const char *copied_module_name; + uptr beg, end; // elements [beg,end) in pc_array. + }; + + // Vector of module and compilation unit pc ranges. 
+ InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec; + InternalMmapVectorNoCtor<NamedPcRange> module_name_vec; + + struct CounterAndSize { + u8 *counters; + uptr n; + }; + + InternalMmapVectorNoCtor<CounterAndSize> counters_vec; + uptr num_8bit_counters; // Caller-Callee (cc) array, size and current index. static const uptr kCcArrayMaxSize = FIRST_32_SECOND_64(1 << 18, 1 << 24); @@ -94,59 +151,131 @@ class CoverageData { atomic_uintptr_t cc_array_index; atomic_uintptr_t cc_array_size; + // Tracing event array, size and current pointer. + // We record all events (basic block entries) in a global buffer of u32 + // values. Each such value is the index in pc_array. + // So far the tracing is highly experimental: + // - not thread-safe; + // - does not support long traces; + // - not tuned for performance. + static const uptr kTrEventArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 30); + u32 *tr_event_array; + uptr tr_event_array_size; + u32 *tr_event_pointer; + static const uptr kTrPcArrayMaxSize = FIRST_32_SECOND_64(1 << 22, 1 << 27); StaticSpinMutex mu; - - void DirectOpen(); - void ReInit(); }; static CoverageData coverage_data; +void CovUpdateMapping(const char *path, uptr caller_pc = 0); + void CoverageData::DirectOpen() { - InternalScopedString path(1024); + InternalScopedString path(kMaxPathLength); internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.raw", - common_flags()->coverage_dir, internal_getpid()); - pc_fd = OpenFile(path.data(), true); - if (internal_iserror(pc_fd)) { - Report(" Coverage: failed to open %s for writing\n", path.data()); + coverage_dir, internal_getpid()); + pc_fd = OpenFile(path.data(), RdWr); + if (pc_fd == kInvalidFd) { + Report("Coverage: failed to open %s for reading/writing\n", path.data()); Die(); } pc_array_mapped_size = 0; - CovUpdateMapping(); + CovUpdateMapping(coverage_dir); } void CoverageData::Init() { + pc_fd = kInvalidFd; +} + +void CoverageData::Enable() { + if (pc_array) + return; pc_array = reinterpret_cast<uptr *>( MmapNoReserveOrDie(sizeof(uptr) * kPcArrayMaxSize, "CovInit")); - pc_fd = kInvalidFd; + atomic_store(&pc_array_index, 0, memory_order_relaxed); if (common_flags()->coverage_direct) { atomic_store(&pc_array_size, 0, memory_order_relaxed); - atomic_store(&pc_array_index, 0, memory_order_relaxed); } else { atomic_store(&pc_array_size, kPcArrayMaxSize, memory_order_relaxed); - atomic_store(&pc_array_index, 0, memory_order_relaxed); } cc_array = reinterpret_cast<uptr **>(MmapNoReserveOrDie( sizeof(uptr *) * kCcArrayMaxSize, "CovInit::cc_array")); atomic_store(&cc_array_size, kCcArrayMaxSize, memory_order_relaxed); atomic_store(&cc_array_index, 0, memory_order_relaxed); + + // Allocate tr_event_array with a guard page at the end. + tr_event_array = reinterpret_cast<u32 *>(MmapNoReserveOrDie( + sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + GetMmapGranularity(), + "CovInit::tr_event_array")); + MprotectNoAccess( + reinterpret_cast<uptr>(&tr_event_array[kTrEventArrayMaxSize]), + GetMmapGranularity()); + tr_event_array_size = kTrEventArrayMaxSize; + tr_event_pointer = tr_event_array; + + num_8bit_counters = 0; +} + +void CoverageData::InitializeGuardArray(s32 *guards) { + Enable(); // Make sure coverage is enabled at this point. 
+ s32 n = guards[0]; + for (s32 j = 1; j <= n; j++) { + uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed); + guards[j] = -static_cast<s32>(idx + 1); + } +} + +void CoverageData::Disable() { + if (pc_array) { + UnmapOrDie(pc_array, sizeof(uptr) * kPcArrayMaxSize); + pc_array = nullptr; + } + if (cc_array) { + UnmapOrDie(cc_array, sizeof(uptr *) * kCcArrayMaxSize); + cc_array = nullptr; + } + if (tr_event_array) { + UnmapOrDie(tr_event_array, + sizeof(tr_event_array[0]) * kTrEventArrayMaxSize + + GetMmapGranularity()); + tr_event_array = nullptr; + tr_event_pointer = nullptr; + } + if (pc_fd != kInvalidFd) { + CloseFile(pc_fd); + pc_fd = kInvalidFd; + } +} + +void CoverageData::ReinitializeGuards() { + // Assuming single thread. + atomic_store(&pc_array_index, 0, memory_order_relaxed); + for (uptr i = 0; i < guard_array_vec.size(); i++) + InitializeGuardArray(guard_array_vec[i]); } void CoverageData::ReInit() { - internal_munmap(pc_array, sizeof(uptr) * kPcArrayMaxSize); - if (pc_fd != kInvalidFd) internal_close(pc_fd); - if (common_flags()->coverage_direct) { - // In memory-mapped mode we must extend the new file to the known array - // size. - uptr size = atomic_load(&pc_array_size, memory_order_relaxed); - Init(); - if (size) Extend(size); - } else { - Init(); + Disable(); + if (coverage_enabled) { + if (common_flags()->coverage_direct) { + // In memory-mapped mode we must extend the new file to the known array + // size. + uptr size = atomic_load(&pc_array_size, memory_order_relaxed); + uptr npcs = size / sizeof(uptr); + Enable(); + if (size) Extend(npcs); + if (coverage_enabled) CovUpdateMapping(coverage_dir); + } else { + Enable(); + } } + // Re-initialize the guards. + // We are single-threaded now, no need to grab any lock. + CHECK_EQ(atomic_load(&pc_array_index, memory_order_relaxed), 0); + ReinitializeGuards(); } void CoverageData::BeforeFork() { @@ -164,15 +293,16 @@ void CoverageData::Extend(uptr npcs) { if (!common_flags()->coverage_direct) return; SpinMutexLock l(&mu); - if (pc_fd == kInvalidFd) DirectOpen(); - CHECK_NE(pc_fd, kInvalidFd); - uptr size = atomic_load(&pc_array_size, memory_order_relaxed); size += npcs * sizeof(uptr); - if (size > pc_array_mapped_size) { + if (coverage_enabled && size > pc_array_mapped_size) { + if (pc_fd == kInvalidFd) DirectOpen(); + CHECK_NE(pc_fd, kInvalidFd); + uptr new_mapped_size = pc_array_mapped_size; while (size > new_mapped_size) new_mapped_size += kPcArrayMmapSize; + CHECK_LE(new_mapped_size, sizeof(uptr) * kPcArrayMaxSize); // Extend the file and map the new space at the end of pc_array. uptr res = internal_ftruncate(pc_fd, new_mapped_size); @@ -181,24 +311,100 @@ void CoverageData::Extend(uptr npcs) { Printf("failed to extend raw coverage file: %d\n", err); Die(); } - void *p = MapWritableFileToMemory(pc_array + pc_array_mapped_size, + + uptr next_map_base = ((uptr)pc_array) + pc_array_mapped_size; + void *p = MapWritableFileToMemory((void *)next_map_base, new_mapped_size - pc_array_mapped_size, pc_fd, pc_array_mapped_size); - CHECK_EQ(p, pc_array + pc_array_mapped_size); + CHECK_EQ((uptr)p, next_map_base); pc_array_mapped_size = new_mapped_size; } atomic_store(&pc_array_size, size, memory_order_release); } -// Simply add the pc into the vector under lock. If the function is called more -// than once for a given PC it will be inserted multiple times, which is fine. 
-void CoverageData::Add(uptr pc) { +void CoverageData::InitializeCounters(u8 *counters, uptr n) { + if (!counters) return; + CHECK_EQ(reinterpret_cast<uptr>(counters) % 16, 0); + n = RoundUpTo(n, 16); // The compiler must ensure that counters is 16-aligned. + SpinMutexLock l(&mu); + counters_vec.push_back({counters, n}); + num_8bit_counters += n; +} + +void CoverageData::UpdateModuleNameVec(uptr caller_pc, uptr range_beg, + uptr range_end) { + auto sym = Symbolizer::GetOrInit(); + if (!sym) + return; + const char *module_name = sym->GetModuleNameForPc(caller_pc); + if (!module_name) return; + if (module_name_vec.empty() || + module_name_vec.back().copied_module_name != module_name) + module_name_vec.push_back({module_name, range_beg, range_end}); + else + module_name_vec.back().end = range_end; +} + +void CoverageData::InitializeGuards(s32 *guards, uptr n, + const char *comp_unit_name, + uptr caller_pc) { + // The array 'guards' has n+1 elements, we use the element zero + // to store 'n'. + CHECK_LT(n, 1 << 30); + guards[0] = static_cast<s32>(n); + InitializeGuardArray(guards); + SpinMutexLock l(&mu); + uptr range_end = atomic_load(&pc_array_index, memory_order_relaxed); + uptr range_beg = range_end - n; + comp_unit_name_vec.push_back({comp_unit_name, range_beg, range_end}); + guard_array_vec.push_back(guards); + UpdateModuleNameVec(caller_pc, range_beg, range_end); +} + +static const uptr kBundleCounterBits = 16; + +// When coverage_order_pcs==true and SANITIZER_WORDSIZE==64 +// we insert the global counter into the first 16 bits of the PC. +uptr BundlePcAndCounter(uptr pc, uptr counter) { + if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs) + return pc; + static const uptr kMaxCounter = (1 << kBundleCounterBits) - 1; + if (counter > kMaxCounter) + counter = kMaxCounter; + CHECK_EQ(0, pc >> (SANITIZER_WORDSIZE - kBundleCounterBits)); + return pc | (counter << (SANITIZER_WORDSIZE - kBundleCounterBits)); +} + +uptr UnbundlePc(uptr bundle) { + if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs) + return bundle; + return (bundle << kBundleCounterBits) >> kBundleCounterBits; +} + +uptr UnbundleCounter(uptr bundle) { + if (SANITIZER_WORDSIZE != 64 || !common_flags()->coverage_order_pcs) + return 0; + return bundle >> (SANITIZER_WORDSIZE - kBundleCounterBits); +} + +// If guard is negative, atomically set it to -guard and store the PC in +// pc_array. +void CoverageData::Add(uptr pc, u32 *guard) { + atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard); + s32 guard_value = atomic_load(atomic_guard, memory_order_relaxed); + if (guard_value >= 0) return; + + atomic_store(atomic_guard, -guard_value, memory_order_relaxed); if (!pc_array) return; - uptr idx = atomic_fetch_add(&pc_array_index, 1, memory_order_relaxed); + + uptr idx = -guard_value - 1; + if (idx >= atomic_load(&pc_array_index, memory_order_acquire)) + return; // May happen after fork when pc_array_index becomes 0. CHECK_LT(idx * sizeof(uptr), atomic_load(&pc_array_size, memory_order_acquire)); - pc_array[idx] = pc; + uptr counter = atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed); + pc_array[idx] = BundlePcAndCounter(pc, counter); } // Registers a pair caller=>callee. 
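As an aside for orientation: when coverage_order_pcs is set on a 64-bit target, the helpers above saturate a global event counter at 16 bits and store it in the upper bits of each recorded PC. A self-contained sketch of that arithmetic in plain C++ (64-bit word size and coverage_order_pcs assumed; illustration only, not runtime code):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Mirror of the bundling scheme used by CoverageData::Add above:
    // the top 16 bits of a recorded value hold a saturating event counter,
    // the low 48 bits hold the PC (which must fit in 48 bits).
    constexpr unsigned kBundleCounterBits = 16;
    constexpr uint64_t kMaxCounter = (1ULL << kBundleCounterBits) - 1;

    uint64_t Bundle(uint64_t pc, uint64_t counter) {
      if (counter > kMaxCounter) counter = kMaxCounter;  // saturate, as in the patch
      assert((pc >> (64 - kBundleCounterBits)) == 0);    // PC must leave the top bits free
      return pc | (counter << (64 - kBundleCounterBits));
    }

    uint64_t UnbundlePc(uint64_t b)      { return (b << kBundleCounterBits) >> kBundleCounterBits; }
    uint64_t UnbundleCounter(uint64_t b) { return b >> (64 - kBundleCounterBits); }

    int main() {
      uint64_t b = Bundle(0x00007f1234567890ULL, 42);
      std::printf("pc=0x%llx counter=%llu\n",
                  (unsigned long long)UnbundlePc(b),
                  (unsigned long long)UnbundleCounter(b));  // pc=0x7f1234567890 counter=42
      return 0;
    }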
@@ -226,13 +432,73 @@ void CoverageData::IndirCall(uptr caller, uptr callee, uptr callee_cache[], for (uptr i = 2; i < cache_size; i++) { uptr was = 0; if (atomic_compare_exchange_strong(&atomic_callee_cache[i], &was, callee, - memory_order_seq_cst)) + memory_order_seq_cst)) { + atomic_fetch_add(&coverage_counter, 1, memory_order_relaxed); return; + } if (was == callee) // Already have this callee. return; } } +uptr CoverageData::GetNumberOf8bitCounters() { + return num_8bit_counters; +} + +// Map every 8bit counter to a 8-bit bitset and clear the counter. +uptr CoverageData::Update8bitCounterBitsetAndClearCounters(u8 *bitset) { + uptr num_new_bits = 0; + uptr cur = 0; + // For better speed we map 8 counters to 8 bytes of bitset at once. + static const uptr kBatchSize = 8; + CHECK_EQ(reinterpret_cast<uptr>(bitset) % kBatchSize, 0); + for (uptr i = 0, len = counters_vec.size(); i < len; i++) { + u8 *c = counters_vec[i].counters; + uptr n = counters_vec[i].n; + CHECK_EQ(n % 16, 0); + CHECK_EQ(cur % kBatchSize, 0); + CHECK_EQ(reinterpret_cast<uptr>(c) % kBatchSize, 0); + if (!bitset) { + internal_bzero_aligned16(c, n); + cur += n; + continue; + } + for (uptr j = 0; j < n; j += kBatchSize, cur += kBatchSize) { + CHECK_LT(cur, num_8bit_counters); + u64 *pc64 = reinterpret_cast<u64*>(c + j); + u64 *pb64 = reinterpret_cast<u64*>(bitset + cur); + u64 c64 = *pc64; + u64 old_bits_64 = *pb64; + u64 new_bits_64 = old_bits_64; + if (c64) { + *pc64 = 0; + for (uptr k = 0; k < kBatchSize; k++) { + u64 x = (c64 >> (8 * k)) & 0xff; + if (x) { + u64 bit = 0; + /**/ if (x >= 128) bit = 128; + else if (x >= 32) bit = 64; + else if (x >= 16) bit = 32; + else if (x >= 8) bit = 16; + else if (x >= 4) bit = 8; + else if (x >= 3) bit = 4; + else if (x >= 2) bit = 2; + else if (x >= 1) bit = 1; + u64 mask = bit << (8 * k); + if (!(new_bits_64 & mask)) { + num_new_bits++; + new_bits_64 |= mask; + } + } + } + *pb64 = new_bits_64; + } + } + } + CHECK_EQ(cur, num_8bit_counters); + return num_new_bits; +} + uptr *CoverageData::data() { return pc_array; } @@ -251,15 +517,15 @@ struct CovHeader { static void CovWritePacked(int pid, const char *module, const void *blob, unsigned int blob_size) { - if (cov_fd < 0) return; + if (cov_fd == kInvalidFd) return; unsigned module_name_length = internal_strlen(module); CovHeader header = {pid, module_name_length, blob_size}; if (cov_max_block_size == 0) { // Writing to a file. Just go ahead. - internal_write(cov_fd, &header, sizeof(header)); - internal_write(cov_fd, module, module_name_length); - internal_write(cov_fd, blob, blob_size); + WriteToFile(cov_fd, &header, sizeof(header)); + WriteToFile(cov_fd, module, module_name_length); + WriteToFile(cov_fd, blob, blob_size); } else { // Writing to a socket. We want to split the data into appropriately sized // blocks. 
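For context, the non-socket branch of CovWritePacked above emits each record as a raw header, then the module name bytes, then the payload. A rough reader for that layout in plain C++; the field names and types here are inferred from the initializer {pid, module_name_length, blob_size} and the later data_length assignment, so treat the struct as an assumption rather than the runtime's own definition:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Assumed on-disk record layout of a *.sancov.packed file, inferred from
    // CovWritePacked above (header, module name, payload, repeated).
    struct PackedCovHeader {
      int pid;
      unsigned module_name_length;
      unsigned data_length;
    };

    int main(int argc, char **argv) {
      if (argc < 2) return 1;
      FILE *f = std::fopen(argv[1], "rb");
      if (!f) return 1;
      PackedCovHeader h;
      // Assumes the file was produced with the same endianness and struct
      // padding as the reader.
      while (std::fread(&h, sizeof(h), 1, f) == 1) {
        std::string name(h.module_name_length, '\0');
        std::vector<char> payload(h.data_length);
        if (h.module_name_length &&
            std::fread(&name[0], 1, name.size(), f) != name.size()) break;
        if (h.data_length &&
            std::fread(payload.data(), 1, payload.size(), f) != payload.size()) break;
        std::printf("pid=%d module=%s payload=%u bytes\n",
                    h.pid, name.c_str(), h.data_length);
      }
      std::fclose(f);
      return 0;
    }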
@@ -275,15 +541,14 @@ static void CovWritePacked(int pid, const char *module, const void *blob, internal_memcpy(block_pos, module, module_name_length); block_pos += module_name_length; char *block_data_begin = block_pos; - char *blob_pos = (char *)blob; + const char *blob_pos = (const char *)blob; while (blob_size > 0) { unsigned int payload_size = Min(blob_size, max_payload_size); blob_size -= payload_size; internal_memcpy(block_data_begin, blob_pos, payload_size); blob_pos += payload_size; ((CovHeader *)block.data())->data_length = payload_size; - internal_write(cov_fd, block.data(), - header_size_with_module + payload_size); + WriteToFile(cov_fd, block.data(), header_size_with_module + payload_size); } } } @@ -292,29 +557,77 @@ static void CovWritePacked(int pid, const char *module, const void *blob, // If packed = true and name == 0: <pid>.<sancov>.<packed>. // If packed = true and name != 0: <name>.<sancov>.<packed> (name is // user-supplied). -static int CovOpenFile(bool packed, const char* name) { - InternalScopedBuffer<char> path(1024); +static fd_t CovOpenFile(InternalScopedString *path, bool packed, + const char *name, const char *extension = "sancov") { + path->clear(); if (!packed) { CHECK(name); - internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov", - common_flags()->coverage_dir, name, internal_getpid()); + path->append("%s/%s.%zd.%s", coverage_dir, name, internal_getpid(), + extension); } else { if (!name) - internal_snprintf((char *)path.data(), path.size(), - "%s/%zd.sancov.packed", common_flags()->coverage_dir, - internal_getpid()); + path->append("%s/%zd.%s.packed", coverage_dir, internal_getpid(), + extension); else - internal_snprintf((char *)path.data(), path.size(), "%s/%s.sancov.packed", - common_flags()->coverage_dir, name); - } - uptr fd = OpenFile(path.data(), true); - if (internal_iserror(fd)) { - Report(" SanitizerCoverage: failed to open %s for writing\n", path.data()); - return -1; + path->append("%s/%s.%s.packed", coverage_dir, name, extension); } + error_t err; + fd_t fd = OpenFile(path->data(), WrOnly, &err); + if (fd == kInvalidFd) + Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n", + path->data(), err); return fd; } +// Dump trace PCs and trace events into two separate files. +void CoverageData::DumpTrace() { + uptr max_idx = tr_event_pointer - tr_event_array; + if (!max_idx) return; + auto sym = Symbolizer::GetOrInit(); + if (!sym) + return; + InternalScopedString out(32 << 20); + for (uptr i = 0, n = size(); i < n; i++) { + const char *module_name = "<unknown>"; + uptr module_address = 0; + sym->GetModuleNameAndOffsetForPC(UnbundlePc(pc_array[i]), &module_name, + &module_address); + out.append("%s 0x%zx\n", module_name, module_address); + } + InternalScopedString path(kMaxPathLength); + fd_t fd = CovOpenFile(&path, false, "trace-points"); + if (fd == kInvalidFd) return; + WriteToFile(fd, out.data(), out.length()); + CloseFile(fd); + + fd = CovOpenFile(&path, false, "trace-compunits"); + if (fd == kInvalidFd) return; + out.clear(); + for (uptr i = 0; i < comp_unit_name_vec.size(); i++) + out.append("%s\n", comp_unit_name_vec[i].copied_module_name); + WriteToFile(fd, out.data(), out.length()); + CloseFile(fd); + + fd = CovOpenFile(&path, false, "trace-events"); + if (fd == kInvalidFd) return; + uptr bytes_to_write = max_idx * sizeof(tr_event_array[0]); + u8 *event_bytes = reinterpret_cast<u8*>(tr_event_array); + // The trace file could be huge, and may not be written with a single syscall. 
+ while (bytes_to_write) { + uptr actually_written; + if (WriteToFile(fd, event_bytes, bytes_to_write, &actually_written) && + actually_written <= bytes_to_write) { + bytes_to_write -= actually_written; + event_bytes += actually_written; + } else { + break; + } + } + CloseFile(fd); + VReport(1, " CovDump: Trace: %zd PCs written\n", size()); + VReport(1, " CovDump: Trace: %zd Events written\n", max_idx); +} + // This function dumps the caller=>callee pairs into a file as a sequence of // lines like "module_name offset". void CoverageData::DumpCallerCalleePairs() { @@ -347,88 +660,166 @@ void CoverageData::DumpCallerCalleePairs() { callee_module_address); } } - int fd = CovOpenFile(false, "caller-callee"); - if (fd < 0) return; - internal_write(fd, out.data(), out.length()); - internal_close(fd); + InternalScopedString path(kMaxPathLength); + fd_t fd = CovOpenFile(&path, false, "caller-callee"); + if (fd == kInvalidFd) return; + WriteToFile(fd, out.data(), out.length()); + CloseFile(fd); VReport(1, " CovDump: %zd caller-callee pairs written\n", total); } -// Dump the coverage on disk. -static void CovDump() { - if (!common_flags()->coverage || common_flags()->coverage_direct) return; -#if !SANITIZER_WINDOWS - if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed)) - return; - uptr size = coverage_data.size(); - InternalMmapVector<u32> offsets(size); - uptr *vb = coverage_data.data(); - uptr *ve = vb + size; - SortArray(vb, size); - MemoryMappingLayout proc_maps(/*cache_enabled*/true); - uptr mb, me, off, prot; - InternalScopedBuffer<char> module(4096); - InternalScopedBuffer<char> path(4096 * 2); - for (int i = 0; - proc_maps.Next(&mb, &me, &off, module.data(), module.size(), &prot); - i++) { - if ((prot & MemoryMappingLayout::kProtectionExecute) == 0) - continue; - while (vb < ve && *vb < mb) vb++; - if (vb >= ve) break; - if (*vb < me) { - offsets.clear(); - const uptr *old_vb = vb; - CHECK_LE(off, *vb); - for (; vb < ve && *vb < me; vb++) { - uptr diff = *vb - (i ? mb : 0) + off; - CHECK_LE(diff, 0xffffffffU); - offsets.push_back(static_cast<u32>(diff)); - } - const char *module_name = StripModuleName(module.data()); - if (cov_sandboxed) { - if (cov_fd >= 0) { - CovWritePacked(internal_getpid(), module_name, offsets.data(), - offsets.size() * sizeof(u32)); - VReport(1, " CovDump: %zd PCs written to packed file\n", vb - old_vb); - } - } else { - // One file per module per process. - internal_snprintf((char *)path.data(), path.size(), "%s/%s.%zd.sancov", - common_flags()->coverage_dir, module_name, - internal_getpid()); - int fd = CovOpenFile(false /* packed */, module_name); - if (fd > 0) { - internal_write(fd, offsets.data(), offsets.size() * sizeof(u32)); - internal_close(fd); - VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), - vb - old_vb); - } +// Record the current PC into the event buffer. +// Every event is a u32 value (index in tr_pc_array_index) so we compute +// it once and then cache in the provided 'cache' storage. +// +// This function will eventually be inlined by the compiler. +void CoverageData::TraceBasicBlock(s32 *id) { + // Will trap here if + // 1. coverage is not enabled at run-time. + // 2. The array tr_event_array is full. 
+ *tr_event_pointer = static_cast<u32>(*id - 1); + tr_event_pointer++; +} + +void CoverageData::DumpCounters() { + if (!common_flags()->coverage_counters) return; + uptr n = coverage_data.GetNumberOf8bitCounters(); + if (!n) return; + InternalScopedBuffer<u8> bitset(n); + coverage_data.Update8bitCounterBitsetAndClearCounters(bitset.data()); + InternalScopedString path(kMaxPathLength); + + for (uptr m = 0; m < module_name_vec.size(); m++) { + auto r = module_name_vec[m]; + CHECK(r.copied_module_name); + CHECK_LE(r.beg, r.end); + CHECK_LE(r.end, size()); + const char *base_name = StripModuleName(r.copied_module_name); + fd_t fd = + CovOpenFile(&path, /* packed */ false, base_name, "counters-sancov"); + if (fd == kInvalidFd) return; + WriteToFile(fd, bitset.data() + r.beg, r.end - r.beg); + CloseFile(fd); + VReport(1, " CovDump: %zd counters written for '%s'\n", r.end - r.beg, + base_name); + } +} + +void CoverageData::DumpAsBitSet() { + if (!common_flags()->coverage_bitset) return; + if (!size()) return; + InternalScopedBuffer<char> out(size()); + InternalScopedString path(kMaxPathLength); + for (uptr m = 0; m < module_name_vec.size(); m++) { + uptr n_set_bits = 0; + auto r = module_name_vec[m]; + CHECK(r.copied_module_name); + CHECK_LE(r.beg, r.end); + CHECK_LE(r.end, size()); + for (uptr i = r.beg; i < r.end; i++) { + uptr pc = UnbundlePc(pc_array[i]); + out[i] = pc ? '1' : '0'; + if (pc) + n_set_bits++; + } + const char *base_name = StripModuleName(r.copied_module_name); + fd_t fd = CovOpenFile(&path, /* packed */false, base_name, "bitset-sancov"); + if (fd == kInvalidFd) return; + WriteToFile(fd, out.data() + r.beg, r.end - r.beg); + CloseFile(fd); + VReport(1, + " CovDump: bitset of %zd bits written for '%s', %zd bits are set\n", + r.end - r.beg, base_name, n_set_bits); + } +} + +void CoverageData::DumpOffsets() { + auto sym = Symbolizer::GetOrInit(); + if (!common_flags()->coverage_pcs) return; + CHECK_NE(sym, nullptr); + InternalMmapVector<uptr> offsets(0); + InternalScopedString path(kMaxPathLength); + for (uptr m = 0; m < module_name_vec.size(); m++) { + offsets.clear(); + uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2; + for (uptr i = 0; i < num_words_for_magic; i++) + offsets.push_back(0); + auto r = module_name_vec[m]; + CHECK(r.copied_module_name); + CHECK_LE(r.beg, r.end); + CHECK_LE(r.end, size()); + for (uptr i = r.beg; i < r.end; i++) { + uptr pc = UnbundlePc(pc_array[i]); + uptr counter = UnbundleCounter(pc_array[i]); + if (!pc) continue; // Not visited. + uptr offset = 0; + sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset); + offsets.push_back(BundlePcAndCounter(offset, counter)); + } + + CHECK_GE(offsets.size(), num_words_for_magic); + SortArray(offsets.data(), offsets.size()); + for (uptr i = 0; i < offsets.size(); i++) + offsets[i] = UnbundlePc(offsets[i]); + + uptr num_offsets = offsets.size() - num_words_for_magic; + u64 *magic_p = reinterpret_cast<u64*>(offsets.data()); + CHECK_EQ(*magic_p, 0ULL); + // FIXME: we may want to write 32-bit offsets even in 64-mode + // if all the offsets are small enough. + *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32; + + const char *module_name = StripModuleName(r.copied_module_name); + if (cov_sandboxed) { + if (cov_fd != kInvalidFd) { + CovWritePacked(internal_getpid(), module_name, offsets.data(), + offsets.size() * sizeof(offsets[0])); + VReport(1, " CovDump: %zd PCs written to packed file\n", num_offsets); } + } else { + // One file per module per process. 
+ fd_t fd = CovOpenFile(&path, false /* packed */, module_name); + if (fd == kInvalidFd) continue; + WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0])); + CloseFile(fd); + VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets); } } - if (cov_fd >= 0) - internal_close(cov_fd); - coverage_data.DumpCallerCalleePairs(); -#endif // !SANITIZER_WINDOWS + if (cov_fd != kInvalidFd) + CloseFile(cov_fd); +} + +void CoverageData::DumpAll() { + if (!coverage_enabled || common_flags()->coverage_direct) return; + if (atomic_fetch_add(&dump_once_guard, 1, memory_order_relaxed)) + return; + DumpAsBitSet(); + DumpCounters(); + DumpTrace(); + DumpOffsets(); + DumpCallerCalleePairs(); } void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args) { if (!args) return; - if (!common_flags()->coverage) return; + if (!coverage_enabled) return; cov_sandboxed = args->coverage_sandboxed; if (!cov_sandboxed) return; - cov_fd = args->coverage_fd; cov_max_block_size = args->coverage_max_block_size; - if (cov_fd < 0) + if (args->coverage_fd >= 0) { + cov_fd = (fd_t)args->coverage_fd; + } else { + InternalScopedString path(kMaxPathLength); // Pre-open the file now. The sandbox won't allow us to do it later. - cov_fd = CovOpenFile(true /* packed */, 0); + cov_fd = CovOpenFile(&path, true /* packed */, nullptr); + } } -int MaybeOpenCovFile(const char *name) { +fd_t MaybeOpenCovFile(const char *name) { CHECK(name); - if (!common_flags()->coverage) return -1; - return CovOpenFile(true /* packed */, name); + if (!coverage_enabled) return kInvalidFd; + InternalScopedString path(kMaxPathLength); + return CovOpenFile(&path, true /* packed */, name); } void CovBeforeFork() { @@ -439,32 +830,114 @@ void CovAfterFork(int child_pid) { coverage_data.AfterFork(child_pid); } -} // namespace __sanitizer +static void MaybeDumpCoverage() { + if (common_flags()->coverage) + __sanitizer_cov_dump(); +} + +void InitializeCoverage(bool enabled, const char *dir) { + if (coverage_enabled) + return; // May happen if two sanitizer enable coverage in the same process. 
+ coverage_enabled = enabled; + coverage_dir = dir; + coverage_data.Init(); + if (enabled) coverage_data.Enable(); + if (!common_flags()->coverage_direct) Atexit(__sanitizer_cov_dump); + AddDieCallback(MaybeDumpCoverage); +} + +void ReInitializeCoverage(bool enabled, const char *dir) { + coverage_enabled = enabled; + coverage_dir = dir; + coverage_data.ReInit(); +} + +void CoverageUpdateMapping() { + if (coverage_enabled) + CovUpdateMapping(coverage_dir); +} + +} // namespace __sanitizer extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov() { - coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC())); +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(u32 *guard) { + coverage_data.Add(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()), + guard); +} +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_with_check(u32 *guard) { + atomic_uint32_t *atomic_guard = reinterpret_cast<atomic_uint32_t*>(guard); + if (static_cast<s32>( + __sanitizer::atomic_load(atomic_guard, memory_order_relaxed)) < 0) + __sanitizer_cov(guard); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_indir_call16(uptr callee, uptr callee_cache16[]) { coverage_data.IndirCall(StackTrace::GetPreviousInstructionPc(GET_CALLER_PC()), callee, callee_cache16, 16); } -SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { CovDump(); } SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() { + coverage_enabled = true; + coverage_dir = common_flags()->coverage_dir; coverage_data.Init(); } -SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_module_init(uptr npcs) { - if (!common_flags()->coverage || !common_flags()->coverage_direct) return; - if (SANITIZER_ANDROID) { +SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() { + coverage_data.DumpAll(); +} +SANITIZER_INTERFACE_ATTRIBUTE void +__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters, + const char *comp_unit_name) { + coverage_data.InitializeGuards(guards, npcs, comp_unit_name, GET_CALLER_PC()); + coverage_data.InitializeCounters(counters, npcs); + if (!common_flags()->coverage_direct) return; + if (SANITIZER_ANDROID && coverage_enabled) { // dlopen/dlclose interceptors do not work on Android, so we rely on // Extend() calls to update .sancov.map. 
- CovUpdateMapping(GET_CALLER_PC()); + CovUpdateMapping(coverage_dir, GET_CALLER_PC()); } coverage_data.Extend(npcs); } SANITIZER_INTERFACE_ATTRIBUTE sptr __sanitizer_maybe_open_cov_file(const char *name) { - return MaybeOpenCovFile(name); + return (sptr)MaybeOpenCovFile(name); +} +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_total_unique_coverage() { + return atomic_load(&coverage_counter, memory_order_relaxed); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_cov_trace_func_enter(s32 *id) { + coverage_data.TraceBasicBlock(id); +} +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_cov_trace_basic_block(s32 *id) { + coverage_data.TraceBasicBlock(id); +} +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_reset_coverage() { + coverage_data.ReinitializeGuards(); + internal_bzero_aligned16( + coverage_data.data(), + RoundUpTo(coverage_data.size() * sizeof(coverage_data.data()[0]), 16)); +} +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_coverage_guards(uptr **data) { + *data = coverage_data.data(); + return coverage_data.size(); +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_get_number_of_counters() { + return coverage_data.GetNumberOf8bitCounters(); +} + +SANITIZER_INTERFACE_ATTRIBUTE +uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) { + return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset); } -} // extern "C" +// Default empty implementations (weak). Users should redefine them. +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_cov_trace_cmp() {} +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +void __sanitizer_cov_trace_switch() {} +} // extern "C" diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc index b88814b8167..ebac6812881 100644 --- a/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc @@ -33,7 +33,6 @@ namespace __sanitizer { -static const uptr kMaxNumberOfModules = 1 << 14; static const uptr kMaxTextSize = 64 * 1024; struct CachedMapping { @@ -60,8 +59,8 @@ struct CachedMapping { static CachedMapping cached_mapping; static StaticSpinMutex mapping_mu; -void CovUpdateMapping(uptr caller_pc) { - if (!common_flags()->coverage || !common_flags()->coverage_direct) return; +void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) { + if (!common_flags()->coverage_direct) return; SpinMutexLock l(&mapping_mu); @@ -69,57 +68,58 @@ void CovUpdateMapping(uptr caller_pc) { return; InternalScopedString text(kMaxTextSize); - InternalScopedBuffer<char> modules_data(kMaxNumberOfModules * - sizeof(LoadedModule)); - LoadedModule *modules = (LoadedModule *)modules_data.data(); - CHECK(modules); - int n_modules = GetListOfModules(modules, kMaxNumberOfModules, - /* filter */ 0); - - text.append("%d\n", sizeof(uptr) * 8); - for (int i = 0; i < n_modules; ++i) { - const char *module_name = StripModuleName(modules[i].full_name()); - for (unsigned j = 0; j < modules[i].n_ranges(); ++j) { - if (modules[i].address_range_executable(j)) { - uptr start = modules[i].address_range_start(j); - uptr end = modules[i].address_range_end(j); - uptr base = modules[i].base_address(); - text.append("%zx %zx %zx %s\n", start, end, base, module_name); - if (caller_pc && caller_pc >= start && caller_pc < end) - cached_mapping.SetModuleRange(start, end); + + { + InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules); + 
CHECK(modules.data()); + int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules, + /* filter */ nullptr); + + text.append("%d\n", sizeof(uptr) * 8); + for (int i = 0; i < n_modules; ++i) { + const char *module_name = StripModuleName(modules[i].full_name()); + uptr base = modules[i].base_address(); + for (auto iter = modules[i].ranges(); iter.hasNext();) { + const auto *range = iter.next(); + if (range->executable) { + uptr start = range->beg; + uptr end = range->end; + text.append("%zx %zx %zx %s\n", start, end, base, module_name); + if (caller_pc && caller_pc >= start && caller_pc < end) + cached_mapping.SetModuleRange(start, end); + } } + modules[i].clear(); } } - int err; - InternalScopedString tmp_path(64 + - internal_strlen(common_flags()->coverage_dir)); + error_t err; + InternalScopedString tmp_path(64 + internal_strlen(coverage_dir)); uptr res = internal_snprintf((char *)tmp_path.data(), tmp_path.size(), - "%s/%zd.sancov.map.tmp", common_flags()->coverage_dir, - internal_getpid()); + "%s/%zd.sancov.map.tmp", coverage_dir, + internal_getpid()); CHECK_LE(res, tmp_path.size()); - uptr map_fd = OpenFile(tmp_path.data(), true); - if (internal_iserror(map_fd)) { - Report(" Coverage: failed to open %s for writing\n", tmp_path.data()); + fd_t map_fd = OpenFile(tmp_path.data(), WrOnly, &err); + if (map_fd == kInvalidFd) { + Report("Coverage: failed to open %s for writing: %d\n", tmp_path.data(), + err); Die(); } - res = internal_write(map_fd, text.data(), text.length()); - if (internal_iserror(res, &err)) { + if (!WriteToFile(map_fd, text.data(), text.length(), nullptr, &err)) { Printf("sancov.map write failed: %d\n", err); Die(); } - internal_close(map_fd); + CloseFile(map_fd); - InternalScopedString path(64 + internal_strlen(common_flags()->coverage_dir)); + InternalScopedString path(64 + internal_strlen(coverage_dir)); res = internal_snprintf((char *)path.data(), path.size(), "%s/%zd.sancov.map", - common_flags()->coverage_dir, internal_getpid()); + coverage_dir, internal_getpid()); CHECK_LE(res, path.size()); - res = internal_rename(tmp_path.data(), path.data()); - if (internal_iserror(res, &err)) { + if (!RenameFile(tmp_path.data(), path.data(), &err)) { Printf("sancov.map rename failed: %d\n", err); Die(); } } -} // namespace __sanitizer +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc index fc6f5dcfbb4..7a318c963f7 100644 --- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc +++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc @@ -38,19 +38,20 @@ struct DD : public DDetector { explicit DD(const DDFlags *flags); - DDPhysicalThread* CreatePhysicalThread(); - void DestroyPhysicalThread(DDPhysicalThread *pt); + DDPhysicalThread *CreatePhysicalThread() override; + void DestroyPhysicalThread(DDPhysicalThread *pt) override; - DDLogicalThread* CreateLogicalThread(u64 ctx); - void DestroyLogicalThread(DDLogicalThread *lt); + DDLogicalThread *CreateLogicalThread(u64 ctx) override; + void DestroyLogicalThread(DDLogicalThread *lt) override; - void MutexInit(DDCallback *cb, DDMutex *m); - void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock); - void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock); - void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock); - void MutexDestroy(DDCallback *cb, DDMutex *m); + void MutexInit(DDCallback *cb, DDMutex *m) override; + void MutexBeforeLock(DDCallback *cb, DDMutex 
*m, bool wlock) override; + void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, + bool trylock) override; + void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override; + void MutexDestroy(DDCallback *cb, DDMutex *m) override; - DDReport *GetReport(DDCallback *cb); + DDReport *GetReport(DDCallback *cb) override; void MutexEnsureID(DDLogicalThread *lt, DDMutex *m); void ReportDeadlock(DDCallback *cb, DDMutex *m); @@ -68,7 +69,7 @@ DD::DD(const DDFlags *flags) } DDPhysicalThread* DD::CreatePhysicalThread() { - return 0; + return nullptr; } void DD::DestroyPhysicalThread(DDPhysicalThread *pt) { @@ -178,10 +179,10 @@ void DD::MutexDestroy(DDCallback *cb, DDReport *DD::GetReport(DDCallback *cb) { if (!cb->lt->report_pending) - return 0; + return nullptr; cb->lt->report_pending = false; return &cb->lt->rep; } -} // namespace __sanitizer -#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1 +} // namespace __sanitizer +#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1 diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h index 59d8d93fe13..07c3755155f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h +++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h @@ -70,10 +70,10 @@ struct DDCallback { struct DDetector { static DDetector *Create(const DDFlags *flags); - virtual DDPhysicalThread* CreatePhysicalThread() { return 0; } + virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; } virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {} - virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return 0; } + virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; } virtual void DestroyLogicalThread(DDLogicalThread *lt) {} virtual void MutexInit(DDCallback *cb, DDMutex *m) {} @@ -83,7 +83,7 @@ struct DDetector { virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {} virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {} - virtual DDReport *GetReport(DDCallback *cb) { return 0; } + virtual DDReport *GetReport(DDCallback *cb) { return nullptr; } }; } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_flag_parser.cc b/libsanitizer/sanitizer_common/sanitizer_flag_parser.cc new file mode 100644 index 00000000000..1fc6b2edb0c --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_flag_parser.cc @@ -0,0 +1,169 @@ +//===-- sanitizer_flag_parser.cc ------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer/AddressSanitizer runtime. 
+// +//===----------------------------------------------------------------------===// + +#include "sanitizer_flag_parser.h" + +#include "sanitizer_common.h" +#include "sanitizer_libc.h" +#include "sanitizer_flags.h" +#include "sanitizer_flag_parser.h" + +namespace __sanitizer { + +LowLevelAllocator FlagParser::Alloc; + +class UnknownFlags { + static const int kMaxUnknownFlags = 20; + const char *unknown_flags_[kMaxUnknownFlags]; + int n_unknown_flags_; + + public: + void Add(const char *name) { + CHECK_LT(n_unknown_flags_, kMaxUnknownFlags); + unknown_flags_[n_unknown_flags_++] = name; + } + + void Report() { + if (!n_unknown_flags_) return; + Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_); + for (int i = 0; i < n_unknown_flags_; ++i) + Printf(" %s\n", unknown_flags_[i]); + n_unknown_flags_ = 0; + } +}; + +UnknownFlags unknown_flags; + +void ReportUnrecognizedFlags() { + unknown_flags.Report(); +} + +char *FlagParser::ll_strndup(const char *s, uptr n) { + uptr len = internal_strnlen(s, n); + char *s2 = (char*)Alloc.Allocate(len + 1); + internal_memcpy(s2, s, len); + s2[len] = 0; + return s2; +} + +void FlagParser::PrintFlagDescriptions() { + Printf("Available flags for %s:\n", SanitizerToolName); + for (int i = 0; i < n_flags_; ++i) + Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc); +} + +void FlagParser::fatal_error(const char *err) { + Printf("ERROR: %s\n", err); + Die(); +} + +bool FlagParser::is_space(char c) { + return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' || + c == '\r'; +} + +void FlagParser::skip_whitespace() { + while (is_space(buf_[pos_])) ++pos_; +} + +void FlagParser::parse_flag() { + uptr name_start = pos_; + while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_; + if (buf_[pos_] != '=') fatal_error("expected '='"); + char *name = ll_strndup(buf_ + name_start, pos_ - name_start); + + uptr value_start = ++pos_; + char *value; + if (buf_[pos_] == '\'' || buf_[pos_] == '"') { + char quote = buf_[pos_++]; + while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_; + if (buf_[pos_] == 0) fatal_error("unterminated string"); + value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1); + ++pos_; // consume the closing quote + } else { + while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_; + if (buf_[pos_] != 0 && !is_space(buf_[pos_])) + fatal_error("expected separator or eol"); + value = ll_strndup(buf_ + value_start, pos_ - value_start); + } + + bool res = run_handler(name, value); + if (!res) fatal_error("Flag parsing failed."); +} + +void FlagParser::parse_flags() { + while (true) { + skip_whitespace(); + if (buf_[pos_] == 0) break; + parse_flag(); + } + + // Do a sanity check for certain flags. + if (common_flags_dont_use.malloc_context_size < 1) + common_flags_dont_use.malloc_context_size = 1; +} + +void FlagParser::ParseString(const char *s) { + if (!s) return; + // Backup current parser state to allow nested ParseString() calls. 
+ const char *old_buf_ = buf_; + uptr old_pos_ = pos_; + buf_ = s; + pos_ = 0; + + parse_flags(); + + buf_ = old_buf_; + pos_ = old_pos_; +} + +bool FlagParser::ParseFile(const char *path, bool ignore_missing) { + static const uptr kMaxIncludeSize = 1 << 15; + char *data; + uptr data_mapped_size; + error_t err; + uptr len; + if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len, + Max(kMaxIncludeSize, GetPageSizeCached()), &err)) { + if (ignore_missing) + return true; + Printf("Failed to read options from '%s': error %d\n", path, err); + return false; + } + ParseString(data); + UnmapOrDie(data, data_mapped_size); + return true; +} + +bool FlagParser::run_handler(const char *name, const char *value) { + for (int i = 0; i < n_flags_; ++i) { + if (internal_strcmp(name, flags_[i].name) == 0) + return flags_[i].handler->Parse(value); + } + // Unrecognized flag. This is not a fatal error, we may print a warning later. + unknown_flags.Add(name); + return true; +} + +void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler, + const char *desc) { + CHECK_LT(n_flags_, kMaxFlags); + flags_[n_flags_].name = name; + flags_[n_flags_].desc = desc; + flags_[n_flags_].handler = handler; + ++n_flags_; +} + +FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) { + flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags); +} + +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_flag_parser.h b/libsanitizer/sanitizer_common/sanitizer_flag_parser.h new file mode 100644 index 00000000000..7827d735770 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_flag_parser.h @@ -0,0 +1,120 @@ +//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer/AddressSanitizer runtime. 
+// +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_FLAG_REGISTRY_H +#define SANITIZER_FLAG_REGISTRY_H + +#include "sanitizer_internal_defs.h" +#include "sanitizer_libc.h" +#include "sanitizer_common.h" + +namespace __sanitizer { + +class FlagHandlerBase { + public: + virtual bool Parse(const char *value) { return false; } +}; + +template <typename T> +class FlagHandler : public FlagHandlerBase { + T *t_; + + public: + explicit FlagHandler(T *t) : t_(t) {} + bool Parse(const char *value) final; +}; + +template <> +inline bool FlagHandler<bool>::Parse(const char *value) { + if (internal_strcmp(value, "0") == 0 || + internal_strcmp(value, "no") == 0 || + internal_strcmp(value, "false") == 0) { + *t_ = false; + return true; + } + if (internal_strcmp(value, "1") == 0 || + internal_strcmp(value, "yes") == 0 || + internal_strcmp(value, "true") == 0) { + *t_ = true; + return true; + } + Printf("ERROR: Invalid value for bool option: '%s'\n", value); + return false; +} + +template <> +inline bool FlagHandler<const char *>::Parse(const char *value) { + *t_ = internal_strdup(value); + return true; +} + +template <> +inline bool FlagHandler<int>::Parse(const char *value) { + char *value_end; + *t_ = internal_simple_strtoll(value, &value_end, 10); + bool ok = *value_end == 0; + if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value); + return ok; +} + +template <> +inline bool FlagHandler<uptr>::Parse(const char *value) { + char *value_end; + *t_ = internal_simple_strtoll(value, &value_end, 10); + bool ok = *value_end == 0; + if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value); + return ok; +} + +class FlagParser { + static const int kMaxFlags = 200; + struct Flag { + const char *name; + const char *desc; + FlagHandlerBase *handler; + } *flags_; + int n_flags_; + + const char *buf_; + uptr pos_; + + public: + FlagParser(); + void RegisterHandler(const char *name, FlagHandlerBase *handler, + const char *desc); + void ParseString(const char *s); + bool ParseFile(const char *path, bool ignore_missing); + void PrintFlagDescriptions(); + + static LowLevelAllocator Alloc; + + private: + void fatal_error(const char *err); + bool is_space(char c); + void skip_whitespace(); + void parse_flags(); + void parse_flag(); + bool run_handler(const char *name, const char *value); + char *ll_strndup(const char *s, uptr n); +}; + +template <typename T> +static void RegisterFlag(FlagParser *parser, const char *name, const char *desc, + T *var) { + FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT + parser->RegisterHandler(name, fh, desc); +} + +void ReportUnrecognizedFlags(); + +} // namespace __sanitizer + +#endif // SANITIZER_FLAG_REGISTRY_H diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.cc b/libsanitizer/sanitizer_common/sanitizer_flags.cc index d7e7118212e..a24ff900011 100644 --- a/libsanitizer/sanitizer_common/sanitizer_flags.cc +++ b/libsanitizer/sanitizer_common/sanitizer_flags.cc @@ -14,6 +14,7 @@ #include "sanitizer_common.h" #include "sanitizer_libc.h" #include "sanitizer_list.h" +#include "sanitizer_flag_parser.h" namespace __sanitizer { @@ -32,274 +33,68 @@ IntrusiveList<FlagDescription> flag_descriptions; # define SANITIZER_NEEDS_SEGV 1 #endif -void SetCommonFlagsDefaults(CommonFlags *f) { - f->symbolize = true; - f->external_symbolizer_path = 0; - f->allow_addr2line = false; - f->strip_path_prefix = ""; - f->fast_unwind_on_check = false; - f->fast_unwind_on_fatal = false; - 
f->fast_unwind_on_malloc = true; - f->handle_ioctl = false; - f->malloc_context_size = 1; - f->log_path = "stderr"; - f->verbosity = 0; - f->detect_leaks = true; - f->leak_check_at_exit = true; - f->allocator_may_return_null = false; - f->print_summary = true; - f->check_printf = true; - // TODO(glider): tools may want to set different defaults for handle_segv. - f->handle_segv = SANITIZER_NEEDS_SEGV; - f->allow_user_segv_handler = false; - f->use_sigaltstack = true; - f->detect_deadlocks = false; - f->clear_shadow_mmap_threshold = 64 * 1024; - f->color = "auto"; - f->legacy_pthread_cond = false; - f->intercept_tls_get_addr = false; - f->coverage = false; - f->coverage_direct = SANITIZER_ANDROID; - f->coverage_dir = "."; - f->full_address_space = false; - f->suppressions = ""; - f->print_suppressions = true; - f->disable_coredump = (SANITIZER_WORDSIZE == 64); - f->symbolize_inline_frames = true; - f->stack_trace_format = "DEFAULT"; +void CommonFlags::SetDefaults() { +#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "sanitizer_flags.inc" +#undef COMMON_FLAG } -void ParseCommonFlagsFromString(CommonFlags *f, const char *str) { - ParseFlag(str, &f->symbolize, "symbolize", - "If set, use the online symbolizer from common sanitizer runtime to turn " - "virtual addresses to file/line locations."); - ParseFlag(str, &f->external_symbolizer_path, "external_symbolizer_path", - "Path to external symbolizer. If empty, the tool will search $PATH for " - "the symbolizer."); - ParseFlag(str, &f->allow_addr2line, "allow_addr2line", - "If set, allows online symbolizer to run addr2line binary to symbolize " - "stack traces (addr2line will only be used if llvm-symbolizer binary is " - "unavailable."); - ParseFlag(str, &f->strip_path_prefix, "strip_path_prefix", - "Strips this prefix from file paths in error reports."); - ParseFlag(str, &f->fast_unwind_on_check, "fast_unwind_on_check", - "If available, use the fast frame-pointer-based unwinder on " - "internal CHECK failures."); - ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal", - "If available, use the fast frame-pointer-based unwinder on fatal " - "errors."); - ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc", - "If available, use the fast frame-pointer-based unwinder on " - "malloc/free."); - ParseFlag(str, &f->handle_ioctl, "handle_ioctl", - "Intercept and handle ioctl requests."); - ParseFlag(str, &f->malloc_context_size, "malloc_context_size", - "Max number of stack frames kept for each allocation/deallocation."); - ParseFlag(str, &f->log_path, "log_path", - "Write logs to \"log_path.pid\". The special values are \"stdout\" and " - "\"stderr\". The default is \"stderr\"."); - ParseFlag(str, &f->verbosity, "verbosity", - "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output)."); - ParseFlag(str, &f->detect_leaks, "detect_leaks", - "Enable memory leak detection."); - ParseFlag(str, &f->leak_check_at_exit, "leak_check_at_exit", - "Invoke leak checking in an atexit handler. 
Has no effect if " - "detect_leaks=false, or if __lsan_do_leak_check() is called before the " - "handler has a chance to run."); - ParseFlag(str, &f->allocator_may_return_null, "allocator_may_return_null", - "If false, the allocator will crash instead of returning 0 on " - "out-of-memory."); - ParseFlag(str, &f->print_summary, "print_summary", - "If false, disable printing error summaries in addition to error " - "reports."); - ParseFlag(str, &f->check_printf, "check_printf", - "Check printf arguments."); - ParseFlag(str, &f->handle_segv, "handle_segv", - "If set, registers the tool's custom SEGV handler (both SIGBUS and " - "SIGSEGV on OSX)."); - ParseFlag(str, &f->allow_user_segv_handler, "allow_user_segv_handler", - "If set, allows user to register a SEGV handler even if the tool " - "registers one."); - ParseFlag(str, &f->use_sigaltstack, "use_sigaltstack", - "If set, uses alternate stack for signal handling."); - ParseFlag(str, &f->detect_deadlocks, "detect_deadlocks", - "If set, deadlock detection is enabled."); - ParseFlag(str, &f->clear_shadow_mmap_threshold, - "clear_shadow_mmap_threshold", - "Large shadow regions are zero-filled using mmap(NORESERVE) instead of " - "memset(). This is the threshold size in bytes."); - ParseFlag(str, &f->color, "color", - "Colorize reports: (always|never|auto)."); - ParseFlag(str, &f->legacy_pthread_cond, "legacy_pthread_cond", - "Enables support for dynamic libraries linked with libpthread 2.2.5."); - ParseFlag(str, &f->intercept_tls_get_addr, "intercept_tls_get_addr", - "Intercept __tls_get_addr."); - ParseFlag(str, &f->help, "help", "Print the flag descriptions."); - ParseFlag(str, &f->mmap_limit_mb, "mmap_limit_mb", - "Limit the amount of mmap-ed memory (excluding shadow) in Mb; " - "not a user-facing flag, used mosly for testing the tools"); - ParseFlag(str, &f->coverage, "coverage", - "If set, coverage information will be dumped at program shutdown (if the " - "coverage instrumentation was enabled at compile time)."); - ParseFlag(str, &f->coverage_direct, "coverage_direct", - "If set, coverage information will be dumped directly to a memory " - "mapped file. This way data is not lost even if the process is " - "suddenly killed."); - ParseFlag(str, &f->coverage_dir, "coverage_dir", - "Target directory for coverage dumps. Defaults to the current " - "directory."); - ParseFlag(str, &f->full_address_space, "full_address_space", - "Sanitize complete address space; " - "by default kernel area on 32-bit platforms will not be sanitized"); - ParseFlag(str, &f->suppressions, "suppressions", "Suppressions file name."); - ParseFlag(str, &f->print_suppressions, "print_suppressions", - "Print matched suppressions at exit."); - ParseFlag(str, &f->disable_coredump, "disable_coredump", - "Disable core dumping. By default, disable_core=1 on 64-bit to avoid " - "dumping a 16T+ core file. Ignored on OSes that don't dump core by" - "default and for sanitizers that don't reserve lots of virtual memory."); - ParseFlag(str, &f->symbolize_inline_frames, "symbolize_inline_frames", - "Print inlined frames in stacktraces. Defaults to true."); - ParseFlag(str, &f->stack_trace_format, "stack_trace_format", - "Format string used to render stack frames. " - "See sanitizer_stacktrace_printer.h for the format description. " - "Use DEFAULT to get default format."); - - // Do a sanity check for certain flags. 
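The per-flag ParseFlag() calls removed here are superseded by handler registration in the new FlagParser (sanitizer_flag_parser.h above). As a rough sketch of how a single option is now wired up and parsed — the flag name and option string below are purely illustrative and not part of the patch:

    // Assumes the declarations from sanitizer_flag_parser.h.
    bool demo_flag = false;
    FlagParser parser;
    RegisterFlag(&parser, "demo_flag", "Illustrative flag, not a real option.",
                 &demo_flag);
    // FlagHandler<bool>::Parse accepts 0/no/false and 1/yes/true.
    parser.ParseString("demo_flag=yes");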
- if (f->malloc_context_size < 1) - f->malloc_context_size = 1; -} - -static bool GetFlagValue(const char *env, const char *name, - const char **value, int *value_length) { - if (env == 0) - return false; - const char *pos = 0; - for (;;) { - pos = internal_strstr(env, name); - if (pos == 0) - return false; - const char *name_end = pos + internal_strlen(name); - if ((pos != env && - ((pos[-1] >= 'a' && pos[-1] <= 'z') || pos[-1] == '_')) || - *name_end != '=') { - // Seems to be middle of another flag name or value. - env = pos + 1; - continue; - } - pos = name_end; - break; - } - const char *end; - if (pos[0] != '=') { - end = pos; - } else { - pos += 1; - if (pos[0] == '"') { - pos += 1; - end = internal_strchr(pos, '"'); - } else if (pos[0] == '\'') { - pos += 1; - end = internal_strchr(pos, '\''); - } else { - // Read until the next space or colon. - end = pos + internal_strcspn(pos, " :"); - } - if (end == 0) - end = pos + internal_strlen(pos); - } - *value = pos; - *value_length = end - pos; - return true; -} - -static bool StartsWith(const char *flag, int flag_length, const char *value) { - if (!flag || !value) - return false; - int value_length = internal_strlen(value); - return (flag_length >= value_length) && - (0 == internal_strncmp(flag, value, value_length)); +void CommonFlags::CopyFrom(const CommonFlags &other) { + internal_memcpy(this, &other, sizeof(*this)); } -static LowLevelAllocator allocator_for_flags; - -// The linear scan is suboptimal, but the number of flags is relatively small. -bool FlagInDescriptionList(const char *name) { - IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions); - while (it.hasNext()) { - if (!internal_strcmp(it.next()->name, name)) return true; +// Copy the string from "s" to "out", replacing "%b" with the binary basename. +static void SubstituteBinaryName(const char *s, char *out, uptr out_size) { + char *out_end = out + out_size; + while (*s && out < out_end - 1) { + if (s[0] != '%' || s[1] != 'b') { *out++ = *s++; continue; } + const char *base = GetProcessName(); + CHECK(base); + while (*base && out < out_end - 1) + *out++ = *base++; + s += 2; // skip "%b" } - return false; -} - -void AddFlagDescription(const char *name, const char *description) { - if (FlagInDescriptionList(name)) return; - FlagDescription *new_description = new(allocator_for_flags) FlagDescription; - new_description->name = name; - new_description->description = description; - flag_descriptions.push_back(new_description); + *out = '\0'; } -// TODO(glider): put the descriptions inside CommonFlags. 
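SubstituteBinaryName() above backs the new include/include_if_exists options (registered further down in RegisterIncludeFlags): a "%b" in the given path is replaced with the process basename before FlagParser::ParseFile() runs. A hedged sketch of that behaviour, with the path purely illustrative:

    FlagParser parser;
    RegisterIncludeFlags(&parser, &common_flags_dont_use);
    // For a process named "myapp", FlagHandlerInclude expands "%b" and ends up
    // calling parser.ParseFile("/etc/sanitizer/myapp.options",
    //                          /*ignore_missing*/ true).
    parser.ParseString("include_if_exists=/etc/sanitizer/%b.options");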
-void PrintFlagDescriptions() { - IntrusiveList<FlagDescription>::Iterator it(&flag_descriptions); - Printf("Available flags for %s:\n", SanitizerToolName); - while (it.hasNext()) { - FlagDescription *descr = it.next(); - Printf("\t%s\n\t\t- %s\n", descr->name, descr->description); +class FlagHandlerInclude : public FlagHandlerBase { + FlagParser *parser_; + bool ignore_missing_; + + public: + explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing) + : parser_(parser), ignore_missing_(ignore_missing) {} + bool Parse(const char *value) final { + if (internal_strchr(value, '%')) { + char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude"); + SubstituteBinaryName(value, buf, kMaxPathLength); + bool res = parser_->ParseFile(buf, ignore_missing_); + UnmapOrDie(buf, kMaxPathLength); + return res; + } + return parser_->ParseFile(value, ignore_missing_); } -} - -void ParseFlag(const char *env, bool *flag, - const char *name, const char *descr) { - const char *value; - int value_length; - AddFlagDescription(name, descr); - if (!GetFlagValue(env, name, &value, &value_length)) - return; - if (StartsWith(value, value_length, "0") || - StartsWith(value, value_length, "no") || - StartsWith(value, value_length, "false")) - *flag = false; - if (StartsWith(value, value_length, "1") || - StartsWith(value, value_length, "yes") || - StartsWith(value, value_length, "true")) - *flag = true; -} +}; -void ParseFlag(const char *env, int *flag, - const char *name, const char *descr) { - const char *value; - int value_length; - AddFlagDescription(name, descr); - if (!GetFlagValue(env, name, &value, &value_length)) - return; - *flag = static_cast<int>(internal_atoll(value)); +void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) { + FlagHandlerInclude *fh_include = new (FlagParser::Alloc) // NOLINT + FlagHandlerInclude(parser, /*ignore_missing*/ false); + parser->RegisterHandler("include", fh_include, + "read more options from the given file"); + FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc) // NOLINT + FlagHandlerInclude(parser, /*ignore_missing*/ true); + parser->RegisterHandler( + "include_if_exists", fh_include_if_exists, + "read more options from the given file (if it exists)"); } -void ParseFlag(const char *env, uptr *flag, - const char *name, const char *descr) { - const char *value; - int value_length; - AddFlagDescription(name, descr); - if (!GetFlagValue(env, name, &value, &value_length)) - return; - *flag = static_cast<uptr>(internal_atoll(value)); -} +void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) { +#define COMMON_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &cf->Name); +#include "sanitizer_flags.inc" +#undef COMMON_FLAG -void ParseFlag(const char *env, const char **flag, - const char *name, const char *descr) { - const char *value; - int value_length; - AddFlagDescription(name, descr); - if (!GetFlagValue(env, name, &value, &value_length)) - return; - // Copy the flag value. Don't use locks here, as flags are parsed at - // tool startup. 
- char *value_copy = (char*)(allocator_for_flags.Allocate(value_length + 1)); - internal_memcpy(value_copy, value, value_length); - value_copy[value_length] = '\0'; - *flag = value_copy; + RegisterIncludeFlags(parser, cf); } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.h b/libsanitizer/sanitizer_common/sanitizer_flags.h index b7a50131811..f1b872b79e2 100644 --- a/libsanitizer/sanitizer_common/sanitizer_flags.h +++ b/libsanitizer/sanitizer_common/sanitizer_flags.h @@ -16,62 +16,38 @@ namespace __sanitizer { -void ParseFlag(const char *env, bool *flag, - const char *name, const char *descr); -void ParseFlag(const char *env, int *flag, - const char *name, const char *descr); -void ParseFlag(const char *env, uptr *flag, - const char *name, const char *descr); -void ParseFlag(const char *env, const char **flag, - const char *name, const char *descr); - struct CommonFlags { - bool symbolize; - const char *external_symbolizer_path; - bool allow_addr2line; - const char *strip_path_prefix; - bool fast_unwind_on_check; - bool fast_unwind_on_fatal; - bool fast_unwind_on_malloc; - bool handle_ioctl; - int malloc_context_size; - const char *log_path; - int verbosity; - bool detect_leaks; - bool leak_check_at_exit; - bool allocator_may_return_null; - bool print_summary; - bool check_printf; - bool handle_segv; - bool allow_user_segv_handler; - bool use_sigaltstack; - bool detect_deadlocks; - uptr clear_shadow_mmap_threshold; - const char *color; - bool legacy_pthread_cond; - bool intercept_tls_get_addr; - bool help; - uptr mmap_limit_mb; - bool coverage; - bool coverage_direct; - const char *coverage_dir; - bool full_address_space; - const char *suppressions; - bool print_suppressions; - bool disable_coredump; - bool symbolize_inline_frames; - const char *stack_trace_format; +#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "sanitizer_flags.inc" +#undef COMMON_FLAG + + void SetDefaults(); + void CopyFrom(const CommonFlags &other); }; -inline CommonFlags *common_flags() { - extern CommonFlags common_flags_dont_use; +// Functions to get/set global CommonFlags shared by all sanitizer runtimes: +extern CommonFlags common_flags_dont_use; +inline const CommonFlags *common_flags() { return &common_flags_dont_use; } -void SetCommonFlagsDefaults(CommonFlags *f); -void ParseCommonFlagsFromString(CommonFlags *f, const char *str); -void PrintFlagDescriptions(); +inline void SetCommonFlagsDefaults() { + common_flags_dont_use.SetDefaults(); +} + +// This function can only be used to setup tool-specific overrides for +// CommonFlags defaults. Generally, it should only be used right after +// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and +// only during the flags initialization (i.e. before they are used for +// the first time). 
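A single COMMON_FLAG(Type, Name, DefaultValue, Description) line in sanitizer_flags.inc (added below) therefore generates the struct field, its default in SetDefaults() and its registration in RegisterCommonFlags(). A rough sketch of the intended start-up sequence in a tool runtime — the environment-variable name is illustrative only, not part of this patch:

    SetCommonFlagsDefaults();
    CommonFlags cf;
    cf.CopyFrom(*common_flags());
    cf.detect_leaks = true;                        // tool-specific default override
    OverrideCommonFlags(cf);
    FlagParser parser;
    RegisterCommonFlags(&parser);
    if (const char *opts = GetEnv("MYTOOL_OPTIONS"))   // illustrative variable name
      parser.ParseString(opts);
    if (common_flags()->help) parser.PrintFlagDescriptions();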
+inline void OverrideCommonFlags(const CommonFlags &cf) { + common_flags_dont_use.CopyFrom(cf); +} +class FlagParser; +void RegisterCommonFlags(FlagParser *parser, + CommonFlags *cf = &common_flags_dont_use); +void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf); } // namespace __sanitizer #endif // SANITIZER_FLAGS_H diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.inc b/libsanitizer/sanitizer_common/sanitizer_flags.inc new file mode 100644 index 00000000000..bca1c4b4d0f --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_flags.inc @@ -0,0 +1,192 @@ +//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes common flags available in all sanitizers. +// +//===----------------------------------------------------------------------===// + +#ifndef COMMON_FLAG +#error "Define COMMON_FLAG prior to including this file!" +#endif + +// COMMON_FLAG(Type, Name, DefaultValue, Description) +// Supported types: bool, const char *, int, uptr. +// Default value must be a compile-time constant. +// Description must be a string literal. + +COMMON_FLAG( + bool, symbolize, true, + "If set, use the online symbolizer from common sanitizer runtime to turn " + "virtual addresses to file/line locations.") +COMMON_FLAG( + const char *, external_symbolizer_path, nullptr, + "Path to external symbolizer. If empty, the tool will search $PATH for " + "the symbolizer.") +COMMON_FLAG( + bool, allow_addr2line, false, + "If set, allows online symbolizer to run addr2line binary to symbolize " + "stack traces (addr2line will only be used if llvm-symbolizer binary is " + "unavailable.") +COMMON_FLAG(const char *, strip_path_prefix, "", + "Strips this prefix from file paths in error reports.") +COMMON_FLAG(bool, fast_unwind_on_check, false, + "If available, use the fast frame-pointer-based unwinder on " + "internal CHECK failures.") +COMMON_FLAG(bool, fast_unwind_on_fatal, false, + "If available, use the fast frame-pointer-based unwinder on fatal " + "errors.") +COMMON_FLAG(bool, fast_unwind_on_malloc, true, + "If available, use the fast frame-pointer-based unwinder on " + "malloc/free.") +COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.") +COMMON_FLAG(int, malloc_context_size, 1, + "Max number of stack frames kept for each allocation/deallocation.") +COMMON_FLAG( + const char *, log_path, "stderr", + "Write logs to \"log_path.pid\". The special values are \"stdout\" and " + "\"stderr\". The default is \"stderr\".") +COMMON_FLAG( + bool, log_exe_name, false, + "Mention name of executable when reporting error and " + "append executable name to logs (as in \"log_path.exe_name.pid\").") +COMMON_FLAG( + bool, log_to_syslog, SANITIZER_ANDROID, + "Write all sanitizer output to syslog in addition to other means of " + "logging.") +COMMON_FLAG( + int, verbosity, 0, + "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).") +COMMON_FLAG(bool, detect_leaks, true, "Enable memory leak detection.") +COMMON_FLAG( + bool, leak_check_at_exit, true, + "Invoke leak checking in an atexit handler. 
Has no effect if " + "detect_leaks=false, or if __lsan_do_leak_check() is called before the " + "handler has a chance to run.") +COMMON_FLAG(bool, allocator_may_return_null, false, + "If false, the allocator will crash instead of returning 0 on " + "out-of-memory.") +COMMON_FLAG(bool, print_summary, true, + "If false, disable printing error summaries in addition to error " + "reports.") +COMMON_FLAG(bool, check_printf, true, "Check printf arguments.") +COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV, + "If set, registers the tool's custom SIGSEGV/SIGBUS handler.") +COMMON_FLAG(bool, handle_abort, false, + "If set, registers the tool's custom SIGABRT handler.") +COMMON_FLAG(bool, handle_sigfpe, true, + "If set, registers the tool's custom SIGFPE handler.") +COMMON_FLAG(bool, allow_user_segv_handler, false, + "If set, allows user to register a SEGV handler even if the tool " + "registers one.") +COMMON_FLAG(bool, use_sigaltstack, true, + "If set, uses alternate stack for signal handling.") +COMMON_FLAG(bool, detect_deadlocks, false, + "If set, deadlock detection is enabled.") +COMMON_FLAG( + uptr, clear_shadow_mmap_threshold, 64 * 1024, + "Large shadow regions are zero-filled using mmap(NORESERVE) instead of " + "memset(). This is the threshold size in bytes.") +COMMON_FLAG(const char *, color, "auto", + "Colorize reports: (always|never|auto).") +COMMON_FLAG( + bool, legacy_pthread_cond, false, + "Enables support for dynamic libraries linked with libpthread 2.2.5.") +COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.") +COMMON_FLAG(bool, help, false, "Print the flag descriptions.") +COMMON_FLAG(uptr, mmap_limit_mb, 0, + "Limit the amount of mmap-ed memory (excluding shadow) in Mb; " + "not a user-facing flag, used mosly for testing the tools") +COMMON_FLAG(uptr, hard_rss_limit_mb, 0, + "Hard RSS limit in Mb." + " If non-zero, a background thread is spawned at startup" + " which periodically reads RSS and aborts the process if the" + " limit is reached") +COMMON_FLAG(uptr, soft_rss_limit_mb, 0, + "Soft RSS limit in Mb." + " If non-zero, a background thread is spawned at startup" + " which periodically reads RSS. If the limit is reached" + " all subsequent malloc/new calls will fail or return NULL" + " (depending on the value of allocator_may_return_null)" + " until the RSS goes below the soft limit." + " This limit does not affect memory allocations other than" + " malloc/new.") +COMMON_FLAG(bool, can_use_proc_maps_statm, true, + "If false, do not attempt to read /proc/maps/statm." + " Mostly useful for testing sanitizers.") +COMMON_FLAG( + bool, coverage, false, + "If set, coverage information will be dumped at program shutdown (if the " + "coverage instrumentation was enabled at compile time).") +COMMON_FLAG(bool, coverage_pcs, true, + "If set (and if 'coverage' is set too), the coverage information " + "will be dumped as a set of PC offsets for every module.") +COMMON_FLAG(bool, coverage_order_pcs, false, + "If true, the PCs will be dumped in the order they've" + " appeared during the execution.") +COMMON_FLAG(bool, coverage_bitset, false, + "If set (and if 'coverage' is set too), the coverage information " + "will also be dumped as a bitset to a separate file.") +COMMON_FLAG(bool, coverage_counters, false, + "If set (and if 'coverage' is set too), the bitmap that corresponds" + " to coverage counters will be dumped.") +COMMON_FLAG(bool, coverage_direct, SANITIZER_ANDROID, + "If set, coverage information will be dumped directly to a memory " + "mapped file. 
This way data is not lost even if the process is " + "suddenly killed.") +COMMON_FLAG(const char *, coverage_dir, ".", + "Target directory for coverage dumps. Defaults to the current " + "directory.") +COMMON_FLAG(bool, full_address_space, false, + "Sanitize complete address space; " + "by default kernel area on 32-bit platforms will not be sanitized") +COMMON_FLAG(bool, print_suppressions, true, + "Print matched suppressions at exit.") +COMMON_FLAG( + bool, disable_coredump, (SANITIZER_WORDSIZE == 64), + "Disable core dumping. By default, disable_core=1 on 64-bit to avoid " + "dumping a 16T+ core file. Ignored on OSes that don't dump core by" + "default and for sanitizers that don't reserve lots of virtual memory.") +COMMON_FLAG(bool, use_madv_dontdump, true, + "If set, instructs kernel to not store the (huge) shadow " + "in core file.") +COMMON_FLAG(bool, symbolize_inline_frames, true, + "Print inlined frames in stacktraces. Defaults to true.") +COMMON_FLAG(bool, symbolize_vs_style, false, + "Print file locations in Visual Studio style (e.g: " + " file(10,42): ...") +COMMON_FLAG(const char *, stack_trace_format, "DEFAULT", + "Format string used to render stack frames. " + "See sanitizer_stacktrace_printer.h for the format description. " + "Use DEFAULT to get default format.") +COMMON_FLAG(bool, no_huge_pages_for_shadow, true, + "If true, the shadow is not allowed to use huge pages. ") +COMMON_FLAG(bool, strict_string_checks, false, + "If set check that string arguments are properly null-terminated") +COMMON_FLAG(bool, intercept_strstr, true, + "If set, uses custom wrappers for strstr and strcasestr functions " + "to find more errors.") +COMMON_FLAG(bool, intercept_strspn, true, + "If set, uses custom wrappers for strspn and strcspn function " + "to find more errors.") +COMMON_FLAG(bool, intercept_strpbrk, true, + "If set, uses custom wrappers for strpbrk function " + "to find more errors.") +COMMON_FLAG(bool, intercept_memcmp, true, + "If set, uses custom wrappers for memcmp function " + "to find more errors.") +COMMON_FLAG(bool, strict_memcmp, true, + "If true, assume that memcmp(p1, p2, n) always reads n bytes before " + "comparing p1 and p2.") +COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer " + "mappings in /proc/self/maps with " + "user-readable names") +COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool " + "found an error") +COMMON_FLAG( + bool, abort_on_error, SANITIZER_MAC, + "If set, the tool calls abort() instead of _exit() after printing the " + "error report.") diff --git a/libsanitizer/sanitizer_common/sanitizer_interception.h b/libsanitizer/sanitizer_common/sanitizer_interception.h deleted file mode 100644 index 2346fa859b1..00000000000 --- a/libsanitizer/sanitizer_common/sanitizer_interception.h +++ /dev/null @@ -1,23 +0,0 @@ -//===-- sanitizer_interception.h --------------------------------*- C++ -*-===// -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// Common macro definitions for interceptors. -// Always use this headers instead of interception/interception.h. 
-// -//===----------------------------------------------------------------------===// -#ifndef SANITIZER_INTERCEPTION_H -#define SANITIZER_INTERCEPTION_H - -#include "interception/interception.h" -#include "sanitizer_common.h" - -#if SANITIZER_LINUX && !defined(SANITIZER_GO) -#undef REAL -#define REAL(x) IndirectExternCall(__interception::PTR_TO_REAL(x)) -#endif - -#endif // SANITIZER_INTERCEPTION_H diff --git a/libsanitizer/sanitizer_common/sanitizer_interface_internal.h b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h new file mode 100644 index 00000000000..fe472d8c527 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h @@ -0,0 +1,56 @@ +//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between run-time libraries of sanitizers. +// +// This header declares the sanitizer runtime interface functions. +// The runtime library has to define these functions so the instrumented program +// could call them. +// +// See also include/sanitizer/common_interface_defs.h +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_INTERFACE_INTERNAL_H +#define SANITIZER_INTERFACE_INTERNAL_H + +#include "sanitizer_internal_defs.h" + +extern "C" { + // Tell the tools to write their reports to "path.<pid>" instead of stderr. + // The special values are "stdout" and "stderr". + SANITIZER_INTERFACE_ATTRIBUTE + void __sanitizer_set_report_path(const char *path); + + typedef struct { + int coverage_sandboxed; + __sanitizer::sptr coverage_fd; + unsigned int coverage_max_block_size; + } __sanitizer_sandbox_arguments; + + // Notify the tools that the sandbox is going to be turned on. + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void + __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args); + + // This function is called by the tool when it has just finished reporting + // an error. 'error_summary' is a one-line string that summarizes + // the error message. This function can be overridden by the client. + SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE + void __sanitizer_report_error_summary(const char *error_summary); + + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump(); + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init(); + SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard); + SANITIZER_INTERFACE_ATTRIBUTE + void __sanitizer_annotate_contiguous_container(const void *beg, + const void *end, + const void *old_mid, + const void *new_mid); + SANITIZER_INTERFACE_ATTRIBUTE + int __sanitizer_verify_contiguous_container(const void *beg, const void *mid, + const void *end); +} // extern "C" + +#endif // SANITIZER_INTERFACE_INTERNAL_H diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h index f1f243c17e2..d76ed7570a7 100644 --- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h +++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h @@ -13,6 +13,10 @@ #include "sanitizer_platform.h" +#ifndef SANITIZER_DEBUG +# define SANITIZER_DEBUG 0 +#endif + // Only use SANITIZER_*ATTRIBUTE* before the function return type! 
#if SANITIZER_WINDOWS # define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport) @@ -26,7 +30,7 @@ # define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak)) #endif -#if SANITIZER_LINUX && !defined(SANITIZER_GO) +#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO) # define SANITIZER_SUPPORTS_WEAK_HOOKS 1 #else # define SANITIZER_SUPPORTS_WEAK_HOOKS 0 @@ -74,13 +78,22 @@ typedef signed char s8; typedef signed short s16; // NOLINT typedef signed int s32; typedef signed long long s64; // NOLINT +#if SANITIZER_WINDOWS +// On Windows, files are HANDLE, which is a synonim of void*. +// Use void* to avoid including <windows.h> everywhere. +typedef void* fd_t; +typedef unsigned error_t; +#else typedef int fd_t; +typedef int error_t; +#endif // WARNING: OFF_T may be different from OS type off_t, depending on the value of // _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls // like pread and mmap, as opposed to pread64 and mmap64. -// Mac and Linux/x86-64 are special. -#if SANITIZER_MAC || (SANITIZER_LINUX && defined(__x86_64__)) +// FreeBSD, Mac and Linux/x86-64 are special. +#if SANITIZER_FREEBSD || SANITIZER_MAC || \ + (SANITIZER_LINUX && defined(__x86_64__)) typedef u64 OFF_T; #else typedef uptr OFF_T; @@ -94,41 +107,6 @@ typedef u32 operator_new_size_type; #endif } // namespace __sanitizer -extern "C" { - // Tell the tools to write their reports to "path.<pid>" instead of stderr. - // The special values are "stdout" and "stderr". - SANITIZER_INTERFACE_ATTRIBUTE - void __sanitizer_set_report_path(const char *path); - - typedef struct { - int coverage_sandboxed; - __sanitizer::sptr coverage_fd; - unsigned int coverage_max_block_size; - } __sanitizer_sandbox_arguments; - - // Notify the tools that the sandbox is going to be turned on. - SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void - __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args); - - // This function is called by the tool when it has just finished reporting - // an error. 'error_summary' is a one-line string that summarizes - // the error message. This function can be overridden by the client. 
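Because the symbol is declared weak, an instrumented program may supply its own definition to post-process error summaries. A minimal client-side sketch (the message prefix is arbitrary):

    #include <stdio.h>
    extern "C" void __sanitizer_report_error_summary(const char *error_summary) {
      fprintf(stderr, "my-app: %s\n", error_summary);  // forward to app logging
    }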
- SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE - void __sanitizer_report_error_summary(const char *error_summary); - - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump(); - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init(); - SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(); - SANITIZER_INTERFACE_ATTRIBUTE - void __sanitizer_annotate_contiguous_container(const void *beg, - const void *end, - const void *old_mid, - const void *new_mid); - SANITIZER_INTERFACE_ATTRIBUTE - int __sanitizer_verify_contiguous_container(const void *beg, const void *mid, - const void *end); -} // extern "C" - using namespace __sanitizer; // NOLINT // ----------- ATTENTION ------------- @@ -149,7 +127,6 @@ using namespace __sanitizer; // NOLINT # define NOINLINE __declspec(noinline) # define NORETURN __declspec(noreturn) # define THREADLOCAL __declspec(thread) -# define NOTHROW # define LIKELY(x) (x) # define UNLIKELY(x) (x) # define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ @@ -163,7 +140,6 @@ using namespace __sanitizer; // NOLINT # define NOINLINE __attribute__((noinline)) # define NORETURN __attribute__((noreturn)) # define THREADLOCAL __thread -# define NOTHROW throw() # define LIKELY(x) __builtin_expect(!!(x), 1) # define UNLIKELY(x) __builtin_expect(!!(x), 0) # if defined(__i386__) || defined(__x86_64__) @@ -182,6 +158,12 @@ using namespace __sanitizer; // NOLINT # define USED #endif +#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900) +# define NOEXCEPT noexcept +#else +# define NOEXCEPT throw() +#endif + // Unaligned versions of basic types. typedef ALIGNED(1) u16 uu16; typedef ALIGNED(1) u32 uu32; @@ -238,7 +220,7 @@ void NORETURN CheckFailed(const char *file, int line, const char *cond, #define CHECK_GT(a, b) CHECK_IMPL((a), >, (b)) #define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b)) -#if TSAN_DEBUG +#if SANITIZER_DEBUG #define DCHECK(a) CHECK(a) #define DCHECK_EQ(a, b) CHECK_EQ(a, b) #define DCHECK_NE(a, b) CHECK_NE(a, b) @@ -318,4 +300,12 @@ extern "C" void* _ReturnAddress(void); } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \ } +// Forces the compiler to generate a frame pointer in the function. +#define ENABLE_FRAME_POINTER \ + do { \ + volatile uptr enable_fp; \ + enable_fp = GET_CURRENT_FRAME(); \ + (void)enable_fp; \ + } while (0) + #endif // SANITIZER_DEFS_H diff --git a/libsanitizer/sanitizer_common/sanitizer_lfstack.h b/libsanitizer/sanitizer_common/sanitizer_lfstack.h index 8033b9a7f9c..8bd0e91c400 100644 --- a/libsanitizer/sanitizer_common/sanitizer_lfstack.h +++ b/libsanitizer/sanitizer_common/sanitizer_lfstack.h @@ -47,8 +47,8 @@ struct LFStack { u64 cmp = atomic_load(&head_, memory_order_acquire); for (;;) { T *cur = (T*)(uptr)(cmp & kPtrMask); - if (cur == 0) - return 0; + if (!cur) + return nullptr; T *nxt = cur->next; u64 cnt = (cmp & kCounterMask); u64 xch = (u64)(uptr)nxt | cnt; @@ -66,6 +66,6 @@ struct LFStack { atomic_uint64_t head_; }; -} // namespace __sanitizer +} // namespace __sanitizer -#endif // #ifndef SANITIZER_LFSTACK_H +#endif // SANITIZER_LFSTACK_H diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc index c13a66d88cd..05fdca622e4 100644 --- a/libsanitizer/sanitizer_common/sanitizer_libc.cc +++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc @@ -8,37 +8,37 @@ // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries. See sanitizer_libc.h for details. 
//===----------------------------------------------------------------------===// + #include "sanitizer_allocator_internal.h" #include "sanitizer_common.h" #include "sanitizer_libc.h" namespace __sanitizer { -// Make the compiler think that something is going on there. -static inline void break_optimization(void *arg) { -#if _MSC_VER - // FIXME: make sure this is actually enough. - __asm; -#else - __asm__ __volatile__("" : : "r" (arg) : "memory"); -#endif -} - s64 internal_atoll(const char *nptr) { - return internal_simple_strtoll(nptr, (char**)0, 10); + return internal_simple_strtoll(nptr, nullptr, 10); } void *internal_memchr(const void *s, int c, uptr n) { - const char* t = (char*)s; + const char *t = (const char *)s; for (uptr i = 0; i < n; ++i, ++t) if (*t == c) - return (void*)t; - return 0; + return reinterpret_cast<void *>(const_cast<char *>(t)); + return nullptr; +} + +void *internal_memrchr(const void *s, int c, uptr n) { + const char *t = (const char *)s; + void *res = nullptr; + for (uptr i = 0; i < n; ++i, ++t) { + if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t)); + } + return res; } int internal_memcmp(const void* s1, const void* s2, uptr n) { - const char* t1 = (char*)s1; - const char* t2 = (char*)s2; + const char *t1 = (const char *)s1; + const char *t2 = (const char *)s2; for (uptr i = 0; i < n; ++i, ++t1, ++t2) if (*t1 != *t2) return *t1 < *t2 ? -1 : 1; @@ -47,7 +47,7 @@ int internal_memcmp(const void* s1, const void* s2, uptr n) { void *internal_memcpy(void *dest, const void *src, uptr n) { char *d = (char*)dest; - char *s = (char*)src; + const char *s = (const char *)src; for (uptr i = 0; i < n; ++i) d[i] = s[i]; return dest; @@ -55,7 +55,7 @@ void *internal_memcpy(void *dest, const void *src, uptr n) { void *internal_memmove(void *dest, const void *src, uptr n) { char *d = (char*)dest; - char *s = (char*)src; + const char *s = (const char *)src; sptr i, signed_n = (sptr)n; CHECK_GE(signed_n, 0); if (d < s) { @@ -76,7 +76,8 @@ void internal_bzero_aligned16(void *s, uptr n) { CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0); for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) { p->a = p->b = 0; - break_optimization(0); // Make sure this does not become memset. + // Make sure this does not become memset. 
+ SanitizerBreakOptimization(nullptr); } } @@ -95,7 +96,7 @@ void *internal_memset(void* s, int c, uptr n) { uptr internal_strcspn(const char *s, const char *reject) { uptr i; for (i = 0; s[i]; i++) { - if (internal_strchr(reject, s[i]) != 0) + if (internal_strchr(reject, s[i])) return i; } return i; @@ -109,6 +110,14 @@ char* internal_strdup(const char *s) { return s2; } +char* internal_strndup(const char *s, uptr n) { + uptr len = internal_strnlen(s, n); + char *s2 = (char*)InternalAlloc(len + 1); + internal_memcpy(s2, s, len); + s2[len] = 0; + return s2; +} + int internal_strcmp(const char *s1, const char *s2) { while (true) { unsigned c1 = *s1; @@ -136,9 +145,9 @@ int internal_strncmp(const char *s1, const char *s2, uptr n) { char* internal_strchr(const char *s, int c) { while (true) { if (*s == (char)c) - return (char*)s; + return const_cast<char *>(s); if (*s == 0) - return 0; + return nullptr; s++; } } @@ -146,16 +155,16 @@ char* internal_strchr(const char *s, int c) { char *internal_strchrnul(const char *s, int c) { char *res = internal_strchr(s, c); if (!res) - res = (char*)s + internal_strlen(s); + res = const_cast<char *>(s) + internal_strlen(s); return res; } char *internal_strrchr(const char *s, int c) { - const char *res = 0; + const char *res = nullptr; for (uptr i = 0; s[i]; i++) { if (s[i] == c) res = s + i; } - return (char*)res; + return const_cast<char *>(res); } uptr internal_strlen(const char *s) { @@ -191,12 +200,12 @@ char *internal_strstr(const char *haystack, const char *needle) { // This is O(N^2), but we are not using it in hot places. uptr len1 = internal_strlen(haystack); uptr len2 = internal_strlen(needle); - if (len1 < len2) return 0; + if (len1 < len2) return nullptr; for (uptr pos = 0; pos <= len1 - len2; pos++) { if (internal_memcmp(haystack + pos, needle, len2) == 0) - return (char*)haystack + pos; + return const_cast<char *>(haystack) + pos; } - return 0; + return nullptr; } s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { @@ -205,7 +214,7 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { int sgn = 1; u64 res = 0; bool have_digits = false; - char *old_nptr = (char*)nptr; + char *old_nptr = const_cast<char *>(nptr); if (*nptr == '+') { sgn = 1; nptr++; @@ -220,8 +229,8 @@ s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) { have_digits = true; nptr++; } - if (endptr != 0) { - *endptr = (have_digits) ? (char*)nptr : old_nptr; + if (endptr) { + *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr; } if (sgn > 0) { return (s64)(Min((u64)INT64_MAX, res)); @@ -249,4 +258,4 @@ bool mem_is_zero(const char *beg, uptr size) { return all == 0; } -} // namespace __sanitizer +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h index 680b888e219..1b3f8edfadb 100644 --- a/libsanitizer/sanitizer_common/sanitizer_libc.h +++ b/libsanitizer/sanitizer_common/sanitizer_libc.h @@ -9,7 +9,9 @@ // run-time libraries. // These tools can not use some of the libc functions directly because those // functions are intercepted. Instead, we implement a tiny subset of libc here. +// FIXME: Some of functions declared in this file are in fact POSIX, not libc. 
//===----------------------------------------------------------------------===// + #ifndef SANITIZER_LIBC_H #define SANITIZER_LIBC_H @@ -24,6 +26,7 @@ namespace __sanitizer { // String functions s64 internal_atoll(const char *nptr); void *internal_memchr(const void *s, int c, uptr n); +void *internal_memrchr(const void *s, int c, uptr n); int internal_memcmp(const void* s1, const void* s2, uptr n); void *internal_memcpy(void *dest, const void *src, uptr n); void *internal_memmove(void *dest, const void *src, uptr n); @@ -36,6 +39,7 @@ char *internal_strchrnul(const char *s, int c); int internal_strcmp(const char *s1, const char *s2); uptr internal_strcspn(const char *s, const char *reject); char *internal_strdup(const char *s); +char *internal_strndup(const char *s, uptr n); uptr internal_strlen(const char *s); char *internal_strncat(char *dst, const char *src, uptr n); int internal_strncmp(const char *s1, const char *s2, uptr n); @@ -52,55 +56,26 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...); // Optimized for the case when the result is true. bool mem_is_zero(const char *mem, uptr size); - -// Memory -uptr internal_mmap(void *addr, uptr length, int prot, int flags, - int fd, u64 offset); -uptr internal_munmap(void *addr, uptr length); - // I/O -const fd_t kInvalidFd = -1; +const fd_t kInvalidFd = (fd_t)-1; const fd_t kStdinFd = 0; -const fd_t kStdoutFd = 1; -const fd_t kStderrFd = 2; -uptr internal_close(fd_t fd); -int internal_isatty(fd_t fd); +const fd_t kStdoutFd = (fd_t)1; +const fd_t kStderrFd = (fd_t)2; -// Use __sanitizer::OpenFile() instead. -uptr internal_open(const char *filename, int flags); -uptr internal_open(const char *filename, int flags, u32 mode); - -uptr internal_read(fd_t fd, void *buf, uptr count); -uptr internal_write(fd_t fd, const void *buf, uptr count); uptr internal_ftruncate(fd_t fd, uptr size); // OS -uptr internal_filesize(fd_t fd); // -1 on error. 
-uptr internal_stat(const char *path, void *buf); -uptr internal_lstat(const char *path, void *buf); -uptr internal_fstat(fd_t fd, void *buf); -uptr internal_dup2(int oldfd, int newfd); -uptr internal_readlink(const char *path, char *buf, uptr bufsize); -uptr internal_unlink(const char *path); -uptr internal_rename(const char *oldpath, const char *newpath); void NORETURN internal__exit(int exitcode); -uptr internal_lseek(fd_t fd, OFF_T offset, int whence); -uptr internal_ptrace(int request, int pid, void *addr, void *data); -uptr internal_waitpid(int pid, int *status, int options); uptr internal_getpid(); uptr internal_getppid(); -int internal_fork(); - // Threading uptr internal_sched_yield(); // Error handling -bool internal_iserror(uptr retval, int *rverrno = 0); - -int internal_sigaction(int signum, const void *act, void *oldact); +bool internal_iserror(uptr retval, int *rverrno = nullptr); -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_LIBC_H +#endif // SANITIZER_LIBC_H diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.cc b/libsanitizer/sanitizer_common/sanitizer_libignore.cc index 9877d668658..021ca618d64 100644 --- a/libsanitizer/sanitizer_common/sanitizer_libignore.cc +++ b/libsanitizer/sanitizer_common/sanitizer_libignore.cc @@ -6,10 +6,12 @@ //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" + #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_libignore.h" #include "sanitizer_flags.h" +#include "sanitizer_posix.h" #include "sanitizer_procmaps.h" namespace __sanitizer { @@ -17,35 +19,29 @@ namespace __sanitizer { LibIgnore::LibIgnore(LinkerInitialized) { } -void LibIgnore::Init(const SuppressionContext &supp) { +void LibIgnore::AddIgnoredLibrary(const char *name_templ) { BlockingMutexLock lock(&mutex_); - CHECK_EQ(count_, 0); - const uptr n = supp.SuppressionCount(); - for (uptr i = 0; i < n; i++) { - const Suppression *s = supp.SuppressionAt(i); - if (s->type != SuppressionLib) - continue; - if (count_ >= kMaxLibs) { - Report("%s: too many called_from_lib suppressions (max: %d)\n", - SanitizerToolName, kMaxLibs); - Die(); - } - Lib *lib = &libs_[count_++]; - lib->templ = internal_strdup(s->templ); - lib->name = 0; - lib->loaded = false; + if (count_ >= kMaxLibs) { + Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName, + kMaxLibs); + Die(); } + Lib *lib = &libs_[count_++]; + lib->templ = internal_strdup(name_templ); + lib->name = nullptr; + lib->real_name = nullptr; + lib->loaded = false; } void LibIgnore::OnLibraryLoaded(const char *name) { BlockingMutexLock lock(&mutex_); // Try to match suppressions with symlink target. - InternalScopedBuffer<char> buf(4096); - if (name != 0 && internal_readlink(name, buf.data(), buf.size() - 1) > 0 && - buf.data()[0]) { + InternalScopedString buf(kMaxPathLength); + if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 && + buf[0]) { for (uptr i = 0; i < count_; i++) { Lib *lib = &libs_[i]; - if (!lib->loaded && lib->real_name == 0 && + if (!lib->loaded && (!lib->real_name) && TemplateMatch(lib->templ, name)) lib->real_name = internal_strdup(buf.data()); } @@ -53,7 +49,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) { // Scan suppressions list and find newly loaded and unloaded libraries. 
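With Init(const SuppressionContext&) gone, the tool itself now feeds library name templates into LibIgnore and keeps notifying it about loaded and unloaded libraries. Roughly, and with the variable names below being placeholders only:

    // During tool initialization, for every "called_from_lib" suppression template:
    lib_ignore->AddIgnoredLibrary(suppression_templ);
    // From the dlopen()/dlclose() interceptors:
    lib_ignore->OnLibraryLoaded(filename);
    lib_ignore->OnLibraryUnloaded();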
MemoryMappingLayout proc_maps(/*cache_enabled*/false); - InternalScopedBuffer<char> module(4096); + InternalScopedString module(kMaxPathLength); for (uptr i = 0; i < count_; i++) { Lib *lib = &libs_[i]; bool loaded = false; @@ -63,7 +59,7 @@ void LibIgnore::OnLibraryLoaded(const char *name) { if ((prot & MemoryMappingLayout::kProtectionExecute) == 0) continue; if (TemplateMatch(lib->templ, module.data()) || - (lib->real_name != 0 && + (lib->real_name && internal_strcmp(lib->real_name, module.data()) == 0)) { if (loaded) { Report("%s: called_from_lib suppression '%s' is matched against" @@ -96,9 +92,9 @@ void LibIgnore::OnLibraryLoaded(const char *name) { } void LibIgnore::OnLibraryUnloaded() { - OnLibraryLoaded(0); + OnLibraryLoaded(nullptr); } -} // namespace __sanitizer +} // namespace __sanitizer -#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX +#endif // #if SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/sanitizer_common/sanitizer_libignore.h b/libsanitizer/sanitizer_common/sanitizer_libignore.h index 2089c4bbeeb..84419d14fed 100644 --- a/libsanitizer/sanitizer_common/sanitizer_libignore.h +++ b/libsanitizer/sanitizer_common/sanitizer_libignore.h @@ -6,8 +6,8 @@ //===----------------------------------------------------------------------===// // // LibIgnore allows to ignore all interceptors called from a particular set -// of dynamic libraries. LibIgnore remembers all "called_from_lib" suppressions -// from the provided SuppressionContext; finds code ranges for the libraries; +// of dynamic libraries. LibIgnore can be initialized with several templates +// of names of libraries to be ignored. It finds code ranges for the libraries; // and checks whether the provided PC value belongs to the code ranges. // //===----------------------------------------------------------------------===// @@ -17,7 +17,6 @@ #include "sanitizer_internal_defs.h" #include "sanitizer_common.h" -#include "sanitizer_suppressions.h" #include "sanitizer_atomic.h" #include "sanitizer_mutex.h" @@ -27,8 +26,8 @@ class LibIgnore { public: explicit LibIgnore(LinkerInitialized); - // Fetches all "called_from_lib" suppressions from the SuppressionContext. - void Init(const SuppressionContext &supp); + // Must be called during initialization. + void AddIgnoredLibrary(const char *name_templ); // Must be called after a new dynamic library is loaded. void OnLibraryLoaded(const char *name); diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc index 9feb307db9f..00a6c1f094a 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux.cc +++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc @@ -11,6 +11,7 @@ //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" + #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" @@ -28,12 +29,22 @@ #include <asm/param.h> #endif +// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat' +// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To +// access stat from asm/stat.h, without conflicting with definition in +// sys/stat.h, we use this trick. 
+#if defined(__mips64) +#include <asm/unistd.h> +#include <sys/types.h> +#define stat kernel_stat +#include <asm/stat.h> +#undef stat +#endif + #include <dlfcn.h> #include <errno.h> #include <fcntl.h> -#if !SANITIZER_ANDROID #include <link.h> -#endif #include <pthread.h> #include <sched.h> #include <sys/mman.h> @@ -43,6 +54,7 @@ #include <sys/syscall.h> #include <sys/time.h> #include <sys/types.h> +#include <ucontext.h> #include <unistd.h> #if SANITIZER_FREEBSD @@ -60,11 +72,6 @@ extern char **environ; // provided by crt1 #include <sys/signal.h> #endif -#if SANITIZER_ANDROID -#include <android/log.h> -#include <sys/system_properties.h> -#endif - #if SANITIZER_LINUX // <linux/time.h> struct kernel_timeval { @@ -90,19 +97,23 @@ namespace __sanitizer { #if SANITIZER_LINUX && defined(__x86_64__) #include "sanitizer_syscall_linux_x86_64.inc" +#elif SANITIZER_LINUX && defined(__aarch64__) +#include "sanitizer_syscall_linux_aarch64.inc" #else #include "sanitizer_syscall_generic.inc" #endif // --------------- sanitizer_libc.h -uptr internal_mmap(void *addr, uptr length, int prot, int flags, - int fd, u64 offset) { +uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd, + OFF_T offset) { #if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd, offset); #else + // mmap2 specifies file offset in 4096-byte units. + CHECK(IsAligned(offset, 4096)); return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd, - offset); + offset / 4096); #endif } @@ -110,6 +121,10 @@ uptr internal_munmap(void *addr, uptr length) { return internal_syscall(SYSCALL(munmap), (uptr)addr, length); } +int internal_mprotect(void *addr, uptr length, int prot) { + return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot); +} + uptr internal_close(fd_t fd) { return internal_syscall(SYSCALL(close), fd); } @@ -131,11 +146,6 @@ uptr internal_open(const char *filename, int flags, u32 mode) { #endif } -uptr OpenFile(const char *filename, bool write) { - return internal_open(filename, - write ? 
O_RDWR | O_CREAT /*| O_CLOEXEC*/ : O_RDONLY, 0660); -} - uptr internal_read(fd_t fd, void *buf, uptr count) { sptr res; HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf, @@ -152,7 +162,8 @@ uptr internal_write(fd_t fd, const void *buf, uptr count) { uptr internal_ftruncate(fd_t fd, uptr size) { sptr res; - HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd, size)); + HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd, + (OFF_T)size)); return res; } @@ -176,6 +187,26 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) { } #endif +#if defined(__mips64) +static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { + internal_memset(out, 0, sizeof(*out)); + out->st_dev = in->st_dev; + out->st_ino = in->st_ino; + out->st_mode = in->st_mode; + out->st_nlink = in->st_nlink; + out->st_uid = in->st_uid; + out->st_gid = in->st_gid; + out->st_rdev = in->st_rdev; + out->st_size = in->st_size; + out->st_blksize = in->st_blksize; + out->st_blocks = in->st_blocks; + out->st_atime = in->st_atime_nsec; + out->st_mtime = in->st_mtime_nsec; + out->st_ctime = in->st_ctime_nsec; + out->st_ino = in->st_ino; +} +#endif + uptr internal_stat(const char *path, void *buf) { #if SANITIZER_FREEBSD return internal_syscall(SYSCALL(stat), path, buf); @@ -183,7 +214,15 @@ uptr internal_stat(const char *path, void *buf) { return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0); #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS +# if defined(__mips64) + // For mips64, stat syscall fills buffer in the format of kernel_stat + struct kernel_stat kbuf; + int res = internal_syscall(SYSCALL(stat), path, &kbuf); + kernel_stat_to_stat(&kbuf, (struct stat *)buf); + return res; +# else return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf); +# endif #else struct stat64 buf64; int res = internal_syscall(SYSCALL(stat64), path, &buf64); @@ -331,23 +370,23 @@ const char *GetEnv(const char *name) { if (!inited) { inited = true; uptr environ_size; - len = ReadFileToBuffer("/proc/self/environ", - &environ, &environ_size, 1 << 26); + if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len)) + environ = nullptr; } - if (!environ || len == 0) return 0; + if (!environ || len == 0) return nullptr; uptr namelen = internal_strlen(name); const char *p = environ; while (*p != '\0') { // will happen at the \0\0 that terminates the buffer // proc file has the format NAME=value\0NAME=value\0NAME=value\0... const char* endp = (char*)internal_memchr(p, '\0', len - (p - environ)); - if (endp == 0) // this entry isn't NUL terminated - return 0; + if (!endp) // this entry isn't NUL terminated + return nullptr; else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match. return p + namelen + 1; // point after = p = endp + 1; } - return 0; // Not found. + return nullptr; // Not found. 
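The ReadFileToBuffer() interface used in GetEnv() above now reports success as a bool and returns both the mapped capacity and the number of bytes actually read, so the caller can unmap precisely. A small caller sketch (the file name is only an example):

    char *buf;
    uptr buf_size, read_len;
    if (ReadFileToBuffer("/proc/self/cmdline", &buf, &buf_size, &read_len)) {
      // ... use buf[0 .. read_len) ...
      UnmapOrDie(buf, buf_size);
    }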
#else #error "Unsupported platform" #endif @@ -361,9 +400,13 @@ extern "C" { static void ReadNullSepFileToArray(const char *path, char ***arr, int arr_size) { char *buff; - uptr buff_size = 0; + uptr buff_size; + uptr buff_len; *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray"); - ReadFileToBuffer(path, &buff, &buff_size, 1024 * 1024); + if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) { + (*arr)[0] = nullptr; + return; + } (*arr)[0] = buff; int count, i; for (count = 1, i = 1; ; i++) { @@ -374,7 +417,7 @@ static void ReadNullSepFileToArray(const char *path, char ***arr, count++; } } - (*arr)[count] = 0; + (*arr)[count] = nullptr; } #endif @@ -405,32 +448,18 @@ void ReExec() { Die(); } -// Stub implementation of GetThreadStackAndTls for Go. -#if SANITIZER_GO -void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, - uptr *tls_addr, uptr *tls_size) { - *stk_addr = 0; - *stk_size = 0; - *tls_addr = 0; - *tls_size = 0; -} -#endif // SANITIZER_GO - enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 }; -BlockingMutex::BlockingMutex(LinkerInitialized) { - CHECK_EQ(owner_, 0); -} - BlockingMutex::BlockingMutex() { internal_memset(this, 0, sizeof(*this)); } void BlockingMutex::Lock() { + CHECK_EQ(owner_, 0); atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_); if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked) return; @@ -528,19 +557,21 @@ int internal_fork() { } #if SANITIZER_LINUX +#define SA_RESTORER 0x04000000 // Doesn't set sa_restorer, use with caution (see below). int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { __sanitizer_kernel_sigaction_t k_act, k_oldact; internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t)); internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t)); - const __sanitizer_sigaction *u_act = (__sanitizer_sigaction *)act; + const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act; __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact; if (u_act) { k_act.handler = u_act->handler; k_act.sigaction = u_act->sigaction; internal_memcpy(&k_act.sa_mask, &u_act->sa_mask, sizeof(__sanitizer_kernel_sigset_t)); - k_act.sa_flags = u_act->sa_flags; + // Without SA_RESTORER kernel ignores the calls (probably returns EINVAL). + k_act.sa_flags = u_act->sa_flags | SA_RESTORER; // FIXME: most often sa_restorer is unset, however the kernel requires it // to point to a valid signal restorer that calls the rt_sigreturn syscall. // If sa_restorer passed to the kernel is NULL, the program may crash upon @@ -553,8 +584,8 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { } uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum, - (uptr)(u_act ? &k_act : NULL), - (uptr)(u_oldact ? &k_oldact : NULL), + (uptr)(u_act ? &k_act : nullptr), + (uptr)(u_oldact ? &k_oldact : nullptr), (uptr)sizeof(__sanitizer_kernel_sigset_t)); if ((result == 0) && u_oldact) { @@ -674,45 +705,45 @@ uptr GetPageSize() { #endif } -static char proc_self_exe_cache_str[kMaxPathLength]; -static uptr proc_self_exe_cache_len = 0; - uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { - if (proc_self_exe_cache_len > 0) { - // If available, use the cached module name. 
- uptr module_name_len = - internal_snprintf(buf, buf_len, "%s", proc_self_exe_cache_str); - CHECK_LT(module_name_len, buf_len); - return module_name_len; - } #if SANITIZER_FREEBSD - const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 }; + const int Mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 }; + const char *default_module_name = "kern.proc.pathname"; size_t Size = buf_len; - bool IsErr = (sysctl(Mib, 4, buf, &Size, NULL, 0) != 0); + bool IsErr = (sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0); int readlink_error = IsErr ? errno : 0; uptr module_name_len = Size; #else + const char *default_module_name = "/proc/self/exe"; uptr module_name_len = internal_readlink( - "/proc/self/exe", buf, buf_len); + default_module_name, buf, buf_len); int readlink_error; bool IsErr = internal_iserror(module_name_len, &readlink_error); #endif if (IsErr) { - // We can't read /proc/self/exe for some reason, assume the name of the - // binary is unknown. - Report("WARNING: readlink(\"/proc/self/exe\") failed with errno %d, " + // We can't read binary name for some reason, assume it's unknown. + Report("WARNING: reading executable name failed with errno %d, " "some stack frames may not be symbolized\n", readlink_error); - module_name_len = internal_snprintf(buf, buf_len, "/proc/self/exe"); + module_name_len = internal_snprintf(buf, buf_len, "%s", + default_module_name); CHECK_LT(module_name_len, buf_len); } return module_name_len; } -void CacheBinaryName() { - if (!proc_self_exe_cache_len) { - proc_self_exe_cache_len = - ReadBinaryName(proc_self_exe_cache_str, kMaxPathLength); +uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) { +#if SANITIZER_LINUX + char *tmpbuf; + uptr tmpsize; + uptr tmplen; + if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen, + 1024 * 1024)) { + internal_strncpy(buf, tmpbuf, buf_len); + UnmapOrDie(tmpbuf, tmpsize); + return internal_strlen(buf); } +#endif + return ReadBinaryName(buf, buf_len); } // Match full names of the form /path/to/base_name{-,.}* @@ -730,6 +761,7 @@ bool LibraryNameIs(const char *full_name, const char *base_name) { #if !SANITIZER_ANDROID // Call cb for each region mapped by map. void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) { + CHECK_NE(map, nullptr); #if !SANITIZER_FREEBSD typedef ElfW(Phdr) Elf_Phdr; typedef ElfW(Ehdr) Elf_Ehdr; @@ -829,46 +861,283 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, : "rsp", "memory", "r11", "rcx"); return res; } -#endif // defined(__x86_64__) && SANITIZER_LINUX +#elif defined(__mips__) +uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + long long res; + if (!fn || !child_stack) + return -EINVAL; + CHECK_EQ(0, (uptr)child_stack % 16); + child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); + ((unsigned long long *)child_stack)[0] = (uptr)fn; + ((unsigned long long *)child_stack)[1] = (uptr)arg; + register void *a3 __asm__("$7") = newtls; + register int *a4 __asm__("$8") = child_tidptr; + // We don't have proper CFI directives here because it requires alot of code + // for very marginal benefits. 
+ __asm__ __volatile__( + /* $v0 = syscall($v0 = __NR_clone, + * $a0 = flags, + * $a1 = child_stack, + * $a2 = parent_tidptr, + * $a3 = new_tls, + * $a4 = child_tidptr) + */ + ".cprestore 16;\n" + "move $4,%1;\n" + "move $5,%2;\n" + "move $6,%3;\n" + "move $7,%4;\n" + /* Store the fifth argument on stack + * if we are using 32-bit abi. + */ +#if SANITIZER_WORDSIZE == 32 + "lw %5,16($29);\n" +#else + "move $8,%5;\n" +#endif + "li $2,%6;\n" + "syscall;\n" -#if SANITIZER_ANDROID -static atomic_uint8_t android_log_initialized; + /* if ($v0 != 0) + * return; + */ + "bnez $2,1f;\n" + + /* Call "fn(arg)". */ + "ld $25,0($29);\n" + "ld $4,8($29);\n" + "jal $25;\n" -void AndroidLogInit() { - atomic_store(&android_log_initialized, 1, memory_order_release); + /* Call _exit($v0). */ + "move $4,$2;\n" + "li $2,%7;\n" + "syscall;\n" + + /* Return to parent. */ + "1:\n" + : "=r" (res) + : "r"(flags), + "r"(child_stack), + "r"(parent_tidptr), + "r"(a3), + "r"(a4), + "i"(__NR_clone), + "i"(__NR_exit) + : "memory", "$29" ); + return res; } -// This thing is not, strictly speaking, async signal safe, but it does not seem -// to cause any issues. Alternative is writing to log devices directly, but -// their location and message format might change in the future, so we'd really -// like to avoid that. -void AndroidLogWrite(const char *buffer) { - if (!atomic_load(&android_log_initialized, memory_order_acquire)) - return; +#elif defined(__aarch64__) +uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { + long long res; + if (!fn || !child_stack) + return -EINVAL; + CHECK_EQ(0, (uptr)child_stack % 16); + child_stack = (char *)child_stack - 2 * sizeof(unsigned long long); + ((unsigned long long *)child_stack)[0] = (uptr)fn; + ((unsigned long long *)child_stack)[1] = (uptr)arg; - char *copy = internal_strdup(buffer); - char *p = copy; - char *q; - // __android_log_write has an implicit message length limit. - // Print one line at a time. - do { - q = internal_strchr(p, '\n'); - if (q) *q = '\0'; - __android_log_write(ANDROID_LOG_INFO, NULL, p); - if (q) p = q + 1; - } while (q); - InternalFree(copy); + register int (*__fn)(void *) __asm__("x0") = fn; + register void *__stack __asm__("x1") = child_stack; + register int __flags __asm__("x2") = flags; + register void *__arg __asm__("x3") = arg; + register int *__ptid __asm__("x4") = parent_tidptr; + register void *__tls __asm__("x5") = newtls; + register int *__ctid __asm__("x6") = child_tidptr; + + __asm__ __volatile__( + "mov x0,x2\n" /* flags */ + "mov x2,x4\n" /* ptid */ + "mov x3,x5\n" /* tls */ + "mov x4,x6\n" /* ctid */ + "mov x8,%9\n" /* clone */ + + "svc 0x0\n" + + /* if (%r0 != 0) + * return %r0; + */ + "cmp x0, #0\n" + "bne 1f\n" + + /* In the child, now. Call "fn(arg)". */ + "ldp x1, x0, [sp], #16\n" + "blr x1\n" + + /* Call _exit(%r0). 
*/ + "mov x8, %10\n" + "svc 0x0\n" + "1:\n" + + : "=r" (res) + : "i"(-EINVAL), + "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg), + "r"(__ptid), "r"(__tls), "r"(__ctid), + "i"(__NR_clone), "i"(__NR_exit) + : "x30", "memory"); + return res; } +#endif // defined(__x86_64__) && SANITIZER_LINUX +#if SANITIZER_ANDROID +#define PROP_VALUE_MAX 92 +extern "C" SANITIZER_WEAK_ATTRIBUTE int __system_property_get(const char *name, + char *value); void GetExtraActivationFlags(char *buf, uptr size) { CHECK(size > PROP_VALUE_MAX); + CHECK(&__system_property_get); __system_property_get("asan.options", buf); } + +#if __ANDROID_API__ < 21 +extern "C" __attribute__((weak)) int dl_iterate_phdr( + int (*)(struct dl_phdr_info *, size_t, void *), void *); +#endif + +static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size, + void *data) { + // Any name starting with "lib" indicates a bug in L where library base names + // are returned instead of paths. + if (info->dlpi_name && info->dlpi_name[0] == 'l' && + info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') { + *(bool *)data = true; + return 1; + } + return 0; +} + +static atomic_uint32_t android_api_level; + +static AndroidApiLevel AndroidDetectApiLevel() { + if (!&dl_iterate_phdr) + return ANDROID_KITKAT; // K or lower + bool base_name_seen = false; + dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen); + if (base_name_seen) + return ANDROID_LOLLIPOP_MR1; // L MR1 + return ANDROID_POST_LOLLIPOP; // post-L + // Plain L (API level 21) is completely broken wrt ASan and not very + // interesting to detect. +} + +AndroidApiLevel AndroidGetApiLevel() { + AndroidApiLevel level = + (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed); + if (level) return level; + level = AndroidDetectApiLevel(); + atomic_store(&android_api_level, level, memory_order_relaxed); + return level; +} + #endif bool IsDeadlySignal(int signum) { - return (signum == SIGSEGV) && common_flags()->handle_segv; + if (common_flags()->handle_abort && signum == SIGABRT) + return true; + if (common_flags()->handle_sigfpe && signum == SIGFPE) + return true; + return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv; +} + +#ifndef SANITIZER_GO +void *internal_start_thread(void(*func)(void *arg), void *arg) { + // Start the thread with signals blocked, otherwise it can steal user signals. + __sanitizer_sigset_t set, old; + internal_sigfillset(&set); +#if SANITIZER_LINUX && !SANITIZER_ANDROID + // Glibc uses SIGSETXID signal during setuid call. If this signal is blocked + // on any thread, setuid call hangs (see test/tsan/setuid.c). 
+ internal_sigdelset(&set, 33); +#endif + internal_sigprocmask(SIG_SETMASK, &set, &old); + void *th; + real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg); + internal_sigprocmask(SIG_SETMASK, &old, nullptr); + return th; +} + +void internal_join_thread(void *th) { + real_pthread_join(th, nullptr); +} +#else +void *internal_start_thread(void (*func)(void *), void *arg) { return 0; } + +void internal_join_thread(void *th) {} +#endif + +void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { +#if defined(__arm__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.arm_pc; + *bp = ucontext->uc_mcontext.arm_fp; + *sp = ucontext->uc_mcontext.arm_sp; +#elif defined(__aarch64__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.regs[29]; + *sp = ucontext->uc_mcontext.sp; +#elif defined(__hppa__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.sc_iaoq[0]; + /* GCC uses %r3 whenever a frame pointer is needed. */ + *bp = ucontext->uc_mcontext.sc_gr[3]; + *sp = ucontext->uc_mcontext.sc_gr[30]; +#elif defined(__x86_64__) +# if SANITIZER_FREEBSD + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.mc_rip; + *bp = ucontext->uc_mcontext.mc_rbp; + *sp = ucontext->uc_mcontext.mc_rsp; +# else + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.gregs[REG_RIP]; + *bp = ucontext->uc_mcontext.gregs[REG_RBP]; + *sp = ucontext->uc_mcontext.gregs[REG_RSP]; +# endif +#elif defined(__i386__) +# if SANITIZER_FREEBSD + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.mc_eip; + *bp = ucontext->uc_mcontext.mc_ebp; + *sp = ucontext->uc_mcontext.mc_esp; +# else + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.gregs[REG_EIP]; + *bp = ucontext->uc_mcontext.gregs[REG_EBP]; + *sp = ucontext->uc_mcontext.gregs[REG_ESP]; +# endif +#elif defined(__powerpc__) || defined(__powerpc64__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.regs->nip; + *sp = ucontext->uc_mcontext.regs->gpr[PT_R1]; + // The powerpc{,64}-linux ABIs do not specify r31 as the frame + // pointer, but GCC always uses r31 when we need a frame pointer. 
+ *bp = ucontext->uc_mcontext.regs->gpr[PT_R31]; +#elif defined(__sparc__) + ucontext_t *ucontext = (ucontext_t*)context; + uptr *stk_ptr; +# if defined (__arch64__) + *pc = ucontext->uc_mcontext.mc_gregs[MC_PC]; + *sp = ucontext->uc_mcontext.mc_gregs[MC_O6]; + stk_ptr = (uptr *) (*sp + 2047); + *bp = stk_ptr[15]; +# else + *pc = ucontext->uc_mcontext.gregs[REG_PC]; + *sp = ucontext->uc_mcontext.gregs[REG_O6]; + stk_ptr = (uptr *) *sp; + *bp = stk_ptr[15]; +# endif +#elif defined(__mips__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.gregs[30]; + *sp = ucontext->uc_mcontext.gregs[29]; +#else +# error "Unsupported arch" +#endif } -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h index 086834c3a2f..44977020bce 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux.h +++ b/libsanitizer/sanitizer_common/sanitizer_linux.h @@ -15,6 +15,7 @@ #if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_internal_defs.h" +#include "sanitizer_posix.h" #include "sanitizer_platform_limits_posix.h" struct link_map; // Opaque type returned by dlopen(). @@ -41,7 +42,7 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5); // internal_sigaction instead. int internal_sigaction_norestorer(int signum, const void *act, void *oldact); void internal_sigdelset(__sanitizer_sigset_t *set, int signum); -#if defined(__x86_64__) +#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, int *parent_tidptr, void *newtls, int *child_tidptr); #endif @@ -78,11 +79,6 @@ uptr ThreadSelfOffset(); // information). bool LibraryNameIs(const char *full_name, const char *base_name); -// Read the name of the current binary from /proc/self/exe. -uptr ReadBinaryName(/*out*/char *buf, uptr buf_len); -// Cache the value of /proc/self/exe. -void CacheBinaryName(); - // Call cb for each region mapped by map. 
void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)); } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc index 36aaafdcc4b..d876e62fa74 100644 --- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc @@ -11,8 +11,11 @@ //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" + #if SANITIZER_FREEBSD || SANITIZER_LINUX +#include "sanitizer_allocator_internal.h" +#include "sanitizer_atomic.h" #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_freebsd.h" @@ -20,13 +23,12 @@ #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" -#include "sanitizer_atomic.h" -#include "sanitizer_symbolizer.h" #if SANITIZER_ANDROID || SANITIZER_FREEBSD #include <dlfcn.h> // for dlsym() #endif +#include <link.h> #include <pthread.h> #include <signal.h> #include <sys/resource.h> @@ -41,9 +43,18 @@ #include <sys/prctl.h> #endif +#if SANITIZER_ANDROID +#include <android/api-level.h> +#endif + +#if SANITIZER_ANDROID && __ANDROID_API__ < 21 +#include <android/log.h> +#else +#include <syslog.h> +#endif + #if !SANITIZER_ANDROID #include <elf.h> -#include <link.h> #include <unistd.h> #endif @@ -53,11 +64,13 @@ namespace __sanitizer { extern "C" { SANITIZER_WEAK_ATTRIBUTE int real_pthread_attr_getstack(void *attr, void **addr, size_t *size); -} // extern "C" +} // extern "C" static int my_pthread_attr_getstack(void *attr, void **addr, size_t *size) { - if (real_pthread_attr_getstack) +#if !SANITIZER_GO + if (&real_pthread_attr_getstack) return real_pthread_attr_getstack((pthread_attr_t *)attr, addr, size); +#endif return pthread_attr_getstack((pthread_attr_t *)attr, addr, size); } @@ -65,9 +78,12 @@ SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act, void *oldact); int internal_sigaction(int signum, const void *act, void *oldact) { - if (real_sigaction) +#if !SANITIZER_GO + if (&real_sigaction) return real_sigaction(signum, act, oldact); - return sigaction(signum, (struct sigaction *)act, (struct sigaction *)oldact); +#endif + return sigaction(signum, (const struct sigaction *)act, + (struct sigaction *)oldact); } void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, @@ -83,7 +99,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr start, end, offset; uptr prev_end = 0; - while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) { + while (proc_maps.Next(&start, &end, &offset, nullptr, 0, + /* protection */nullptr)) { if ((uptr)&rl < end) break; prev_end = end; @@ -108,7 +125,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, pthread_attr_init(&attr); CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0); uptr stacksize = 0; - void *stackaddr = 0; + void *stackaddr = nullptr; my_pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize); pthread_attr_destroy(&attr); @@ -117,16 +134,18 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, *stack_bottom = (uptr)stackaddr; } +#if !SANITIZER_GO bool SetEnv(const char *name, const char *value) { void *f = dlsym(RTLD_NEXT, "setenv"); - if (f == 0) + if (!f) return false; typedef int(*setenv_ft)(const char *name, const char *value, int overwrite); setenv_ft setenv_f; 
CHECK_EQ(sizeof(setenv_f), sizeof(f)); internal_memcpy(&setenv_f, &f, sizeof(f)); - return IndirectExternCall(setenv_f)(name, value, 1) == 0; + return setenv_f(name, value, 1) == 0; } +#endif bool SanitizerSetThreadName(const char *name) { #ifdef PR_SET_NAME @@ -149,7 +168,7 @@ bool SanitizerGetThreadName(char *name, int max_len) { #endif } -#if !SANITIZER_FREEBSD +#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO static uptr g_tls_size; #endif @@ -159,8 +178,24 @@ static uptr g_tls_size; # define DL_INTERNAL_FUNCTION #endif +#if defined(__mips__) +// TlsPreTcbSize includes the size of struct pthread_descr and the size of +// the TCB head structure. It lies before the static TLS blocks. +static uptr TlsPreTcbSize() { + const uptr kTcbHead = 16; + const uptr kTlsAlign = 16; + const uptr kTlsPreTcbSize = + (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1); + InitTlsSize(); + g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1); + return kTlsPreTcbSize; +} +#endif + void InitTlsSize() { -#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID +#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO +// All currently supported platforms have 16-byte stack alignment. + const size_t kStackAlign = 16; typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION; get_tls_func get_tls; void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); @@ -170,12 +205,16 @@ void InitTlsSize() { CHECK_NE(get_tls, 0); size_t tls_size = 0; size_t tls_align = 0; - IndirectExternCall(get_tls)(&tls_size, &tls_align); - g_tls_size = tls_size; -#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID + get_tls(&tls_size, &tls_align); + if (tls_align < kStackAlign) + tls_align = kStackAlign; + g_tls_size = RoundUpTo(tls_size, tls_align); +#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO } -#if (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX +#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \ + || defined(__aarch64__)) \ + && SANITIZER_LINUX && !SANITIZER_ANDROID // sizeof(struct thread) from glibc. static atomic_uintptr_t kThreadDescriptorSize; @@ -183,6 +222,7 @@ uptr ThreadDescriptorSize() { uptr val = atomic_load(&kThreadDescriptorSize, memory_order_relaxed); if (val) return val; +#if defined(__x86_64__) || defined(__i386__) #ifdef _CS_GNU_LIBC_VERSION char buf[64]; uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf)); @@ -205,6 +245,8 @@ uptr ThreadDescriptorSize() { val = FIRST_32_SECOND_64(1168, 1776); else if (minor <= 12) val = FIRST_32_SECOND_64(1168, 2288); + else if (minor == 13) + val = FIRST_32_SECOND_64(1168, 2304); else val = FIRST_32_SECOND_64(1216, 2304); } @@ -213,6 +255,18 @@ uptr ThreadDescriptorSize() { return val; } #endif +#elif defined(__mips__) + // TODO(sagarthakur): add more values as per different glibc versions. + val = FIRST_32_SECOND_64(1152, 1776); + if (val) + atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); + return val; +#elif defined(__aarch64__) + // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22. + val = 1776; + atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed); + return val; +#endif return 0; } @@ -229,12 +283,26 @@ uptr ThreadSelf() { asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset)); # elif defined(__x86_64__) asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset)); +# elif defined(__mips__) + // MIPS uses TLS variant I.
The thread pointer (in hardware register $29) + // points to the end of the TCB + 0x7000. The pthread_descr structure is + // immediately in front of the TCB. TlsPreTcbSize() includes the size of the + // TCB and the size of pthread_descr. + const uptr kTlsTcbOffset = 0x7000; + uptr thread_pointer; + asm volatile(".set push;\ + .set mips64r2;\ + rdhwr %0,$29;\ + .set pop" : "=r" (thread_pointer)); + descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize(); +# elif defined(__aarch64__) + descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()); # else # error "unsupported CPU arch" # endif return descr_addr; } -#endif // (defined(__x86_64__) || defined(__i386__)) && SANITIZER_LINUX +#endif // (x86_64 || i386 || MIPS) && SANITIZER_LINUX #if SANITIZER_FREEBSD static void **ThreadSelfSegbase() { @@ -256,13 +324,17 @@ uptr ThreadSelf() { } #endif // SANITIZER_FREEBSD +#if !SANITIZER_GO static void GetTls(uptr *addr, uptr *size) { -#if SANITIZER_LINUX +#if SANITIZER_LINUX && !SANITIZER_ANDROID # if defined(__x86_64__) || defined(__i386__) *addr = ThreadSelf(); *size = GetTlsSize(); *addr -= *size; *addr += ThreadDescriptorSize(); +# elif defined(__mips__) || defined(__aarch64__) + *addr = ThreadSelf(); + *size = GetTlsSize(); # else *addr = 0; *size = 0; @@ -280,13 +352,18 @@ static void GetTls(uptr *addr, uptr *size) { *addr = (uptr) dtv[2]; *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]); } +#elif SANITIZER_ANDROID + *addr = 0; + *size = 0; #else # error "Unknown OS" #endif } +#endif +#if !SANITIZER_GO uptr GetTlsSize() { -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_ANDROID uptr addr, size; GetTls(&addr, &size); return size; @@ -294,9 +371,14 @@ uptr GetTlsSize() { return g_tls_size; #endif } +#endif void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, uptr *tls_addr, uptr *tls_size) { +#if SANITIZER_GO + // Stub implementation for Go. + *stk_addr = *stk_size = *tls_addr = *tls_size = 0; +#else GetTls(tls_addr, tls_size); uptr stack_top, stack_bottom; @@ -313,8 +395,10 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, *tls_addr = *stk_addr + *stk_size; } } +#endif } +#if !SANITIZER_GO void AdjustStackSize(void *attr_) { pthread_attr_t *attr = (pthread_attr_t *)attr_; uptr stackaddr = 0; @@ -339,14 +423,8 @@ void AdjustStackSize(void *attr_) { } } } +#endif // !SANITIZER_GO -#if SANITIZER_ANDROID -uptr GetListOfModules(LoadedModule *modules, uptr max_modules, - string_predicate_t filter) { - MemoryMappingLayout memory_mapping(false); - return memory_mapping.DumpListOfModules(modules, max_modules, filter); -} -#else // SANITIZER_ANDROID # if !SANITIZER_FREEBSD typedef ElfW(Phdr) Elf_Phdr; # elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2 @@ -367,22 +445,20 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { DlIteratePhdrData *data = (DlIteratePhdrData*)arg; if (data->current_n == data->max_n) return 0; - InternalScopedBuffer<char> module_name(kMaxPathLength); - module_name.data()[0] = '\0'; + InternalScopedString module_name(kMaxPathLength); if (data->first) { data->first = false; // First module is the binary itself. 
- ReadBinaryName(module_name.data(), module_name.size()); + ReadBinaryNameCached(module_name.data(), module_name.size()); } else if (info->dlpi_name) { - internal_strncpy(module_name.data(), info->dlpi_name, module_name.size()); + module_name.append("%s", info->dlpi_name); } - if (module_name.data()[0] == '\0') + if (module_name[0] == '\0') return 0; if (data->filter && !data->filter(module_name.data())) return 0; - void *mem = &data->modules[data->current_n]; - LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(), - info->dlpi_addr); + LoadedModule *cur_module = &data->modules[data->current_n]; + cur_module->set(module_name.data(), info->dlpi_addr); data->current_n++; for (int i = 0; i < info->dlpi_phnum; i++) { const Elf_Phdr *phdr = &info->dlpi_phdr[i]; @@ -396,36 +472,117 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) { return 0; } +#if SANITIZER_ANDROID && __ANDROID_API__ < 21 +extern "C" __attribute__((weak)) int dl_iterate_phdr( + int (*)(struct dl_phdr_info *, size_t, void *), void *); +#endif + uptr GetListOfModules(LoadedModule *modules, uptr max_modules, string_predicate_t filter) { +#if SANITIZER_ANDROID && __ANDROID_API__ <= 22 + u32 api_level = AndroidGetApiLevel(); + // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken. + // The runtime check allows the same library to work with + // both K and L (and future) Android releases. + if (api_level <= ANDROID_LOLLIPOP_MR1) { // L or earlier + MemoryMappingLayout memory_mapping(false); + return memory_mapping.DumpListOfModules(modules, max_modules, filter); + } +#endif CHECK(modules); DlIteratePhdrData data = {modules, 0, true, max_modules, filter}; dl_iterate_phdr(dl_iterate_phdr_cb, &data); return data.current_n; } -#endif // SANITIZER_ANDROID -uptr indirect_call_wrapper; +// getrusage does not give us the current RSS, only the max RSS. +// Still, this is better than nothing if /proc/self/statm is not available +// for some reason, e.g. due to a sandbox. +static uptr GetRSSFromGetrusage() { + struct rusage usage; + if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox. + return 0; + return usage.ru_maxrss << 10; // ru_maxrss is in Kb. +} + +uptr GetRSS() { + if (!common_flags()->can_use_proc_maps_statm) + return GetRSSFromGetrusage(); + fd_t fd = OpenFile("/proc/self/statm", RdOnly); + if (fd == kInvalidFd) + return GetRSSFromGetrusage(); + char buf[64]; + uptr len = internal_read(fd, buf, sizeof(buf) - 1); + internal_close(fd); + if ((sptr)len <= 0) + return 0; + buf[len] = 0; + // The format of the file is: + // 1084 89 69 11 0 79 0 + // We need the second number which is RSS in pages. + char *pos = buf; + // Skip the first number. + while (*pos >= '0' && *pos <= '9') + pos++; + // Skip whitespaces. + while (!(*pos >= '0' && *pos <= '9') && *pos != 0) + pos++; + // Read the number. + uptr rss = 0; + while (*pos >= '0' && *pos <= '9') + rss = rss * 10 + *pos++ - '0'; + return rss * GetPageSizeCached(); +} + +// 64-bit Android targets don't provide the deprecated __android_log_write. +// Starting with the L release, syslog() works and is preferable to +// __android_log_write. 
+#if SANITIZER_LINUX + +#if SANITIZER_ANDROID +static atomic_uint8_t android_log_initialized; -void SetIndirectCallWrapper(uptr wrapper) { - CHECK(!indirect_call_wrapper); - CHECK(wrapper); - indirect_call_wrapper = wrapper; +void AndroidLogInit() { + atomic_store(&android_log_initialized, 1, memory_order_release); } -void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { - // Some kinds of sandboxes may forbid filesystem access, so we won't be able - // to read the file mappings from /proc/self/maps. Luckily, neither the - // process will be able to load additional libraries, so it's fine to use the - // cached mappings. - MemoryMappingLayout::CacheMemoryMappings(); - // Same for /proc/self/exe in the symbolizer. -#if !SANITIZER_GO - Symbolizer::GetOrInit()->PrepareForSandboxing(); - CovPrepareForSandboxing(args); +static bool IsSyslogAvailable() { + return atomic_load(&android_log_initialized, memory_order_acquire); +} +#else +void AndroidLogInit() {} + +static bool IsSyslogAvailable() { return true; } +#endif // SANITIZER_ANDROID + +static void WriteOneLineToSyslog(const char *s) { +#if SANITIZER_ANDROID &&__ANDROID_API__ < 21 + __android_log_write(ANDROID_LOG_INFO, NULL, s); +#else + syslog(LOG_INFO, "%s", s); #endif } -} // namespace __sanitizer +void WriteToSyslog(const char *buffer) { + if (!IsSyslogAvailable()) + return; + char *copy = internal_strdup(buffer); + char *p = copy; + char *q; + // syslog, at least on Android, has an implicit message length limit. + // Print one line at a time. + do { + q = internal_strchr(p, '\n'); + if (q) + *q = '\0'; + WriteOneLineToSyslog(p); + if (q) + p = q + 1; + } while (q); + InternalFree(copy); +} +#endif // SANITIZER_LINUX + +} // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/sanitizer_common/sanitizer_list.h b/libsanitizer/sanitizer_common/sanitizer_list.h index b72c548e385..9216ede67fb 100644 --- a/libsanitizer/sanitizer_common/sanitizer_list.h +++ b/libsanitizer/sanitizer_common/sanitizer_list.h @@ -9,6 +9,7 @@ // ThreadSanitizer, etc run-times. 
// //===----------------------------------------------------------------------===// + #ifndef SANITIZER_LIST_H #define SANITIZER_LIST_H @@ -27,7 +28,7 @@ struct IntrusiveList { friend class Iterator; void clear() { - first_ = last_ = 0; + first_ = last_ = nullptr; size_ = 0; } @@ -36,11 +37,11 @@ struct IntrusiveList { void push_back(Item *x) { if (empty()) { - x->next = 0; + x->next = nullptr; first_ = last_ = x; size_ = 1; } else { - x->next = 0; + x->next = nullptr; last_->next = x; last_ = x; size_++; @@ -49,7 +50,7 @@ struct IntrusiveList { void push_front(Item *x) { if (empty()) { - x->next = 0; + x->next = nullptr; first_ = last_ = x; size_ = 1; } else { @@ -62,8 +63,8 @@ struct IntrusiveList { void pop_front() { CHECK(!empty()); first_ = first_->next; - if (first_ == 0) - last_ = 0; + if (!first_) + last_ = nullptr; size_--; } @@ -113,27 +114,31 @@ struct IntrusiveList { } } - class Iterator { + template<class ListTy, class ItemTy> + class IteratorBase { public: - explicit Iterator(IntrusiveList<Item> *list) + explicit IteratorBase(ListTy *list) : list_(list), current_(list->first_) { } - Item *next() { - Item *ret = current_; + ItemTy *next() { + ItemTy *ret = current_; if (current_) current_ = current_->next; return ret; } - bool hasNext() const { return current_ != 0; } + bool hasNext() const { return current_ != nullptr; } private: - IntrusiveList<Item> *list_; - Item *current_; + ListTy *list_; + ItemTy *current_; }; + typedef IteratorBase<IntrusiveList<Item>, Item> Iterator; + typedef IteratorBase<const IntrusiveList<Item>, const Item> ConstIterator; + // private, don't use directly. uptr size_; Item *first_; Item *last_; }; -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_LIST_H +#endif // SANITIZER_LIST_H diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc index 17b931cb535..8cc07f12788 100644 --- a/libsanitizer/sanitizer_common/sanitizer_mac.cc +++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc @@ -25,21 +25,30 @@ #include "sanitizer_libc.h" #include "sanitizer_mac.h" #include "sanitizer_placement_new.h" +#include "sanitizer_platform_limits_posix.h" #include "sanitizer_procmaps.h" +#if !SANITIZER_IOS #include <crt_externs.h> // for _NSGetEnviron +#else +extern char **environ; +#endif + +#include <errno.h> #include <fcntl.h> +#include <libkern/OSAtomic.h> +#include <mach-o/dyld.h> +#include <mach/mach.h> #include <pthread.h> #include <sched.h> #include <signal.h> +#include <stdlib.h> #include <sys/mman.h> #include <sys/resource.h> #include <sys/stat.h> #include <sys/sysctl.h> #include <sys/types.h> #include <unistd.h> -#include <libkern/OSAtomic.h> -#include <errno.h> namespace __sanitizer { @@ -55,6 +64,10 @@ uptr internal_munmap(void *addr, uptr length) { return munmap(addr, length); } +int internal_mprotect(void *addr, uptr length, int prot) { + return mprotect(addr, length, prot); +} + uptr internal_close(fd_t fd) { return close(fd); } @@ -67,11 +80,6 @@ uptr internal_open(const char *filename, int flags, u32 mode) { return open(filename, flags, mode); } -uptr OpenFile(const char *filename, bool write) { - return internal_open(filename, - write ? 
O_WRONLY | O_CREAT : O_RDONLY, 0660); -} - uptr internal_read(fd_t fd, void *buf, uptr count) { return read(fd, buf, count); } @@ -107,6 +115,10 @@ uptr internal_readlink(const char *path, char *buf, uptr bufsize) { return readlink(path, buf, bufsize); } +uptr internal_unlink(const char *path) { + return unlink(path); +} + uptr internal_sched_yield() { return sched_yield(); } @@ -124,6 +136,13 @@ int internal_sigaction(int signum, const void *act, void *oldact) { (struct sigaction *)act, (struct sigaction *)oldact); } +void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); } + +uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set, + __sanitizer_sigset_t *oldset) { + return sigprocmask(how, set, oldset); +} + int internal_fork() { // TODO(glider): this may call user's pthread_atfork() handlers which is bad. return fork(); @@ -174,7 +193,8 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, *stack_bottom = *stack_top - stacksize; } -const char *GetEnv(const char *name) { +char **GetEnviron() { +#if !SANITIZER_IOS char ***env_ptr = _NSGetEnviron(); if (!env_ptr) { Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is " @@ -182,37 +202,53 @@ const char *GetEnv(const char *name) { CHECK(env_ptr); } char **environ = *env_ptr; +#endif CHECK(environ); + return environ; +} + +const char *GetEnv(const char *name) { + char **env = GetEnviron(); uptr name_len = internal_strlen(name); - while (*environ != 0) { - uptr len = internal_strlen(*environ); + while (*env != 0) { + uptr len = internal_strlen(*env); if (len > name_len) { - const char *p = *environ; + const char *p = *env; if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') { // Match. - return *environ + name_len + 1; // String starting after =. + return *env + name_len + 1; // String starting after =. } } - environ++; + env++; } return 0; } -void ReExec() { - UNIMPLEMENTED(); +uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { + CHECK_LE(kMaxPathLength, buf_len); + + // On OS X the executable path is saved to the stack by dyld. Reading it + // from there is much faster than calling dladdr, especially for large + // binaries with symbols. + InternalScopedString exe_path(kMaxPathLength); + uint32_t size = exe_path.size(); + if (_NSGetExecutablePath(exe_path.data(), &size) == 0 && + realpath(exe_path.data(), buf) != 0) { + return internal_strlen(buf); + } + return 0; } -void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { - (void)args; - // Nothing here for now. 
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { + return ReadBinaryName(buf, buf_len); } -uptr GetPageSize() { - return sysconf(_SC_PAGESIZE); +void ReExec() { + UNIMPLEMENTED(); } -BlockingMutex::BlockingMutex(LinkerInitialized) { - // We assume that OS_SPINLOCK_INIT is zero +uptr GetPageSize() { + return sysconf(_SC_PAGESIZE); } BlockingMutex::BlockingMutex() { @@ -296,7 +332,11 @@ MacosVersion GetMacosVersionInternal() { case '2': return MACOS_VERSION_MOUNTAIN_LION; case '3': return MACOS_VERSION_MAVERICKS; case '4': return MACOS_VERSION_YOSEMITE; - default: return MACOS_VERSION_UNKNOWN; + default: + if (IsDigit(version[1])) + return MACOS_VERSION_UNKNOWN_NEWER; + else + return MACOS_VERSION_UNKNOWN; } } default: return MACOS_VERSION_UNKNOWN; @@ -315,6 +355,48 @@ MacosVersion GetMacosVersion() { return result; } +uptr GetRSS() { + struct task_basic_info info; + unsigned count = TASK_BASIC_INFO_COUNT; + kern_return_t result = + task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count); + if (UNLIKELY(result != KERN_SUCCESS)) { + Report("Cannot get task info. Error: %d\n", result); + Die(); + } + return info.resident_size; +} + +void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } +void internal_join_thread(void *th) { } + +void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + ucontext_t *ucontext = (ucontext_t*)context; +# if defined(__aarch64__) + *pc = ucontext->uc_mcontext->__ss.__pc; +# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0 + *bp = ucontext->uc_mcontext->__ss.__fp; +# else + *bp = ucontext->uc_mcontext->__ss.__lr; +# endif + *sp = ucontext->uc_mcontext->__ss.__sp; +# elif defined(__x86_64__) + *pc = ucontext->uc_mcontext->__ss.__rip; + *bp = ucontext->uc_mcontext->__ss.__rbp; + *sp = ucontext->uc_mcontext->__ss.__rsp; +# elif defined(__arm__) + *pc = ucontext->uc_mcontext->__ss.__pc; + *bp = ucontext->uc_mcontext->__ss.__r[7]; + *sp = ucontext->uc_mcontext->__ss.__sp; +# elif defined(__i386__) + *pc = ucontext->uc_mcontext->__ss.__eip; + *bp = ucontext->uc_mcontext->__ss.__ebp; + *sp = ucontext->uc_mcontext->__ss.__esp; +# else +# error "Unknown architecture" +# endif +} + } // namespace __sanitizer #endif // SANITIZER_MAC diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h index 47739f71c3c..2aac83e3657 100644 --- a/libsanitizer/sanitizer_common/sanitizer_mac.h +++ b/libsanitizer/sanitizer_common/sanitizer_mac.h @@ -13,6 +13,7 @@ #include "sanitizer_platform.h" #if SANITIZER_MAC +#include "sanitizer_posix.h" namespace __sanitizer { @@ -25,10 +26,13 @@ enum MacosVersion { MACOS_VERSION_MOUNTAIN_LION, MACOS_VERSION_MAVERICKS, MACOS_VERSION_YOSEMITE, + MACOS_VERSION_UNKNOWN_NEWER }; MacosVersion GetMacosVersion(); +char **GetEnviron(); + } // namespace __sanitizer #endif // SANITIZER_MAC diff --git a/libsanitizer/sanitizer_common/sanitizer_mutex.h b/libsanitizer/sanitizer_common/sanitizer_mutex.h index adc3add6008..75f495a4d65 100644 --- a/libsanitizer/sanitizer_common/sanitizer_mutex.h +++ b/libsanitizer/sanitizer_common/sanitizer_mutex.h @@ -71,7 +71,13 @@ class SpinMutex : public StaticSpinMutex { class BlockingMutex { public: +#if SANITIZER_WINDOWS + // Windows does not currently support LinkerInitialized explicit BlockingMutex(LinkerInitialized); +#else + explicit constexpr BlockingMutex(LinkerInitialized) + : opaque_storage_ {0, }, owner_(0) {} +#endif BlockingMutex(); void Lock(); void Unlock(); diff --git 
a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h index e29b7bd57aa..71185033eff 100644 --- a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h +++ b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h @@ -8,6 +8,7 @@ // A fast memory allocator that does not support free() nor realloc(). // All allocations are forever. //===----------------------------------------------------------------------===// + #ifndef SANITIZER_PERSISTENT_ALLOCATOR_H #define SANITIZER_PERSISTENT_ALLOCATOR_H @@ -34,7 +35,7 @@ inline void *PersistentAllocator::tryAlloc(uptr size) { for (;;) { uptr cmp = atomic_load(&region_pos, memory_order_acquire); uptr end = atomic_load(&region_end, memory_order_acquire); - if (cmp == 0 || cmp + size > end) return 0; + if (cmp == 0 || cmp + size > end) return nullptr; if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size, memory_order_acquire)) return (void *)cmp; @@ -66,4 +67,4 @@ inline void *PersistentAlloc(uptr sz) { } // namespace __sanitizer -#endif // SANITIZER_PERSISTENT_ALLOCATOR_H +#endif // SANITIZER_PERSISTENT_ALLOCATOR_H diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h index e0e740225e2..6a4eab333cd 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform.h @@ -36,9 +36,15 @@ # else # define SANITIZER_IOS 0 # endif +# if TARGET_IPHONE_SIMULATOR +# define SANITIZER_IOSSIM 1 +# else +# define SANITIZER_IOSSIM 0 +# endif #else # define SANITIZER_MAC 0 # define SANITIZER_IOS 0 +# define SANITIZER_IOSSIM 0 #endif #if defined(_WIN32) @@ -73,13 +79,23 @@ # define SANITIZER_X32 0 #endif +// VMA size definition for architectures that support multiple sizes. +// AArch64 has 3 VMA sizes: 39, 42 and 48. +#if !defined(SANITIZER_AARCH64_VMA) +# define SANITIZER_AARCH64_VMA 39 +#else +# if SANITIZER_AARCH64_VMA != 39 && SANITIZER_AARCH64_VMA != 42 +# error "invalid SANITIZER_AARCH64_VMA size" +# endif +#endif + // By default we allow to use SizeClassAllocator64 on 64-bit platform. // But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64 // does not work well and we need to fallback to SizeClassAllocator32. // For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or // change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here. #ifndef SANITIZER_CAN_USE_ALLOCATOR64 -# if defined(__aarch64__) || defined(__mips64) +# if defined(__mips64) || (defined(__aarch64__) && SANITIZER_AARCH64_VMA == 39) # define SANITIZER_CAN_USE_ALLOCATOR64 0 # else # define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64) @@ -91,7 +107,13 @@ // e.g. on AArch64 it is most likely (1ULL << 39). Larger values will still work // but will consume more memory for TwoLevelByteMap.
#if defined(__aarch64__) -# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 39) +# if SANITIZER_AARCH64_VMA == 39 +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 39) +# elif SANITIZER_AARCH64_VMA == 42 +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 42) +# endif +#elif defined(__mips__) +# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40) #else # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) #endif @@ -120,8 +142,10 @@ #define SANITIZER_USES_UID16_SYSCALLS 0 #endif -#ifdef __mips__ +#if defined(__mips__) || (defined(__aarch64__) && SANITIZER_AARCH64_VMA == 39) # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10) +#elif defined(__aarch64__) && SANITIZER_AARCH64_VMA == 42 +# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 11) #else # define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12) #endif @@ -132,4 +156,15 @@ # define HAVE_TIRPC_RPC_XDR_H 0 #endif +/// \macro MSC_PREREQ +/// \brief Is the compiler MSVC of at least the specified version? +/// The common \param version values to check for are: +/// * 1800: Microsoft Visual Studio 2013 / 12.0 +/// * 1900: Microsoft Visual Studio 2015 / 14.0 +#ifdef _MSC_VER +# define MSC_PREREQ(version) (_MSC_VER >= (version)) +#else +# define MSC_PREREQ(version) 0 +#endif + #endif // SANITIZER_PLATFORM_H diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h index fc2d7ba990c..c8adc5fcaed 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h @@ -52,10 +52,15 @@ #endif #define SANITIZER_INTERCEPT_STRCMP 1 +#define SANITIZER_INTERCEPT_STRSTR 1 +#define SANITIZER_INTERCEPT_STRCASESTR SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_STRSPN 1 +#define SANITIZER_INTERCEPT_STRPBRK 1 #define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_MEMCMP 1 #define SANITIZER_INTERCEPT_MEMCHR 1 -#define SANITIZER_INTERCEPT_MEMRCHR SI_LINUX +#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX #define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS @@ -68,7 +73,7 @@ #define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_PREADV SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID @@ -83,6 +88,7 @@ #ifndef SANITIZER_INTERCEPT_PRINTF # define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS +# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD # define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID #endif @@ -91,12 +97,13 @@ #define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \ - SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETPWENT SI_MAC || SI_LINUX_NOT_ANDROID + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETPWENT \ + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETPWENT_R SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETPWENT_R 
SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_LINUX +#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX #define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID @@ -107,10 +114,10 @@ #define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_LINUX -#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX +#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID @@ -123,7 +130,8 @@ #define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \ - (defined(__i386) || defined (__x86_64)) // NOLINT + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__)) #define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID @@ -131,21 +139,28 @@ #define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_WCSNRTOMBS SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_WCSNRTOMBS \ + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_WCRTOMB \ + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_CONFSTR SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_CONFSTR \ + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_SCANDIR SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_SCANDIR \ + SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_WORDEXP (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_WORDEXP \ + SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID #define 
SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID @@ -153,24 +168,25 @@ SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_BACKTRACE SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_BACKTRACE SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX #define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_STATFS SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STATFS64 \ (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_STATVFS SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_ETHER_HOST SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_ETHER_R SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_ETHER_HOST \ + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_SHMCTL \ - (SI_LINUX_NOT_ANDROID && SANITIZER_WORDSIZE == 64) + ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64) #define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \ - SI_MAC || SI_LINUX_NOT_ANDROID + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS @@ -191,15 +207,16 @@ #define SANITIZER_INTERCEPT_SINCOS SI_LINUX #define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS -#define SANITIZER_INTERCEPT_LGAMMA_R SI_LINUX +#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX #define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_RAND_R SI_MAC || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_ICONV SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_RAND_R \ + SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_ICONV SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS // FIXME: getline seems to be available on OSX 10.7 -#define SANITIZER_INTERCEPT_GETLINE SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_GETLINE SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD @@ -207,17 +224,20 @@ #define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \ SI_FREEBSD || SI_LINUX_NOT_ANDROID -#define SANITIZER_INTERCEPT_TLS_GET_ADDR SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_TLS_GET_ADDR \ + SI_FREEBSD || SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX #define SANITIZER_INTERCEPT_GETXATTR SI_LINUX #define SANITIZER_INTERCEPT_GETRESID SI_LINUX -#define SANITIZER_INTERCEPT_GETIFADDRS SI_LINUX_NOT_ANDROID || SI_MAC -#define SANITIZER_INTERCEPT_IF_INDEXTONAME SI_LINUX_NOT_ANDROID || SI_MAC +#define SANITIZER_INTERCEPT_GETIFADDRS \ + SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC +#define SANITIZER_INTERCEPT_IF_INDEXTONAME 
\ + SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC #define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_AEABI_MEM SI_LINUX && defined(__arm__) #define SANITIZER_INTERCEPT___BZERO SI_MAC -#define SANITIZER_INTERCEPT_FTIME SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_FTIME !SI_FREEBSD && SI_NOT_WINDOWS #define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC #define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID @@ -233,5 +253,11 @@ #define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID #define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID +#define SANITIZER_INTERCEPT_SEM SI_LINUX || SI_FREEBSD +#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_NOT_WINDOWS +#define SANITIZER_INTERCEPT_MINCORE SI_LINUX + +#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX #endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc index a1f04325033..8779d8adf72 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc @@ -36,7 +36,6 @@ #define uid_t __kernel_uid_t #define gid_t __kernel_gid_t #define off_t __kernel_off_t -#define time_t __kernel_time_t // This header seems to contain the definitions of _kernel_ stat* structs. #include <asm/stat.h> #undef ino_t @@ -61,7 +60,7 @@ namespace __sanitizer { } // namespace __sanitizer #if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\ - && !defined(__mips__) && !defined(__sparc__) + && !defined(__mips__) COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat)); #endif diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc index 2ef4ca22276..05f8c06a498 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc @@ -10,8 +10,8 @@ // Sizes and layouts of platform-specific POSIX data structures. //===----------------------------------------------------------------------===// - #include "sanitizer_platform.h" + #if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC // Tests in this file assume that off_t-dependent data structures match the // libc ABI. 
For example, struct dirent here is what readdir() function (as @@ -32,8 +32,6 @@ #include <grp.h> #include <limits.h> #include <net/if.h> -#include <net/if_arp.h> -#include <net/route.h> #include <netdb.h> #include <poll.h> #include <pthread.h> @@ -52,6 +50,10 @@ #include <time.h> #include <wchar.h> +#if !SANITIZER_IOS +#include <net/route.h> +#endif + #if !SANITIZER_ANDROID #include <sys/mount.h> #include <sys/timeb.h> @@ -73,6 +75,7 @@ #include <linux/sysctl.h> #include <linux/utsname.h> #include <linux/posix_types.h> +#include <net/if_arp.h> #endif #if SANITIZER_FREEBSD @@ -95,7 +98,6 @@ # include <sys/link_elf.h> # include <netinet/ip_mroute.h> # include <netinet/in.h> -# include <netinet/ip_compat.h> # include <net/ethernet.h> # include <net/ppp_defs.h> # include <glob.h> @@ -115,6 +117,10 @@ #if SANITIZER_LINUX || SANITIZER_FREEBSD # include <utime.h> # include <sys/ptrace.h> +# if defined(__mips64) || defined(__aarch64__) +# include <asm/ptrace.h> +# endif +# include <semaphore.h> #endif #if !SANITIZER_ANDROID @@ -142,6 +148,9 @@ #include <sys/shm.h> #include <sys/statvfs.h> #include <sys/timex.h> +#if defined(__mips64) +# include <sys/procfs.h> +#endif #include <sys/user.h> #include <sys/ustat.h> #include <linux/cyclades.h> @@ -185,7 +194,7 @@ namespace __sanitizer { unsigned struct_stat_sz = sizeof(struct stat); #if !SANITIZER_IOS && !SANITIZER_FREEBSD unsigned struct_stat64_sz = sizeof(struct stat64); -#endif // !SANITIZER_IOS && !SANITIZER_FREEBSD +#endif // !SANITIZER_IOS && !SANITIZER_FREEBSD unsigned struct_rusage_sz = sizeof(struct rusage); unsigned struct_tm_sz = sizeof(struct tm); unsigned struct_passwd_sz = sizeof(struct passwd); @@ -226,27 +235,27 @@ namespace __sanitizer { unsigned struct_new_utsname_sz = sizeof(struct new_utsname); unsigned struct_old_utsname_sz = sizeof(struct old_utsname); unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname); -#endif // SANITIZER_LINUX +#endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD unsigned struct_rlimit_sz = sizeof(struct rlimit); unsigned struct_timespec_sz = sizeof(struct timespec); unsigned struct_utimbuf_sz = sizeof(struct utimbuf); unsigned struct_itimerspec_sz = sizeof(struct itimerspec); -#endif // SANITIZER_LINUX || SANITIZER_FREEBSD +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID unsigned struct_ustat_sz = sizeof(struct ustat); unsigned struct_rlimit64_sz = sizeof(struct rlimit64); unsigned struct_statvfs64_sz = sizeof(struct statvfs64); -#endif // SANITIZER_LINUX && !SANITIZER_ANDROID +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned struct_timex_sz = sizeof(struct timex); unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds); unsigned struct_mq_attr_sz = sizeof(struct mq_attr); unsigned struct_statvfs_sz = sizeof(struct statvfs); -#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID +#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID uptr sig_ign = (uptr)SIG_IGN; uptr sig_dfl = (uptr)SIG_DFL; @@ -280,30 +289,61 @@ namespace __sanitizer { return 0; } +#if SANITIZER_LINUX +unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr)); +#elif SANITIZER_FREEBSD +unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); +#endif + #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID int glob_nomatch = GLOB_NOMATCH; int glob_altdirfunc = GLOB_ALTDIRFUNC; #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ - (defined(__i386) || defined(__x86_64)) + 
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__)) +#if defined(__mips64) || defined(__powerpc64__) + unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); +#elif defined(__aarch64__) + unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); +#else unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct); -#ifdef __x86_64 +#endif // __mips64 || __powerpc64__ || __aarch64__ +#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ + defined(__aarch64__) unsigned struct_user_fpxregs_struct_sz = 0; #else unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); -#endif +#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ int ptrace_peektext = PTRACE_PEEKTEXT; int ptrace_peekdata = PTRACE_PEEKDATA; int ptrace_peekuser = PTRACE_PEEKUSER; +#if defined(PT_GETREGS) && defined(PT_SETREGS) int ptrace_getregs = PTRACE_GETREGS; int ptrace_setregs = PTRACE_SETREGS; +#else + int ptrace_getregs = -1; + int ptrace_setregs = -1; +#endif +#if defined(PT_GETFPREGS) && defined(PT_SETFPREGS) int ptrace_getfpregs = PTRACE_GETFPREGS; int ptrace_setfpregs = PTRACE_SETFPREGS; +#else + int ptrace_getfpregs = -1; + int ptrace_setfpregs = -1; +#endif +#if defined(PT_GETFPXREGS) && defined(PT_SETFPXREGS) int ptrace_getfpxregs = PTRACE_GETFPXREGS; int ptrace_setfpxregs = PTRACE_SETFPXREGS; +#else + int ptrace_getfpxregs = -1; + int ptrace_setfpxregs = -1; +#endif // PTRACE_GETFPXREGS/PTRACE_SETFPXREGS int ptrace_geteventmsg = PTRACE_GETEVENTMSG; #if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO)) @@ -312,25 +352,25 @@ namespace __sanitizer { #else int ptrace_getsiginfo = -1; int ptrace_setsiginfo = -1; -#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO +#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO #if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET) int ptrace_getregset = PTRACE_GETREGSET; int ptrace_setregset = PTRACE_SETREGSET; #else int ptrace_getregset = -1; int ptrace_setregset = -1; -#endif // PTRACE_GETREGSET/PTRACE_SETREGSET +#endif // PTRACE_GETREGSET/PTRACE_SETREGSET #endif unsigned path_max = PATH_MAX; // ioctl arguments - unsigned struct_arpreq_sz = sizeof(struct arpreq); unsigned struct_ifreq_sz = sizeof(struct ifreq); unsigned struct_termios_sz = sizeof(struct termios); unsigned struct_winsize_sz = sizeof(struct winsize); #if SANITIZER_LINUX + unsigned struct_arpreq_sz = sizeof(struct arpreq); unsigned struct_cdrom_msf_sz = sizeof(struct cdrom_msf); unsigned struct_cdrom_multisession_sz = sizeof(struct cdrom_multisession); unsigned struct_cdrom_read_audio_sz = sizeof(struct cdrom_read_audio); @@ -357,7 +397,7 @@ namespace __sanitizer { unsigned struct_vt_consize_sz = sizeof(struct vt_consize); unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes); unsigned struct_vt_stat_sz = sizeof(struct vt_stat); -#endif // SANITIZER_LINUX +#endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD #if SOUND_VERSION >= 0x040000 @@ -377,7 +417,7 @@ namespace __sanitizer { unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec); unsigned struct_synth_info_sz = sizeof(struct synth_info); unsigned struct_vt_mode_sz = sizeof(struct vt_mode); -#endif // SANITIZER_LINUX || 
SANITIZER_FREEBSD +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct); @@ -402,12 +442,12 @@ namespace __sanitizer { unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25); unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc); unsigned struct_unimapinit_sz = sizeof(struct unimapinit); -#endif // SANITIZER_LINUX && !SANITIZER_ANDROID +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info); unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats); -#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID +#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID #if !SANITIZER_ANDROID && !SANITIZER_MAC unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req); @@ -622,7 +662,7 @@ namespace __sanitizer { unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE; unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS; unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER; -#endif // SOUND_VERSION +#endif // SOUND_VERSION unsigned IOCTL_TCFLSH = TCFLSH; unsigned IOCTL_TCGETA = TCGETA; unsigned IOCTL_TCGETS = TCGETS; @@ -745,7 +785,7 @@ namespace __sanitizer { unsigned IOCTL_VT_RELDISP = VT_RELDISP; unsigned IOCTL_VT_SETMODE = VT_SETMODE; unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE; -#endif // SANITIZER_LINUX || SANITIZER_FREEBSD +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_LINUX && !SANITIZER_ANDROID unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH; @@ -836,7 +876,7 @@ namespace __sanitizer { unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI; unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI; unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL; -#endif // SANITIZER_LINUX && !SANITIZER_ANDROID +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP; @@ -854,7 +894,7 @@ namespace __sanitizer { unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP; unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE; unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE; -#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID +#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID const int errno_EINVAL = EINVAL; // EOWNERDEAD is not present in some older platforms. 
@@ -866,7 +906,7 @@ namespace __sanitizer { const int si_SEGV_MAPERR = SEGV_MAPERR; const int si_SEGV_ACCERR = SEGV_ACCERR; -} // namespace __sanitizer +} // namespace __sanitizer COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t)); @@ -896,7 +936,7 @@ COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678)); COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678)); COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678)); COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678)); -#endif // SANITIZER_LINUX +#endif // SANITIZER_LINUX #if SANITIZER_LINUX || SANITIZER_FREEBSD // There are more undocumented fields in dl_phdr_info that we are not interested @@ -906,7 +946,7 @@ CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr); CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum); -#endif // SANITIZER_LINUX || SANITIZER_FREEBSD +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID CHECK_TYPE_SIZE(glob_t); @@ -1103,14 +1143,14 @@ CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask); # if SANITIZER_FREEBSD CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr); # else -COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)NULL)->ifa_dstaddr) == - sizeof(((ifaddrs *)NULL)->ifa_ifu)); +COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) == + sizeof(((ifaddrs *)nullptr)->ifa_ifu)); COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) == offsetof(ifaddrs, ifa_ifu)); -# endif // SANITIZER_FREEBSD +# endif // SANITIZER_FREEBSD #else CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr); -#endif // SANITIZER_LINUX +#endif // SANITIZER_LINUX CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data); #endif @@ -1192,6 +1232,16 @@ CHECK_SIZE_AND_OFFSET(obstack, chunk_size); CHECK_SIZE_AND_OFFSET(obstack, chunk); CHECK_SIZE_AND_OFFSET(obstack, object_base); CHECK_SIZE_AND_OFFSET(obstack, next_free); + +CHECK_TYPE_SIZE(cookie_io_functions_t); +CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read); +CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write); +CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek); +CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close); +#endif + +#if SANITIZER_LINUX || SANITIZER_FREEBSD +CHECK_TYPE_SIZE(sem_t); #endif -#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC +#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h index 657cd85264e..53118b5bde5 100644 --- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h @@ -16,6 +16,15 @@ #include "sanitizer_internal_defs.h" #include "sanitizer_platform.h" +#if SANITIZER_FREEBSD +// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that +// incroporates the map structure. +# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \ + ((link_map*)((handle) == nullptr ? 
nullptr : ((char*)(handle) + 544))) +#else +# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle)) +#endif // !SANITIZER_FREEBSD + namespace __sanitizer { extern unsigned struct_utsname_sz; extern unsigned struct_stat_sz; @@ -72,14 +81,6 @@ namespace __sanitizer { const unsigned struct_kernel_stat_sz = 144; #endif const unsigned struct_kernel_stat64_sz = 104; -#elif defined(__sparc__) && defined(__arch64__) - const unsigned struct___old_kernel_stat_sz = 0; - const unsigned struct_kernel_stat_sz = 104; - const unsigned struct_kernel_stat64_sz = 144; -#elif defined(__sparc__) && !defined(__arch64__) - const unsigned struct___old_kernel_stat_sz = 0; - const unsigned struct_kernel_stat_sz = 64; - const unsigned struct_kernel_stat64_sz = 104; #endif struct __sanitizer_perf_event_attr { unsigned type; @@ -102,7 +103,7 @@ namespace __sanitizer { #if defined(__powerpc64__) const unsigned struct___old_kernel_stat_sz = 0; -#elif !defined(__sparc__) +#else const unsigned struct___old_kernel_stat_sz = 32; #endif @@ -147,6 +148,18 @@ namespace __sanitizer { }; const unsigned old_sigset_t_sz = sizeof(unsigned long); + + struct __sanitizer_sem_t { +#if SANITIZER_ANDROID && defined(_LP64) + int data[4]; +#elif SANITIZER_ANDROID && !defined(_LP64) + int data; +#elif SANITIZER_LINUX + uptr data[4]; +#elif SANITIZER_FREEBSD + u32 data[4]; +#endif + }; #endif // SANITIZER_LINUX || SANITIZER_FREEBSD #if SANITIZER_ANDROID @@ -181,18 +194,6 @@ namespace __sanitizer { unsigned short __pad1; unsigned long __unused1; unsigned long __unused2; -#elif defined(__sparc__) -# if defined(__arch64__) - unsigned mode; - unsigned short __pad1; -# else - unsigned short __pad1; - unsigned short mode; - unsigned short __pad2; -# endif - unsigned short __seq; - unsigned long long __unused1; - unsigned long long __unused2; #else unsigned short mode; unsigned short __pad1; @@ -210,26 +211,6 @@ namespace __sanitizer { struct __sanitizer_shmid_ds { __sanitizer_ipc_perm shm_perm; - #if defined(__sparc__) - # if !defined(__arch64__) - u32 __pad1; - # endif - long shm_atime; - # if !defined(__arch64__) - u32 __pad2; - # endif - long shm_dtime; - # if !defined(__arch64__) - u32 __pad3; - # endif - long shm_ctime; - uptr shm_segsz; - int shm_cpid; - int shm_lpid; - unsigned long shm_nattch; - unsigned long __glibc_reserved1; - unsigned long __glibc_reserved2; - #else #ifndef __powerpc__ uptr shm_segsz; #elif !defined(__powerpc64__) @@ -267,7 +248,6 @@ namespace __sanitizer { uptr __unused4; uptr __unused5; #endif -#endif }; #elif SANITIZER_FREEBSD struct __sanitizer_ipc_perm { @@ -351,7 +331,7 @@ namespace __sanitizer { long pw_change; char *pw_class; #endif -#if !SANITIZER_ANDROID +#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)) char *pw_gecos; #endif char *pw_dir; @@ -413,7 +393,7 @@ namespace __sanitizer { }; #endif -#if SANITIZER_ANDROID || SANITIZER_MAC || SANITIZER_FREEBSD +#if SANITIZER_MAC || SANITIZER_FREEBSD struct __sanitizer_msghdr { void *msg_name; unsigned msg_namelen; @@ -550,6 +530,27 @@ namespace __sanitizer { #endif // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros. 
+#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64) + struct __sanitizer_sigaction { + unsigned sa_flags; + union { + void (*sigaction)(int sig, void *siginfo, void *uctx); + void (*handler)(int sig); + }; + __sanitizer_sigset_t sa_mask; + void (*sa_restorer)(); + }; +#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32) + struct __sanitizer_sigaction { + union { + void (*sigaction)(int sig, void *siginfo, void *uctx); + void (*handler)(int sig); + }; + __sanitizer_sigset_t sa_mask; + uptr sa_flags; + void (*sa_restorer)(); + }; +#else // !SANITIZER_ANDROID struct __sanitizer_sigaction { #if defined(__mips__) && !SANITIZER_FREEBSD unsigned int sa_flags; @@ -564,13 +565,9 @@ namespace __sanitizer { #else __sanitizer_sigset_t sa_mask; #ifndef __mips__ -#if defined(__sparc__) - unsigned long sa_flags; -#else int sa_flags; #endif #endif -#endif #if SANITIZER_LINUX void (*sa_restorer)(); #endif @@ -578,9 +575,14 @@ namespace __sanitizer { int sa_resv[1]; #endif }; +#endif // !SANITIZER_ANDROID #if SANITIZER_FREEBSD typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t; +#elif defined(__mips__) + struct __sanitizer_kernel_sigset_t { + u8 sig[16]; + }; #else struct __sanitizer_kernel_sigset_t { u8 sig[8]; @@ -617,6 +619,8 @@ namespace __sanitizer { const void *dlpi_phdr; short dlpi_phnum; }; + + extern unsigned struct_ElfW_Phdr_sz; #endif struct __sanitizer_addrinfo { @@ -729,7 +733,8 @@ namespace __sanitizer { #endif #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ - (defined(__i386) || defined(__x86_64)) + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__)) extern unsigned struct_user_regs_struct_sz; extern unsigned struct_user_fpregs_struct_sz; extern unsigned struct_user_fpxregs_struct_sz; @@ -786,11 +791,25 @@ struct __sanitizer_obstack { char *next_free; uptr more_fields[7]; }; + +typedef uptr (*__sanitizer_cookie_io_read)(void *cookie, char *buf, uptr size); +typedef uptr (*__sanitizer_cookie_io_write)(void *cookie, const char *buf, + uptr size); +typedef int (*__sanitizer_cookie_io_seek)(void *cookie, u64 *offset, + int whence); +typedef int (*__sanitizer_cookie_io_close)(void *cookie); + +struct __sanitizer_cookie_io_functions_t { + __sanitizer_cookie_io_read read; + __sanitizer_cookie_io_write write; + __sanitizer_cookie_io_seek seek; + __sanitizer_cookie_io_close close; +}; #endif #define IOC_NRBITS 8 #define IOC_TYPEBITS 8 -#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || defined(__sparc__) +#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) #define IOC_SIZEBITS 13 #define IOC_DIRBITS 3 #define IOC_NONE 1U @@ -820,24 +839,14 @@ struct __sanitizer_obstack { #define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK) #define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK) #define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK) - -#if defined(__sparc__) -// In sparc the 14 bits SIZE field overlaps with the -// least significant bit of DIR, so either IOC_READ or -// IOC_WRITE shall be 1 in order to get a non-zero SIZE. -# define IOC_SIZE(nr) \ - ((((((nr) >> 29) & 0x7) & (4U|2U)) == 0)? 
\ - 0 : (((nr) >> 16) & 0x3fff)) -#else #define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK) -#endif - extern unsigned struct_arpreq_sz; extern unsigned struct_ifreq_sz; extern unsigned struct_termios_sz; extern unsigned struct_winsize_sz; #if SANITIZER_LINUX + extern unsigned struct_arpreq_sz; extern unsigned struct_cdrom_msf_sz; extern unsigned struct_cdrom_multisession_sz; extern unsigned struct_cdrom_read_audio_sz; diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc index adb1b2a9d68..1438827e100 100644 --- a/libsanitizer/sanitizer_common/sanitizer_posix.cc +++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc @@ -7,17 +7,21 @@ // // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries and implements POSIX-specific functions from -// sanitizer_libc.h. +// sanitizer_posix.h. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" + #if SANITIZER_POSIX #include "sanitizer_common.h" #include "sanitizer_libc.h" +#include "sanitizer_posix.h" #include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" +#include <fcntl.h> +#include <signal.h> #include <sys/mman.h> #if SANITIZER_LINUX @@ -28,6 +32,13 @@ #include <sys/personality.h> #endif +#if SANITIZER_FREEBSD +// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before +// that, it was never implemented. So just define it to zero. +#undef MAP_NORESERVE +#define MAP_NORESERVE 0 +#endif + namespace __sanitizer { // ------------- sanitizer_common.h @@ -38,15 +49,15 @@ uptr GetMmapGranularity() { #if SANITIZER_WORDSIZE == 32 // Take care of unusable kernel area in top gigabyte. static uptr GetKernelAreaSize() { -#if SANITIZER_LINUX +#if SANITIZER_LINUX && !SANITIZER_X32 const uptr gbyte = 1UL << 30; // Firstly check if there are writable segments // mapped to top gigabyte (e.g. stack). MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr end, prot; - while (proc_maps.Next(/*start*/0, &end, - /*offset*/0, /*filename*/0, + while (proc_maps.Next(/*start*/nullptr, &end, + /*offset*/nullptr, /*filename*/nullptr, /*filename_size*/0, &prot)) { if ((end >= 3 * gbyte) && (prot & MemoryMappingLayout::kProtectionWrite) != 0) @@ -70,7 +81,7 @@ static uptr GetKernelAreaSize() { return gbyte; #else return 0; -#endif // SANITIZER_LINUX +#endif // SANITIZER_LINUX && !SANITIZER_X32 } #endif // SANITIZER_WORDSIZE == 32 @@ -86,7 +97,7 @@ uptr GetMaxVirtualAddress() { // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit. return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1; # elif defined(__mips64) - return (1ULL << 40) - 1; + return (1ULL << 40) - 1; // 0x000000ffffffffffUL; # else return (1ULL << 47) - 1; // 0x00007fffffffffffUL; # endif @@ -101,25 +112,12 @@ uptr GetMaxVirtualAddress() { void *MmapOrDie(uptr size, const char *mem_type) { size = RoundUpTo(size, GetPageSizeCached()); - uptr res = internal_mmap(0, size, - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON, -1, 0); + uptr res = internal_mmap(nullptr, size, + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON, -1, 0); int reserrno; - if (internal_iserror(res, &reserrno)) { - static int recursion_count; - if (recursion_count) { - // The Report() and CHECK calls below may call mmap recursively and fail. - // If we went into recursion, just die. 
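[Editor's note] One of the new layouts mirrored in the header hunk above is glibc's cookie_io_functions_t, which backs fopencookie(); the run-time checks its size and field offsets so that custom-stream callbacks can be intercepted. For reference, a small self-contained use of that GNU extension (the callback and stream here are purely illustrative):

    // Build as C++ with g++ on glibc (g++ predefines _GNU_SOURCE).
    #include <stdio.h>
    #include <sys/types.h>

    static ssize_t log_write(void *cookie, const char *buf, size_t size) {
      (void)cookie;                          // no per-stream state in this demo
      return fwrite(buf, 1, size, stderr);   // forward everything to stderr
    }

    int main() {
      // Field order matches the struct checked above: read, write, seek, close.
      cookie_io_functions_t io = { nullptr, log_write, nullptr, nullptr };
      FILE *f = fopencookie(nullptr, "w", io);
      if (!f) return 1;
      fputs("hello via cookie stream\n", f);
      fclose(f);
      return 0;
    }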
- RawWrite("ERROR: Failed to mmap\n"); - Die(); - } - recursion_count++; - Report("ERROR: %s failed to " - "allocate 0x%zx (%zd) bytes of %s (errno: %d)\n", - SanitizerToolName, size, size, mem_type, reserrno); - DumpProcessMap(); - CHECK("unable to mmap" && 0); - } + if (internal_iserror(res, &reserrno)) + ReportMmapFailureAndDie(size, mem_type, reserrno); IncreaseTotalMmap(size); return (void *)res; } @@ -137,11 +135,11 @@ void UnmapOrDie(void *addr, uptr size) { void *MmapNoReserveOrDie(uptr size, const char *mem_type) { uptr PageSize = GetPageSizeCached(); - uptr p = internal_mmap(0, - RoundUpTo(size, PageSize), - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, - -1, 0); + uptr p = internal_mmap(nullptr, + RoundUpTo(size, PageSize), + PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, + -1, 0); int reserrno; if (internal_iserror(p, &reserrno)) { Report("ERROR: %s failed to " @@ -153,22 +151,6 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) { return (void *)p; } -void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { - uptr PageSize = GetPageSizeCached(); - uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), - RoundUpTo(size, PageSize), - PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, - -1, 0); - int reserrno; - if (internal_iserror(p, &reserrno)) - Report("ERROR: %s failed to " - "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n", - SanitizerToolName, size, size, fixed_addr, reserrno); - IncreaseTotalMmap(size); - return (void *)p; -} - void *MmapFixedOrDie(uptr fixed_addr, uptr size) { uptr PageSize = GetPageSizeCached(); uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), @@ -187,33 +169,72 @@ void *MmapFixedOrDie(uptr fixed_addr, uptr size) { return (void *)p; } -void *Mprotect(uptr fixed_addr, uptr size) { - return (void *)internal_mmap((void*)fixed_addr, size, - PROT_NONE, - MAP_PRIVATE | MAP_ANON | MAP_FIXED | - MAP_NORESERVE, -1, 0); +bool MprotectNoAccess(uptr addr, uptr size) { + return 0 == internal_mprotect((void*)addr, size, PROT_NONE); +} + +fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) { + int flags; + switch (mode) { + case RdOnly: flags = O_RDONLY; break; + case WrOnly: flags = O_WRONLY | O_CREAT; break; + case RdWr: flags = O_RDWR | O_CREAT; break; + } + fd_t res = internal_open(filename, flags, 0660); + if (internal_iserror(res, errno_p)) + return kInvalidFd; + return res; +} + +void CloseFile(fd_t fd) { + internal_close(fd); +} + +bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, + error_t *error_p) { + uptr res = internal_read(fd, buff, buff_size); + if (internal_iserror(res, error_p)) + return false; + if (bytes_read) + *bytes_read = res; + return true; +} + +bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, + error_t *error_p) { + uptr res = internal_write(fd, buff, buff_size); + if (internal_iserror(res, error_p)) + return false; + if (bytes_written) + *bytes_written = res; + return true; +} + +bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { + uptr res = internal_rename(oldpath, newpath); + return !internal_iserror(res, error_p); } void *MapFileToMemory(const char *file_name, uptr *buff_size) { - uptr openrv = OpenFile(file_name, false); - CHECK(!internal_iserror(openrv)); - fd_t fd = openrv; + fd_t fd = OpenFile(file_name, RdOnly); + CHECK(fd != kInvalidFd); uptr fsize = internal_filesize(fd); CHECK_NE(fsize, (uptr)-1); CHECK_GT(fsize, 0); 
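[Editor's note] The hunk above replaces the old error-encoded open/read helpers with a small typed file API (OpenFile, CloseFile, ReadFromFile, WriteToFile, RenameFile). A hypothetical caller inside the run-time, written only to illustrate the new calling convention and not taken from the actual sources:

    // OpenFile() reports failure via kInvalidFd rather than an errno-encoded
    // uptr, and ReadFromFile() separates success from the byte count.
    static bool ReadWholeFile(const char *path, void *buf, uptr buf_size,
                              uptr *nread) {
      fd_t fd = OpenFile(path, RdOnly);
      if (fd == kInvalidFd) return false;
      bool ok = ReadFromFile(fd, buf, buf_size, nread, /*error_p=*/nullptr);
      CloseFile(fd);
      return ok;
    }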
*buff_size = RoundUpTo(fsize, GetPageSizeCached()); - uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0); - return internal_iserror(map) ? 0 : (void *)map; + uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0); + return internal_iserror(map) ? nullptr : (void *)map; } -void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) { +void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { uptr flags = MAP_SHARED; if (addr) flags |= MAP_FIXED; uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset); - if (internal_iserror(p)) { - Printf("could not map writable file (%zd, %zu, %zu): %zd\n", fd, offset, - size, p); - return 0; + int mmap_errno = 0; + if (internal_iserror(p, &mmap_errno)) { + Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n", + fd, (long long)offset, size, p, mmap_errno); + return nullptr; } return (void *)p; } @@ -233,9 +254,11 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { MemoryMappingLayout proc_maps(/*cache_enabled*/true); uptr start, end; while (proc_maps.Next(&start, &end, - /*offset*/0, /*filename*/0, /*filename_size*/0, - /*protection*/0)) { - if (!IntervalsAreSeparate(start, end, range_start, range_end)) + /*offset*/nullptr, /*filename*/nullptr, + /*filename_size*/0, /*protection*/nullptr)) { + if (start == end) continue; // Empty range. + CHECK_NE(0, end); + if (!IntervalsAreSeparate(start, end - 1, range_start, range_end)) return false; } return true; @@ -247,8 +270,8 @@ void DumpProcessMap() { const sptr kBufSize = 4095; char *filename = (char*)MmapOrDie(kBufSize, __func__); Report("Process memory map follows:\n"); - while (proc_maps.Next(&start, &end, /* file_offset */0, - filename, kBufSize, /* protection */0)) { + while (proc_maps.Next(&start, &end, /* file_offset */nullptr, + filename, kBufSize, /* protection */nullptr)) { Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename); } Report("End of process memory map.\n"); @@ -259,69 +282,28 @@ const char *GetPwd() { return GetEnv("PWD"); } -char *FindPathToBinary(const char *name) { - const char *path = GetEnv("PATH"); - if (!path) - return 0; - uptr name_len = internal_strlen(name); - InternalScopedBuffer<char> buffer(kMaxPathLength); - const char *beg = path; - while (true) { - const char *end = internal_strchrnul(beg, ':'); - uptr prefix_len = end - beg; - if (prefix_len + name_len + 2 <= kMaxPathLength) { - internal_memcpy(buffer.data(), beg, prefix_len); - buffer[prefix_len] = '/'; - internal_memcpy(&buffer[prefix_len + 1], name, name_len); - buffer[prefix_len + 1 + name_len] = '\0'; - if (FileExists(buffer.data())) - return internal_strdup(buffer.data()); - } - if (*end == '\0') break; - beg = end + 1; - } - return 0; +bool IsPathSeparator(const char c) { + return c == '/'; } -void MaybeOpenReportFile() { - if (!log_to_file) return; - uptr pid = internal_getpid(); - // If in tracer, use the parent's file. - if (pid == stoptheworld_tracer_pid) - pid = stoptheworld_tracer_ppid; - if (report_fd_pid == pid) return; - InternalScopedBuffer<char> report_path_full(4096); - internal_snprintf(report_path_full.data(), report_path_full.size(), - "%s.%zu", report_path_prefix, pid); - uptr openrv = OpenFile(report_path_full.data(), true); - if (internal_iserror(openrv)) { - report_fd = kStderrFd; - log_to_file = false; - Report("ERROR: Can't open file: %s\n", report_path_full.data()); - Die(); - } - if (report_fd != kInvalidFd) { - // We're in the child. 
Close the parent's log. - internal_close(report_fd); - } - report_fd = openrv; - report_fd_pid = pid; +bool IsAbsolutePath(const char *path) { + return path != nullptr && IsPathSeparator(path[0]); } -void RawWrite(const char *buffer) { - static const char *kRawWriteError = - "RawWrite can't output requested buffer!\n"; - uptr length = (uptr)internal_strlen(buffer); - MaybeOpenReportFile(); - if (length != internal_write(report_fd, buffer, length)) { - internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError)); +void ReportFile::Write(const char *buffer, uptr length) { + SpinMutexLock l(mu); + static const char *kWriteError = + "ReportFile::Write() can't output requested buffer!\n"; + ReopenIfNecessary(); + if (length != internal_write(fd, buffer, length)) { + internal_write(fd, kWriteError, internal_strlen(kWriteError)); Die(); } } bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) { uptr s, e, off, prot; - InternalScopedString buff(4096); + InternalScopedString buff(kMaxPathLength); MemoryMappingLayout proc_maps(/*cache_enabled*/false); while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) { if ((prot & MemoryMappingLayout::kProtectionExecute) != 0 @@ -334,6 +316,29 @@ bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) { return false; } -} // namespace __sanitizer +SignalContext SignalContext::Create(void *siginfo, void *context) { + uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr; + uptr pc, sp, bp; + GetPcSpBp(context, &pc, &sp, &bp); + return SignalContext(context, addr, pc, sp, bp); +} + +// This function check is the built VMA matches the runtime one for +// architectures with multiple VMA size. +void CheckVMASize() { +#ifdef __aarch64__ + static const unsigned kBuiltVMA = SANITIZER_AARCH64_VMA; + unsigned maxRuntimeVMA = + (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); + if (kBuiltVMA != maxRuntimeVMA) { + Printf("WARNING: %s runtime VMA is not the one built for.\n", + SanitizerToolName); + Printf("\tBuilt VMA: %u bits\n", kBuiltVMA); + Printf("\tRuntime VMA: %u bits\n", maxRuntimeVMA); + } +#endif +} + +} // namespace __sanitizer -#endif // SANITIZER_POSIX +#endif // SANITIZER_POSIX diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.h b/libsanitizer/sanitizer_common/sanitizer_posix.h new file mode 100644 index 00000000000..8668a03e68f --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_posix.h @@ -0,0 +1,79 @@ +//===-- sanitizer_posix.h -------------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between AddressSanitizer and ThreadSanitizer +// run-time libraries and declares some useful POSIX-specific functions. +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_POSIX_H +#define SANITIZER_POSIX_H + +// ----------- ATTENTION ------------- +// This header should NOT include any other headers from sanitizer runtime. +#include "sanitizer_internal_defs.h" + +#if !SANITIZER_POSIX +// Make it hard to accidentally use any of functions declared in this file: +#error This file should only be included on POSIX +#endif + +namespace __sanitizer { + +// I/O +// Don't use directly, use __sanitizer::OpenFile() instead. 
+uptr internal_open(const char *filename, int flags); +uptr internal_open(const char *filename, int flags, u32 mode); +uptr internal_close(fd_t fd); + +uptr internal_read(fd_t fd, void *buf, uptr count); +uptr internal_write(fd_t fd, const void *buf, uptr count); + +// Memory +uptr internal_mmap(void *addr, uptr length, int prot, int flags, + int fd, OFF_T offset); +uptr internal_munmap(void *addr, uptr length); +int internal_mprotect(void *addr, uptr length, int prot); + +// OS +uptr internal_filesize(fd_t fd); // -1 on error. +uptr internal_stat(const char *path, void *buf); +uptr internal_lstat(const char *path, void *buf); +uptr internal_fstat(fd_t fd, void *buf); +uptr internal_dup2(int oldfd, int newfd); +uptr internal_readlink(const char *path, char *buf, uptr bufsize); +uptr internal_unlink(const char *path); +uptr internal_rename(const char *oldpath, const char *newpath); +uptr internal_lseek(fd_t fd, OFF_T offset, int whence); + +uptr internal_ptrace(int request, int pid, void *addr, void *data); +uptr internal_waitpid(int pid, int *status, int options); + +int internal_fork(); + +// These functions call appropriate pthread_ functions directly, bypassing +// the interceptor. They are weak and may not be present in some tools. +SANITIZER_WEAK_ATTRIBUTE +int real_pthread_create(void *th, void *attr, void *(*callback)(void *), + void *param); +SANITIZER_WEAK_ATTRIBUTE +int real_pthread_join(void *th, void **ret); + +#define DEFINE_REAL_PTHREAD_FUNCTIONS \ + namespace __sanitizer { \ + int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \ + void *param) { \ + return REAL(pthread_create)(th, attr, callback, param); \ + } \ + int real_pthread_join(void *th, void **ret) { \ + return REAL(pthread_join(th, ret)); \ + } \ + } // namespace __sanitizer + +int internal_sigaction(int signum, const void *act, void *oldact); + +} // namespace __sanitizer + +#endif // SANITIZER_POSIX_H diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc index b4e42c72462..b99cdae9c28 100644 --- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc @@ -13,21 +13,34 @@ #include "sanitizer_platform.h" #if SANITIZER_POSIX + #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_platform_limits_posix.h" +#include "sanitizer_posix.h" +#include "sanitizer_procmaps.h" #include "sanitizer_stacktrace.h" +#include "sanitizer_symbolizer.h" #include <errno.h> +#include <fcntl.h> #include <pthread.h> #include <signal.h> #include <stdlib.h> #include <sys/mman.h> #include <sys/resource.h> +#include <sys/stat.h> #include <sys/time.h> #include <sys/types.h> #include <unistd.h> +#if SANITIZER_FREEBSD +// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before +// that, it was never implemented. So just define it to zero. +#undef MAP_NORESERVE +#define MAP_NORESERVE 0 +#endif + namespace __sanitizer { u32 GetUid() { @@ -42,6 +55,18 @@ void FlushUnneededShadowMemory(uptr addr, uptr size) { madvise((void*)addr, size, MADV_DONTNEED); } +void NoHugePagesInRegion(uptr addr, uptr size) { +#ifdef MADV_NOHUGEPAGE // May not be defined on old systems. 
+ madvise((void *)addr, size, MADV_NOHUGEPAGE); +#endif // MADV_NOHUGEPAGE +} + +void DontDumpShadowMemory(uptr addr, uptr length) { +#ifdef MADV_DONTDUMP + madvise((void *)addr, length, MADV_DONTDUMP); +#endif +} + static rlim_t getlim(int res) { rlimit rlim; CHECK_EQ(0, getrlimit(res, &rlim)); @@ -53,7 +78,7 @@ static void setlim(int res, rlim_t lim) { volatile struct rlimit rlim; rlim.rlim_cur = lim; rlim.rlim_max = lim; - if (setrlimit(res, (struct rlimit*)&rlim)) { + if (setrlimit(res, const_cast<struct rlimit *>(&rlim))) { Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno); Die(); } @@ -105,8 +130,8 @@ int Atexit(void (*function)(void)) { #endif } -int internal_isatty(fd_t fd) { - return isatty(fd); +bool SupportsColoredOutput(fd_t fd) { + return isatty(fd) != 0; } #ifndef SANITIZER_GO @@ -115,7 +140,7 @@ static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough. void SetAlternateSignalStack() { stack_t altstack, oldstack; - CHECK_EQ(0, sigaltstack(0, &oldstack)); + CHECK_EQ(0, sigaltstack(nullptr, &oldstack)); // If the alternate stack is already in place, do nothing. // Android always sets an alternate stack, but it's too small for us. if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return; @@ -126,12 +151,12 @@ void SetAlternateSignalStack() { altstack.ss_sp = (char*) base; altstack.ss_flags = 0; altstack.ss_size = kAltStackSize; - CHECK_EQ(0, sigaltstack(&altstack, 0)); + CHECK_EQ(0, sigaltstack(&altstack, nullptr)); } void UnsetAlternateSignalStack() { stack_t altstack, oldstack; - altstack.ss_sp = 0; + altstack.ss_sp = nullptr; altstack.ss_flags = SS_DISABLE; altstack.ss_size = kAltStackSize; // Some sane value required on Darwin. CHECK_EQ(0, sigaltstack(&altstack, &oldstack)); @@ -150,7 +175,7 @@ static void MaybeInstallSigaction(int signum, // Clients are responsible for handling this correctly. sigact.sa_flags = SA_SIGINFO | SA_NODEFER; if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK; - CHECK_EQ(0, internal_sigaction(signum, &sigact, 0)); + CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr)); VReport(1, "Installed the sigaction for signal %d\n", signum); } @@ -161,6 +186,8 @@ void InstallDeadlySignalHandlers(SignalHandlerType handler) { if (common_flags()->use_sigaltstack) SetAlternateSignalStack(); MaybeInstallSigaction(SIGSEGV, handler); MaybeInstallSigaction(SIGBUS, handler); + MaybeInstallSigaction(SIGABRT, handler); + MaybeInstallSigaction(SIGFPE, handler); } #endif // SANITIZER_GO @@ -186,6 +213,67 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) { return result; } -} // namespace __sanitizer +void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { + // Some kinds of sandboxes may forbid filesystem access, so we won't be able + // to read the file mappings from /proc/self/maps. Luckily, neither the + // process will be able to load additional libraries, so it's fine to use the + // cached mappings. + MemoryMappingLayout::CacheMemoryMappings(); + // Same for /proc/self/exe in the symbolizer. 
+#if !SANITIZER_GO + Symbolizer::GetOrInit()->PrepareForSandboxing(); + CovPrepareForSandboxing(args); +#endif +} + +#if SANITIZER_ANDROID +int GetNamedMappingFd(const char *name, uptr size) { + return -1; +} +#else +int GetNamedMappingFd(const char *name, uptr size) { + if (!common_flags()->decorate_proc_maps) + return -1; + char shmname[200]; + CHECK(internal_strlen(name) < sizeof(shmname) - 10); + internal_snprintf(shmname, sizeof(shmname), "%zu [%s]", internal_getpid(), + name); + int fd = shm_open(shmname, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU); + CHECK_GE(fd, 0); + int res = internal_ftruncate(fd, size); + CHECK_EQ(0, res); + res = shm_unlink(shmname); + CHECK_EQ(0, res); + return fd; +} +#endif + +void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { + int fd = name ? GetNamedMappingFd(name, size) : -1; + unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE; + if (fd == -1) flags |= MAP_ANON; + + uptr PageSize = GetPageSizeCached(); + uptr p = internal_mmap((void *)(fixed_addr & ~(PageSize - 1)), + RoundUpTo(size, PageSize), PROT_READ | PROT_WRITE, + flags, fd, 0); + int reserrno; + if (internal_iserror(p, &reserrno)) + Report("ERROR: %s failed to " + "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n", + SanitizerToolName, size, size, fixed_addr, reserrno); + IncreaseTotalMmap(size); + return (void *)p; +} + +void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) { + int fd = name ? GetNamedMappingFd(name, size) : -1; + unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE; + if (fd == -1) flags |= MAP_ANON; + + return (void *)internal_mmap((void *)fixed_addr, size, PROT_NONE, flags, fd, + 0); +} +} // namespace __sanitizer -#endif // SANITIZER_POSIX +#endif // SANITIZER_POSIX diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cc b/libsanitizer/sanitizer_common/sanitizer_printf.cc index 599f2c5d7c6..6688610bf0f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_printf.cc +++ b/libsanitizer/sanitizer_common/sanitizer_printf.cc @@ -12,7 +12,6 @@ // inside it. //===----------------------------------------------------------------------===// - #include "sanitizer_common.h" #include "sanitizer_flags.h" #include "sanitizer_libc.h" @@ -96,7 +95,7 @@ static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num, static int AppendString(char **buff, const char *buff_end, int precision, const char *s) { - if (s == 0) + if (!s) s = "<null>"; int result = 0; for (; *s; s++) { @@ -249,29 +248,36 @@ static void SharedPrintfCode(bool append_pid, const char *format, buffer_size = kLen; } needed_length = 0; + // Check that data fits into the current buffer. +# define CHECK_NEEDED_LENGTH \ + if (needed_length >= buffer_size) { \ + if (!use_mmap) continue; \ + RAW_CHECK_MSG(needed_length < kLen, \ + "Buffer in Report is too short!\n"); \ + } if (append_pid) { int pid = internal_getpid(); - needed_length += internal_snprintf(buffer, buffer_size, "==%d==", pid); - if (needed_length >= buffer_size) { - // The pid doesn't fit into the current buffer. 
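[Editor's note] GetNamedMappingFd() in the hunk above gives otherwise-anonymous shadow mappings a human-readable label in /proc/self/maps when decorate_proc_maps=1, by backing them with a named but immediately unlinked POSIX shm object. A standalone demonstration of the same trick (not tool code; the label and size are arbitrary, and -lrt may be needed on older glibc):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>
    #include <cstdio>

    int main() {
      const char *label = "/demo [low shadow]";            // shows up in maps
      int fd = shm_open(label, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU);
      if (fd < 0) return 1;
      if (ftruncate(fd, 1 << 20) != 0) return 1;           // size the object
      shm_unlink(label);                                   // keep only the fd
      void *p = mmap(nullptr, 1 << 20, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
      if (p == MAP_FAILED) return 1;
      std::printf("mapped at %p -- inspect /proc/%d/maps\n", p, (int)getpid());
      pause();                                             // leave time to look
      return 0;
    }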
- if (!use_mmap) - continue; - RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n"); + const char *exe_name = GetProcessName(); + if (common_flags()->log_exe_name && exe_name) { + needed_length += internal_snprintf(buffer, buffer_size, + "==%s", exe_name); + CHECK_NEEDED_LENGTH } + needed_length += internal_snprintf(buffer + needed_length, + buffer_size - needed_length, + "==%d==", pid); + CHECK_NEEDED_LENGTH } needed_length += VSNPrintf(buffer + needed_length, buffer_size - needed_length, format, args); - if (needed_length >= buffer_size) { - // The message doesn't fit into the current buffer. - if (!use_mmap) - continue; - RAW_CHECK_MSG(needed_length < kLen, "Buffer in Report is too short!\n"); - } + CHECK_NEEDED_LENGTH // If the message fit into the buffer, print it and exit. break; +# undef CHECK_NEEDED_LENGTH } RawWrite(buffer); - AndroidLogWrite(buffer); + if (common_flags()->log_to_syslog) + WriteToSyslog(buffer); CallPrintfAndReportCallback(buffer); // If we had mapped any memory, clean up. if (buffer != local_buffer) @@ -320,4 +326,4 @@ void InternalScopedString::append(const char *format, ...) { CHECK_LT(length_, size()); } -} // namespace __sanitizer +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc index ca9900fe9ee..1cf3f8510fb 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc +++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc @@ -9,7 +9,9 @@ //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" + #if SANITIZER_FREEBSD || SANITIZER_LINUX + #include "sanitizer_common.h" #include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" @@ -117,7 +119,7 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules, string_predicate_t filter) { Reset(); uptr cur_beg, cur_end, cur_offset, prot; - InternalScopedBuffer<char> module_name(kMaxPathLength); + InternalScopedString module_name(kMaxPathLength); uptr n_modules = 0; for (uptr i = 0; n_modules < max_modules && Next(&cur_beg, &cur_end, &cur_offset, module_name.data(), @@ -128,7 +130,6 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules, continue; if (filter && !filter(cur_name)) continue; - void *mem = &modules[n_modules]; // Don't subtract 'cur_beg' from the first entry: // * If a binary is compiled w/o -pie, then the first entry in // process maps is likely the binary itself (all dynamic libs @@ -141,7 +142,8 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules, // shadow memory of the tool), so the module can't be the // first entry. uptr base_address = (i ? 
cur_beg : 0) - cur_offset; - LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address); + LoadedModule *cur_module = &modules[n_modules]; + cur_module->set(cur_name, base_address); cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute); n_modules++; } @@ -149,10 +151,11 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules, } void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { - char *smaps = 0; + char *smaps = nullptr; uptr smaps_cap = 0; - uptr smaps_len = ReadFileToBuffer("/proc/self/smaps", - &smaps, &smaps_cap, 64<<20); + uptr smaps_len = 0; + if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len)) + return; uptr start = 0; bool file = false; const char *pos = smaps; @@ -171,6 +174,6 @@ void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { UnmapOrDie(smaps, smaps_cap); } -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc index 43babf89319..e77e33fb2d7 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc +++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc @@ -16,8 +16,8 @@ namespace __sanitizer { void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { - proc_maps->len = ReadFileToBuffer("/proc/self/maps", &proc_maps->data, - &proc_maps->mmaped_size, 1 << 26); + CHECK(ReadFileToBuffer("/proc/self/maps", &proc_maps->data, + &proc_maps->mmaped_size, &proc_maps->len)); } static bool IsOneOf(char c, char c1, char c2) { diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc index 81874c21b1f..393243b7c28 100644 --- a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc +++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc @@ -158,7 +158,7 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules, string_predicate_t filter) { Reset(); uptr cur_beg, cur_end, prot; - InternalScopedBuffer<char> module_name(kMaxPathLength); + InternalScopedString module_name(kMaxPathLength); uptr n_modules = 0; for (uptr i = 0; n_modules < max_modules && Next(&cur_beg, &cur_end, 0, module_name.data(), @@ -169,13 +169,13 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules, continue; if (filter && !filter(cur_name)) continue; - LoadedModule *cur_module = 0; + LoadedModule *cur_module = nullptr; if (n_modules > 0 && 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) { cur_module = &modules[n_modules - 1]; } else { - void *mem = &modules[n_modules]; - cur_module = new(mem) LoadedModule(cur_name, cur_beg); + cur_module = &modules[n_modules]; + cur_module->set(cur_name, cur_beg); n_modules++; } cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute); diff --git a/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/libsanitizer/sanitizer_common/sanitizer_quarantine.h index 1e8d056a187..a635871be45 100644 --- a/libsanitizer/sanitizer_common/sanitizer_quarantine.h +++ b/libsanitizer/sanitizer_common/sanitizer_quarantine.h @@ -47,11 +47,14 @@ class Quarantine { } void Init(uptr size, uptr cache_size) { - max_size_ = size; - min_size_ = size / 10 * 9; // 90% of max size. + atomic_store(&max_size_, size, memory_order_release); + atomic_store(&min_size_, size / 10 * 9, + memory_order_release); // 90% of max size. 
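[Editor's note] The quarantine hunk that starts just above (and continues below) replaces the plain uptr size limits with atomics: a writer publishes a new maximum with release ordering and the hot Put()/Drain() path reads it with acquire ordering, so the limit can be re-tuned at run time (e.g. on tool re-activation) without a lock. The same pattern in portable C++, for illustration only:

    #include <atomic>
    #include <cstddef>

    std::atomic<std::size_t> max_size{0};

    // Writer (rare): publish a new limit.
    void SetLimit(std::size_t bytes) {
      max_size.store(bytes, std::memory_order_release);
    }

    // Reader (hot path): decide whether the cache must be recycled.
    bool OverLimit(std::size_t current_bytes) {
      return current_bytes > max_size.load(std::memory_order_acquire);
    }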
max_cache_size_ = cache_size; } + uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); } + void Put(Cache *c, Callback cb, Node *ptr, uptr size) { c->Enqueue(cb, ptr, size); if (c->Size() > max_cache_size_) @@ -63,15 +66,15 @@ class Quarantine { SpinMutexLock l(&cache_mutex_); cache_.Transfer(c); } - if (cache_.Size() > max_size_ && recycle_mutex_.TryLock()) + if (cache_.Size() > GetSize() && recycle_mutex_.TryLock()) Recycle(cb); } private: // Read-only data. char pad0_[kCacheLineSize]; - uptr max_size_; - uptr min_size_; + atomic_uintptr_t max_size_; + atomic_uintptr_t min_size_; uptr max_cache_size_; char pad1_[kCacheLineSize]; SpinMutex cache_mutex_; @@ -81,9 +84,10 @@ class Quarantine { void NOINLINE Recycle(Callback cb) { Cache tmp; + uptr min_size = atomic_load(&min_size_, memory_order_acquire); { SpinMutexLock l(&cache_mutex_); - while (cache_.Size() > min_size_) { + while (cache_.Size() > min_size) { QuarantineBatch *b = cache_.DequeueBatch(); tmp.EnqueueBatch(b); } @@ -128,6 +132,7 @@ class QuarantineCache { size += sizeof(QuarantineBatch); // Count the batch in Quarantine size. } QuarantineBatch *b = list_.back(); + CHECK(b); b->batch[b->count++] = ptr; b->size += size; SizeAdd(size); @@ -146,7 +151,7 @@ class QuarantineCache { QuarantineBatch *DequeueBatch() { if (list_.empty()) - return 0; + return nullptr; QuarantineBatch *b = list_.front(); list_.pop_front(); SizeSub(b->size); @@ -166,12 +171,13 @@ class QuarantineCache { NOINLINE QuarantineBatch* AllocBatch(Callback cb) { QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b)); + CHECK(b); b->count = 0; b->size = 0; list_.push_back(b); return b; } }; -} // namespace __sanitizer +} // namespace __sanitizer -#endif // #ifndef SANITIZER_QUARANTINE_H +#endif // SANITIZER_QUARANTINE_H diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc index a3c9c0677e2..3c5313c3ae4 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc +++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cc @@ -20,7 +20,8 @@ struct StackDepotNode { StackDepotNode *link; u32 id; atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20; - uptr size; + u32 size; + u32 tag; uptr stack[1]; // [size] static const u32 kTabSizeLog = 20; @@ -35,7 +36,8 @@ struct StackDepotNode { bool eq(u32 hash, const args_type &args) const { u32 hash_bits = atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask; - if ((hash & kHashMask) != hash_bits || args.size != size) return false; + if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag) + return false; uptr i = 0; for (; i < size; i++) { if (stack[i] != args.trace[i]) return false; @@ -70,10 +72,11 @@ struct StackDepotNode { void store(const args_type &args, u32 hash) { atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed); size = args.size; + tag = args.tag; internal_memcpy(stack, args.trace, size * sizeof(uptr)); } args_type load() const { - return args_type(&stack[0], size); + return args_type(&stack[0], size, tag); } StackDepotHandle get_handle() { return StackDepotHandle(this); } @@ -147,7 +150,7 @@ StackDepotReverseMap::StackDepotReverseMap() StackTrace StackDepotReverseMap::Get(u32 id) { if (!map_.size()) return StackTrace(); - IdDescPair pair = {id, 0}; + IdDescPair pair = {id, nullptr}; uptr idx = InternalBinarySearch(map_, 0, map_.size(), pair, IdDescPair::IdComparator); if (idx > map_.size()) @@ -155,4 +158,4 @@ StackTrace 
StackDepotReverseMap::Get(u32 id) { return map_[idx].desc->load(); } -} // namespace __sanitizer +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h index aad9bfb8d35..dfb5349de9f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.h +++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.h @@ -8,6 +8,7 @@ // This file is shared between AddressSanitizer and ThreadSanitizer // run-time libraries. //===----------------------------------------------------------------------===// + #ifndef SANITIZER_STACKDEPOT_H #define SANITIZER_STACKDEPOT_H @@ -21,7 +22,7 @@ namespace __sanitizer { struct StackDepotNode; struct StackDepotHandle { StackDepotNode *node_; - StackDepotHandle() : node_(0) {} + StackDepotHandle() : node_(nullptr) {} explicit StackDepotHandle(StackDepotNode *node) : node_(node) {} bool valid() { return node_; } u32 id(); @@ -64,6 +65,6 @@ class StackDepotReverseMap { void operator=(const StackDepotReverseMap&); }; -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_STACKDEPOT_H +#endif // SANITIZER_STACKDEPOT_H diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h index 05b63092b80..ab4932823a3 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h +++ b/libsanitizer/sanitizer_common/sanitizer_stackdepotbase.h @@ -8,6 +8,7 @@ // Implementation of a mapping from arbitrary values to unique 32-bit // identifiers. //===----------------------------------------------------------------------===// + #ifndef SANITIZER_STACKDEPOTBASE_H #define SANITIZER_STACKDEPOTBASE_H @@ -24,7 +25,7 @@ class StackDepotBase { typedef typename Node::args_type args_type; typedef typename Node::handle_type handle_type; // Maps stack trace to an unique id. - handle_type Put(args_type args, bool *inserted = 0); + handle_type Put(args_type args, bool *inserted = nullptr); // Retrieves a stored stack trace by the id. args_type Get(u32 id); @@ -64,7 +65,7 @@ Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s, return s; } } - return 0; + return nullptr; } template <class Node, int kReservedBits, int kTabSizeLog> @@ -170,5 +171,6 @@ void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() { } } -} // namespace __sanitizer -#endif // SANITIZER_STACKDEPOTBASE_H +} // namespace __sanitizer + +#endif // SANITIZER_STACKDEPOTBASE_H diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc index a751da2f740..7b0c084b1a2 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc @@ -15,18 +15,13 @@ namespace __sanitizer { -uptr StackTrace::GetPreviousInstructionPc(uptr pc) { -#if defined(__arm__) - // Cancel Thumb bit. - pc = pc & (~1); -#endif -#if defined(__powerpc__) || defined(__powerpc64__) - // PCs are always 4 byte aligned. 
- return pc - 4; -#elif defined(__sparc__) || defined(__mips__) - return pc - 8; +uptr StackTrace::GetNextInstructionPc(uptr pc) { +#if defined(__mips__) + return pc + 8; +#elif defined(__powerpc__) + return pc + 4; #else - return pc - 1; + return pc + 1; #endif } @@ -73,7 +68,7 @@ static inline uhwptr *GetCanonicFrame(uptr bp, } void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top, - uptr stack_bottom, uptr max_depth) { + uptr stack_bottom, u32 max_depth) { CHECK_GE(max_depth, 2); trace_buffer[0] = pc; size = 1; @@ -92,7 +87,7 @@ void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top, // back chain to find the caller frame before extracting it. uhwptr *caller_frame = (uhwptr*)frame[0]; if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) || - !IsAligned((uptr)caller_frame, sizeof(uhwptr))) + !IsAligned((uptr)caller_frame, sizeof(uhwptr))) break; uhwptr pc1 = caller_frame[2]; #else @@ -121,7 +116,7 @@ void BufferedStackTrace::PopStackFrames(uptr count) { uptr BufferedStackTrace::LocatePcInTrace(uptr pc) { // Use threshold to find PC in stack trace, as PC we want to unwind from may // slightly differ from return address in the actual unwinded stack trace. - const int kPcThreshold = 288; + const int kPcThreshold = 304; for (uptr i = 0; i < size; ++i) { if (MatchPc(pc, trace[i], kPcThreshold)) return i; diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h index 68cc2620db7..7f22455ac9e 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h @@ -15,10 +15,9 @@ namespace __sanitizer { -static const uptr kStackTraceMax = 256; +static const u32 kStackTraceMax = 256; -#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__sparc__) || \ - defined(__mips__)) +#if SANITIZER_LINUX && (defined(__sparc__) || defined(__mips__)) # define SANITIZER_CAN_FAST_UNWIND 0 #elif SANITIZER_WINDOWS # define SANITIZER_CAN_FAST_UNWIND 0 @@ -37,10 +36,18 @@ static const uptr kStackTraceMax = 256; struct StackTrace { const uptr *trace; - uptr size; + u32 size; + u32 tag; - StackTrace() : trace(nullptr), size(0) {} - StackTrace(const uptr *trace, uptr size) : trace(trace), size(size) {} + static const int TAG_UNKNOWN = 0; + static const int TAG_ALLOC = 1; + static const int TAG_DEALLOC = 2; + static const int TAG_CUSTOM = 100; // Tool specific tags start here. + + StackTrace() : trace(nullptr), size(0), tag(0) {} + StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {} + StackTrace(const uptr *trace, u32 size, u32 tag) + : trace(trace), size(size), tag(tag) {} // Prints a symbolized stacktrace, followed by an empty line. void Print() const; @@ -54,11 +61,29 @@ struct StackTrace { } static uptr GetCurrentPc(); - static uptr GetPreviousInstructionPc(uptr pc); + static inline uptr GetPreviousInstructionPc(uptr pc); + static uptr GetNextInstructionPc(uptr pc); typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer, int out_size); }; +// Performance-critical, must be in the header. +ALWAYS_INLINE +uptr StackTrace::GetPreviousInstructionPc(uptr pc) { +#if defined(__arm__) + // Cancel Thumb bit. + pc = pc & (~1); +#endif +#if defined(__powerpc__) || defined(__powerpc64__) + // PCs are always 4 byte aligned. + return pc - 4; +#elif defined(__sparc__) || defined(__mips__) + return pc - 8; +#else + return pc - 1; +#endif +} + // StackTrace that owns the buffer used to store the addresses. 
struct BufferedStackTrace : public StackTrace { uptr trace_buffer[kStackTraceMax]; @@ -67,15 +92,15 @@ struct BufferedStackTrace : public StackTrace { BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {} void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0); - void Unwind(uptr max_depth, uptr pc, uptr bp, void *context, uptr stack_top, + void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top, uptr stack_bottom, bool request_fast_unwind); private: void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom, - uptr max_depth); - void SlowUnwindStack(uptr pc, uptr max_depth); + u32 max_depth); + void SlowUnwindStack(uptr pc, u32 max_depth); void SlowUnwindStackWithContext(uptr pc, void *context, - uptr max_depth); + u32 max_depth); void PopStackFrames(uptr count); uptr LocatePcInTrace(uptr pc); diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc index 2d55b73f03a..addf44f7327 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc @@ -22,36 +22,28 @@ void StackTrace::Print() const { Printf(" <empty stack>\n\n"); return; } - const int kMaxAddrFrames = 64; - InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); - for (uptr i = 0; i < kMaxAddrFrames; i++) - new(&addr_frames[i]) AddressInfo(); InternalScopedString frame_desc(GetPageSizeCached() * 2); uptr frame_num = 0; for (uptr i = 0; i < size && trace[i]; i++) { // PCs in stack traces are actually the return addresses, that is, // addresses of the next instructions after the call. uptr pc = GetPreviousInstructionPc(trace[i]); - uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( - pc, addr_frames.data(), kMaxAddrFrames); - if (addr_frames_num == 0) { - addr_frames[0].address = pc; - addr_frames_num = 1; - } - for (uptr j = 0; j < addr_frames_num; j++) { - AddressInfo &info = addr_frames[j]; + SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc); + CHECK(frames); + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { frame_desc.clear(); RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++, - info, common_flags()->strip_path_prefix); + cur->info, common_flags()->symbolize_vs_style, + common_flags()->strip_path_prefix); Printf("%s\n", frame_desc.data()); - info.Clear(); } + frames->ClearAll(); } // Always print a trailing empty line after stack trace. Printf("\n"); } -void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context, +void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top, uptr stack_bottom, bool request_fast_unwind) { top_frame_bp = (max_depth > 0) ? 
bp : 0; @@ -66,10 +58,14 @@ void BufferedStackTrace::Unwind(uptr max_depth, uptr pc, uptr bp, void *context, return; } if (!WillUseFastUnwind(request_fast_unwind)) { +#if SANITIZER_CAN_SLOW_UNWIND if (context) SlowUnwindStackWithContext(pc, context, max_depth); else SlowUnwindStack(pc, max_depth); +#else + UNREACHABLE("slow unwind requested but not available"); +#endif } else { FastUnwindStack(pc, bp, stack_top, stack_bottom, max_depth); } diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc index 300b4904c54..bcc9de78ec1 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc @@ -8,13 +8,14 @@ // This file is shared between sanitizers' run-time libraries. // //===----------------------------------------------------------------------===// + #include "sanitizer_stacktrace_printer.h" namespace __sanitizer { static const char *StripFunctionName(const char *function, const char *prefix) { - if (function == 0) return 0; - if (prefix == 0) return function; + if (!function) return nullptr; + if (!prefix) return function; uptr prefix_len = internal_strlen(prefix); if (0 == internal_strncmp(function, prefix, prefix_len)) return function + prefix_len; @@ -24,8 +25,8 @@ static const char *StripFunctionName(const char *function, const char *prefix) { static const char kDefaultFormat[] = " #%n %p %F %L"; void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, - const AddressInfo &info, const char *strip_path_prefix, - const char *strip_func_prefix) { + const AddressInfo &info, bool vs_style, + const char *strip_path_prefix, const char *strip_func_prefix) { if (0 == internal_strcmp(format, "DEFAULT")) format = kDefaultFormat; for (const char *p = format; *p != '\0'; p++) { @@ -80,14 +81,14 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, break; case 'S': // File/line information. - RenderSourceLocation(buffer, info.file, info.line, info.column, + RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style, strip_path_prefix); break; case 'L': // Source location, or module location. if (info.file) { RenderSourceLocation(buffer, info.file, info.line, info.column, - strip_path_prefix); + vs_style, strip_path_prefix); } else if (info.module) { RenderModuleLocation(buffer, info.module, info.module_offset, strip_path_prefix); @@ -97,22 +98,33 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, break; case 'M': // Module basename and offset, or PC. - if (info.module) + if (info.address & kExternalPCBit) + {} // There PCs are not meaningful. 
+ else if (info.module) buffer->append("(%s+%p)", StripModuleName(info.module), (void *)info.module_offset); else buffer->append("(%p)", (void *)info.address); break; default: - Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", - *p, *p); + Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p, + *p); Die(); } } } void RenderSourceLocation(InternalScopedString *buffer, const char *file, - int line, int column, const char *strip_path_prefix) { + int line, int column, bool vs_style, + const char *strip_path_prefix) { + if (vs_style && line > 0) { + buffer->append("%s(%d", StripPathPrefix(file, strip_path_prefix), line); + if (column > 0) + buffer->append(",%d", column); + buffer->append(")"); + return; + } + buffer->append("%s", StripPathPrefix(file, strip_path_prefix)); if (line > 0) { buffer->append(":%d", line); @@ -127,4 +139,4 @@ void RenderModuleLocation(InternalScopedString *buffer, const char *module, offset); } -} // namespace __sanitizer +} // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h index 54e2fb024df..a553568ea6f 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h +++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h @@ -46,11 +46,13 @@ namespace __sanitizer { // module+offset if it is known, or (<unknown module>) string. // %M - prints module basename and offset, if it is known, or PC. void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no, - const AddressInfo &info, const char *strip_path_prefix = "", + const AddressInfo &info, bool vs_style, + const char *strip_path_prefix = "", const char *strip_func_prefix = ""); void RenderSourceLocation(InternalScopedString *buffer, const char *file, - int line, int column, const char *strip_path_prefix); + int line, int column, bool vs_style, + const char *strip_path_prefix); void RenderModuleLocation(InternalScopedString *buffer, const char *module, uptr offset, const char *strip_path_prefix); diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h b/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h index b1241da1f73..c3245266fb9 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld.h @@ -57,7 +57,8 @@ typedef void (*StopTheWorldCallback)( // Suspend all threads in the current process and run the callback on the list // of suspended threads. This function will resume the threads before returning. -// The callback should not call any libc functions. +// The callback should not call any libc functions. The callback must not call +// exit() nor _exit() and instead return to the caller. // This function should NOT be called from multiple threads simultaneously. 
void StopTheWorld(StopTheWorldCallback callback, void *argument); diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc index 58812023674..c919e4f6e97 100644 --- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc @@ -10,13 +10,15 @@ // //===----------------------------------------------------------------------===// - #include "sanitizer_platform.h" -#if SANITIZER_LINUX && defined(__x86_64__) + +#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \ + defined(__aarch64__)) #include "sanitizer_stoptheworld.h" #include "sanitizer_platform_limits_posix.h" +#include "sanitizer_atomic.h" #include <errno.h> #include <sched.h> // for CLONE_* definitions @@ -24,9 +26,15 @@ #include <sys/prctl.h> // for PR_* definitions #include <sys/ptrace.h> // for PTRACE_* definitions #include <sys/types.h> // for pid_t +#include <sys/uio.h> // for iovec +#include <elf.h> // for NT_PRSTATUS #if SANITIZER_ANDROID && defined(__arm__) # include <linux/user.h> // for pt_regs #else +# ifdef __aarch64__ +// GLIBC 2.20+ sys/user does not include asm/ptrace.h +# include <asm/ptrace.h> +# endif # include <sys/user.h> // for user_regs_struct #endif #include <sys/wait.h> // for signal-related stuff @@ -68,11 +76,25 @@ COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t)); namespace __sanitizer { + +// Structure for passing arguments into the tracer thread. +struct TracerThreadArgument { + StopTheWorldCallback callback; + void *callback_argument; + // The tracer thread waits on this mutex while the parent finishes its + // preparations. + BlockingMutex mutex; + // Tracer thread signals its completion by setting done. + atomic_uintptr_t done; + uptr parent_pid; +}; + // This class handles thread suspending/unsuspending in the tracer thread. class ThreadSuspender { public: - explicit ThreadSuspender(pid_t pid) - : pid_(pid) { + explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg) + : arg(arg) + , pid_(pid) { CHECK_GE(pid, 0); } bool SuspendAllThreads(); @@ -81,42 +103,58 @@ class ThreadSuspender { SuspendedThreadsList &suspended_threads_list() { return suspended_threads_list_; } + TracerThreadArgument *arg; private: SuspendedThreadsList suspended_threads_list_; pid_t pid_; bool SuspendThread(SuspendedThreadID thread_id); }; -bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) { +bool ThreadSuspender::SuspendThread(SuspendedThreadID tid) { // Are we already attached to this thread? // Currently this check takes linear time, however the number of threads is // usually small. - if (suspended_threads_list_.Contains(thread_id)) + if (suspended_threads_list_.Contains(tid)) return false; int pterrno; - if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL), + if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr), &pterrno)) { // Either the thread is dead, or something prevented us from attaching. // Log this event and move on. - VReport(1, "Could not attach to thread %d (errno %d).\n", thread_id, - pterrno); + VReport(1, "Could not attach to thread %d (errno %d).\n", tid, pterrno); return false; } else { - VReport(1, "Attached to thread %d.\n", thread_id); + VReport(2, "Attached to thread %d.\n", tid); // The thread is not guaranteed to stop before ptrace returns, so we must - // wait on it. 
- uptr waitpid_status; - HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL)); - int wperrno; - if (internal_iserror(waitpid_status, &wperrno)) { - // Got a ECHILD error. I don't think this situation is possible, but it - // doesn't hurt to report it. - VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n", - thread_id, wperrno); - internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL); - return false; + // wait on it. Note: if the thread receives a signal concurrently, + // we can get notification about the signal before notification about stop. + // In such case we need to forward the signal to the thread, otherwise + // the signal will be missed (as we do PTRACE_DETACH with arg=0) and + // any logic relying on signals will break. After forwarding we need to + // continue to wait for stopping, because the thread is not stopped yet. + // We do ignore delivery of SIGSTOP, because we want to make stop-the-world + // as invisible as possible. + for (;;) { + int status; + uptr waitpid_status; + HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL)); + int wperrno; + if (internal_iserror(waitpid_status, &wperrno)) { + // Got a ECHILD error. I don't think this situation is possible, but it + // doesn't hurt to report it. + VReport(1, "Waiting on thread %d failed, detaching (errno %d).\n", + tid, wperrno); + internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr); + return false; + } + if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) { + internal_ptrace(PTRACE_CONT, tid, nullptr, + (void*)(uptr)WSTOPSIG(status)); + continue; + } + break; } - suspended_threads_list_.Append(thread_id); + suspended_threads_list_.Append(tid); return true; } } @@ -125,9 +163,9 @@ void ThreadSuspender::ResumeAllThreads() { for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) { pid_t tid = suspended_threads_list_.GetThreadID(i); int pterrno; - if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL), + if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr), &pterrno)) { - VReport(1, "Detached from thread %d.\n", tid); + VReport(2, "Detached from thread %d.\n", tid); } else { // Either the thread is dead, or we are already detached. // The latter case is possible, for instance, if this function was called @@ -140,7 +178,7 @@ void ThreadSuspender::ResumeAllThreads() { void ThreadSuspender::KillAllThreads() { for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i), - NULL, NULL); + nullptr, nullptr); } bool ThreadSuspender::SuspendAllThreads() { @@ -166,35 +204,11 @@ bool ThreadSuspender::SuspendAllThreads() { } // Pointer to the ThreadSuspender instance for use in signal handler. -static ThreadSuspender *thread_suspender_instance = NULL; - -// Signals that should not be blocked (this is used in the parent thread as well -// as the tracer thread). -static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, - SIGBUS, SIGXCPU, SIGXFSZ }; - -// Structure for passing arguments into the tracer thread. -struct TracerThreadArgument { - StopTheWorldCallback callback; - void *callback_argument; - // The tracer thread waits on this mutex while the parent finishes its - // preparations. - BlockingMutex mutex; - uptr parent_pid; -}; - -static DieCallbackType old_die_callback; +static ThreadSuspender *thread_suspender_instance = nullptr; -// Signal handler to wake up suspended threads when the tracer thread dies. 
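// Illustrative sketch (not part of this patch): the attach loop added above,
// restated with the plain glibc ptrace()/waitpid() calls instead of the
// internal_* wrappers, to show the signal-forwarding idea in isolation
// (error handling reduced to a bool result).
#include <cerrno>
#include <csignal>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

// Attaches to 'tid' and waits until it reports the stop caused by the attach.
// A signal that arrives before that stop is re-injected with PTRACE_CONT so it
// is not lost when the tracer later detaches with signal 0.
static bool AttachAndWaitForStop(pid_t tid) {
  if (ptrace(PTRACE_ATTACH, tid, nullptr, nullptr) == -1)
    return false;  // The thread is gone, or attaching is not permitted.
  for (;;) {
    int status = 0;
    // __WALL also waits for clone()'d threads, not just regular children.
    if (waitpid(tid, &status, __WALL) == -1) {
      if (errno == EINTR)
        continue;
      ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
      return false;
    }
    if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
      // Not the attach stop: forward the pending signal and keep waiting.
      ptrace(PTRACE_CONT, tid, nullptr, (void *)(long)WSTOPSIG(status));
      continue;
    }
    return true;  // Stopped: the thread can now be inspected safely.
  }
}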
-void TracerThreadSignalHandler(int signum, void *siginfo, void *) { - if (thread_suspender_instance != NULL) { - if (signum == SIGABRT) - thread_suspender_instance->KillAllThreads(); - else - thread_suspender_instance->ResumeAllThreads(); - } - internal__exit((signum == SIGABRT) ? 1 : 2); -} +// Synchronous signals that should not be blocked. +static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS, + SIGXCPU, SIGXFSZ }; static void TracerThreadDieCallback() { // Generally a call to Die() in the tracer thread should be fatal to the @@ -203,10 +217,29 @@ static void TracerThreadDieCallback() { // point. So we correctly handle calls to Die() from within the callback, but // not those that happen before or after the callback. Hopefully there aren't // a lot of opportunities for that to happen... - if (thread_suspender_instance) - thread_suspender_instance->KillAllThreads(); - if (old_die_callback) - old_die_callback(); + ThreadSuspender *inst = thread_suspender_instance; + if (inst && stoptheworld_tracer_pid == internal_getpid()) { + inst->KillAllThreads(); + thread_suspender_instance = nullptr; + } +} + +// Signal handler to wake up suspended threads when the tracer thread dies. +static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) { + SignalContext ctx = SignalContext::Create(siginfo, uctx); + VPrintf(1, "Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", + signum, ctx.addr, ctx.pc, ctx.sp); + ThreadSuspender *inst = thread_suspender_instance; + if (inst) { + if (signum == SIGABRT) + inst->KillAllThreads(); + else + inst->ResumeAllThreads(); + RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback)); + thread_suspender_instance = nullptr; + atomic_store(&inst->arg->done, 1, memory_order_relaxed); + } + internal__exit((signum == SIGABRT) ? 1 : 2); } // Size of alternative stack for signal handlers in the tracer thread. @@ -226,9 +259,9 @@ static int TracerThread(void* argument) { tracer_thread_argument->mutex.Lock(); tracer_thread_argument->mutex.Unlock(); - SetDieCallback(TracerThreadDieCallback); + RAW_CHECK(AddDieCallback(TracerThreadDieCallback)); - ThreadSuspender thread_suspender(internal_getppid()); + ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument); // Global pointer for the signal handler. thread_suspender_instance = &thread_suspender; @@ -238,19 +271,16 @@ static int TracerThread(void* argument) { internal_memset(&handler_stack, 0, sizeof(handler_stack)); handler_stack.ss_sp = handler_stack_memory.data(); handler_stack.ss_size = kHandlerStackSize; - internal_sigaltstack(&handler_stack, NULL); - - // Install our handler for fatal signals. Other signals should be blocked by - // the mask we inherited from the caller thread. - for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals); - signal_index++) { - __sanitizer_sigaction new_sigaction; - internal_memset(&new_sigaction, 0, sizeof(new_sigaction)); - new_sigaction.sigaction = TracerThreadSignalHandler; - new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO; - internal_sigfillset(&new_sigaction.sa_mask); - internal_sigaction_norestorer(kUnblockedSignals[signal_index], - &new_sigaction, NULL); + internal_sigaltstack(&handler_stack, nullptr); + + // Install our handler for synchronous signals. Other signals should be + // blocked by the mask we inherited from the parent thread. 
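// Illustrative sketch (not part of this patch): the handler installation in
// this hunk, written with the plain libc sigaltstack()/sigaction() API instead
// of the internal_* wrappers. Async signals are assumed to be already blocked
// by the inherited thread mask, so only the synchronous ("fatal") signals get
// a handler, and the handler runs on a dedicated alternate stack.
#include <cstring>
#include <signal.h>
#include <unistd.h>

static char g_handler_stack[64 * 1024];  // Alternate stack for the handlers.

static void SyncSignalHandler(int signum, siginfo_t *, void *) {
  // A real handler must restrict itself to async-signal-safe calls.
  _exit(signum == SIGABRT ? 1 : 2);
}

static void InstallSyncSignalHandlers() {
  stack_t ss;
  std::memset(&ss, 0, sizeof(ss));
  ss.ss_sp = g_handler_stack;
  ss.ss_size = sizeof(g_handler_stack);
  sigaltstack(&ss, nullptr);

  static const int kSyncSignals[] = {SIGABRT, SIGILL,  SIGFPE, SIGSEGV,
                                     SIGBUS,  SIGXCPU, SIGXFSZ};
  for (int sig : kSyncSignals) {
    struct sigaction act;
    std::memset(&act, 0, sizeof(act));
    act.sa_sigaction = SyncSignalHandler;
    act.sa_flags = SA_ONSTACK | SA_SIGINFO;  // Run on the alternate stack.
    sigaction(sig, &act, nullptr);
  }
}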
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) { + __sanitizer_sigaction act; + internal_memset(&act, 0, sizeof(act)); + act.sigaction = TracerThreadSignalHandler; + act.sa_flags = SA_ONSTACK | SA_SIGINFO; + internal_sigaction_norestorer(kSyncSignals[i], &act, 0); } int exit_code = 0; @@ -263,9 +293,9 @@ static int TracerThread(void* argument) { thread_suspender.ResumeAllThreads(); exit_code = 0; } - thread_suspender_instance = NULL; - handler_stack.ss_flags = SS_DISABLE; - internal_sigaltstack(&handler_stack, NULL); + RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback)); + thread_suspender_instance = nullptr; + atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed); return exit_code; } @@ -278,7 +308,7 @@ class ScopedStackSpaceWithGuard { // in the future. guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard"); - CHECK_EQ(guard_start_, (uptr)Mprotect((uptr)guard_start_, guard_size_)); + CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_)); } ~ScopedStackSpaceWithGuard() { UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_); @@ -297,53 +327,21 @@ class ScopedStackSpaceWithGuard { // into globals. static __sanitizer_sigset_t blocked_sigset; static __sanitizer_sigset_t old_sigset; -static __sanitizer_sigaction old_sigactions - [ARRAY_SIZE(kUnblockedSignals)]; class StopTheWorldScope { public: StopTheWorldScope() { - // Block all signals that can be blocked safely, and install - // default handlers for the remaining signals. - // We cannot allow user-defined handlers to run while the ThreadSuspender - // thread is active, because they could conceivably call some libc functions - // which modify errno (which is shared between the two threads). - internal_sigfillset(&blocked_sigset); - for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals); - signal_index++) { - // Remove the signal from the set of blocked signals. - internal_sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]); - // Install the default handler. - __sanitizer_sigaction new_sigaction; - internal_memset(&new_sigaction, 0, sizeof(new_sigaction)); - new_sigaction.handler = SIG_DFL; - internal_sigfillset(&new_sigaction.sa_mask); - internal_sigaction_norestorer(kUnblockedSignals[signal_index], - &new_sigaction, &old_sigactions[signal_index]); - } - int sigprocmask_status = - internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset); - CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail // Make this process dumpable. Processes that are not dumpable cannot be // attached to. process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0); if (!process_was_dumpable_) internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); - old_die_callback = GetDieCallback(); } ~StopTheWorldScope() { - SetDieCallback(old_die_callback); // Restore the dumpable flag. if (!process_was_dumpable_) internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0); - // Restore the signal handlers. 
- for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals); - signal_index++) { - internal_sigaction_norestorer(kUnblockedSignals[signal_index], - &old_sigactions[signal_index], NULL); - } - internal_sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset); } private: @@ -371,16 +369,42 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) { tracer_thread_argument.callback = callback; tracer_thread_argument.callback_argument = argument; tracer_thread_argument.parent_pid = internal_getpid(); + atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed); const uptr kTracerStackSize = 2 * 1024 * 1024; ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize); // Block the execution of TracerThread until after we have set ptrace // permissions. tracer_thread_argument.mutex.Lock(); + // Signal handling story. + // We don't want async signals to be delivered to the tracer thread, + // so we block all async signals before creating the thread. An async signal + // handler can temporary modify errno, which is shared with this thread. + // We ought to use pthread_sigmask here, because sigprocmask has undefined + // behavior in multithreaded programs. However, on linux sigprocmask is + // equivalent to pthread_sigmask with the exception that pthread_sigmask + // does not allow to block some signals used internally in pthread + // implementation. We are fine with blocking them here, we are really not + // going to pthread_cancel the thread. + // The tracer thread should not raise any synchronous signals. But in case it + // does, we setup a special handler for sync signals that properly kills the + // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers + // in the tracer thread won't interfere with user program. Double note: if a + // user does something along the lines of 'kill -11 pid', that can kill the + // process even if user setup own handler for SEGV. + // Thing to watch out for: this code should not change behavior of user code + // in any observable way. In particular it should not override user signal + // handlers. + internal_sigfillset(&blocked_sigset); + for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) + internal_sigdelset(&blocked_sigset, kSyncSignals[i]); + int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset); + CHECK_EQ(rv, 0); uptr tracer_pid = internal_clone( TracerThread, tracer_stack.Bottom(), CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED, - &tracer_thread_argument, 0 /* parent_tidptr */, 0 /* newtls */, 0 - /* child_tidptr */); + &tracer_thread_argument, nullptr /* parent_tidptr */, + nullptr /* newtls */, nullptr /* child_tidptr */); + internal_sigprocmask(SIG_SETMASK, &old_sigset, 0); int local_errno = 0; if (internal_iserror(tracer_pid, &local_errno)) { VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno); @@ -394,14 +418,27 @@ void StopTheWorld(StopTheWorldCallback callback, void *argument) { #endif // Allow the tracer thread to start. tracer_thread_argument.mutex.Unlock(); - // Since errno is shared between this thread and the tracer thread, we - // must avoid using errno while the tracer thread is running. - // At this point, any signal will either be blocked or kill us, so waitpid - // should never return (and set errno) while the tracer thread is alive. - uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL); - if (internal_iserror(waitpid_status, &local_errno)) + // NOTE: errno is shared between this thread and the tracer thread. 
+ // internal_waitpid() may call syscall() which can access/spoil errno, + // so we can't call it now. Instead we for the tracer thread to finish using + // the spin loop below. Man page for sched_yield() says "In the Linux + // implementation, sched_yield() always succeeds", so let's hope it does not + // spoil errno. Note that this spin loop runs only for brief periods before + // the tracer thread has suspended us and when it starts unblocking threads. + while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0) + sched_yield(); + // Now the tracer thread is about to exit and does not touch errno, + // wait for it. + for (;;) { + uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL); + if (!internal_iserror(waitpid_status, &local_errno)) + break; + if (local_errno == EINTR) + continue; VReport(1, "Waiting on the tracer thread failed (errno %d).\n", local_errno); + break; + } } } @@ -430,6 +467,11 @@ typedef pt_regs regs_struct; typedef struct user regs_struct; #define REG_SP regs[EF_REG29] +#elif defined(__aarch64__) +typedef struct user_pt_regs regs_struct; +#define REG_SP sp +#define ARCH_IOVEC_FOR_GETREGSET + #else #error "Unsupported architecture" #endif // SANITIZER_ANDROID && defined(__arm__) @@ -440,8 +482,18 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index, pid_t tid = GetThreadID(index); regs_struct regs; int pterrno; - if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, ®s), - &pterrno)) { +#ifdef ARCH_IOVEC_FOR_GETREGSET + struct iovec regset_io; + regset_io.iov_base = ®s; + regset_io.iov_len = sizeof(regs_struct); + bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid, + (void*)NT_PRSTATUS, (void*)®set_io), + &pterrno); +#else + bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr, + ®s), &pterrno); +#endif + if (isErr) { VReport(1, "Could not get registers from thread %d (errno %d).\n", tid, pterrno); return -1; @@ -455,6 +507,7 @@ int SuspendedThreadsList::GetRegistersAndSP(uptr index, uptr SuspendedThreadsList::RegisterCount() { return sizeof(regs_struct) / sizeof(uptr); } -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_LINUX && defined(__x86_64__) +#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) + // || defined(__aarch64__) diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc index ab40598ae8e..2b6a78155fd 100644 --- a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc +++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc @@ -5,7 +5,7 @@ // //===----------------------------------------------------------------------===// // -// Suppression parsing/matching code shared between TSan and LSan. +// Suppression parsing/matching code. 
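// Illustrative sketch (not part of this patch): the ARCH_IOVEC_FOR_GETREGSET
// path added in the stop-the-world hunk above reads registers through the
// regset API instead of PTRACE_GETREGS. With plain libc, on an already
// ptrace-stopped thread, the same call looks like this (x86_64 shown; on
// aarch64 the buffer would be a struct user_pt_regs):
#include <elf.h>        // NT_PRSTATUS
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>    // struct iovec
#include <sys/user.h>   // user_regs_struct

static bool ReadRegisters(pid_t tid, user_regs_struct *regs) {
  struct iovec iov;
  iov.iov_base = regs;
  iov.iov_len = sizeof(*regs);
  // NT_PRSTATUS selects the general-purpose register set; on return the kernel
  // shrinks iov.iov_len to the number of bytes it actually filled in.
  return ptrace(PTRACE_GETREGSET, tid, (void *)NT_PRSTATUS, &iov) == 0;
}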
// //===----------------------------------------------------------------------===// @@ -19,90 +19,73 @@ namespace __sanitizer { -static const char *const kTypeStrings[SuppressionTypeCount] = { - "none", "race", "mutex", "thread", "signal", - "leak", "called_from_lib", "deadlock", "vptr_check"}; +SuppressionContext::SuppressionContext(const char *suppression_types[], + int suppression_types_num) + : suppression_types_(suppression_types), + suppression_types_num_(suppression_types_num), suppressions_(1), + can_parse_(true) { + CHECK_LE(suppression_types_num_, kMaxSuppressionTypes); + internal_memset(has_suppression_type_, 0, suppression_types_num_); +} -bool TemplateMatch(char *templ, const char *str) { - if (str == 0 || str[0] == 0) - return false; - bool start = false; - if (templ && templ[0] == '^') { - start = true; - templ++; +static bool GetPathAssumingFileIsRelativeToExec(const char *file_path, + /*out*/char *new_file_path, + uptr new_file_path_size) { + InternalScopedString exec(kMaxPathLength); + if (ReadBinaryNameCached(exec.data(), exec.size())) { + const char *file_name_pos = StripModuleName(exec.data()); + uptr path_to_exec_len = file_name_pos - exec.data(); + internal_strncat(new_file_path, exec.data(), + Min(path_to_exec_len, new_file_path_size - 1)); + internal_strncat(new_file_path, file_path, + new_file_path_size - internal_strlen(new_file_path) - 1); + return true; } - bool asterisk = false; - while (templ && templ[0]) { - if (templ[0] == '*') { - templ++; - start = false; - asterisk = true; - continue; - } - if (templ[0] == '$') - return str[0] == 0 || asterisk; - if (str[0] == 0) - return false; - char *tpos = (char*)internal_strchr(templ, '*'); - char *tpos1 = (char*)internal_strchr(templ, '$'); - if (tpos == 0 || (tpos1 && tpos1 < tpos)) - tpos = tpos1; - if (tpos != 0) - tpos[0] = 0; - const char *str0 = str; - const char *spos = internal_strstr(str, templ); - str = spos + internal_strlen(templ); - templ = tpos; - if (tpos) - tpos[0] = tpos == tpos1 ? '$' : '*'; - if (spos == 0) - return false; - if (start && spos != str0) - return false; - start = false; - asterisk = false; - } - return true; + return false; } -ALIGNED(64) static char placeholder[sizeof(SuppressionContext)]; -static SuppressionContext *suppression_ctx = 0; +void SuppressionContext::ParseFromFile(const char *filename) { + if (filename[0] == '\0') + return; -SuppressionContext *SuppressionContext::Get() { - CHECK(suppression_ctx); - return suppression_ctx; -} + // If we cannot find the file, check if its location is relative to + // the location of the executable. + InternalScopedString new_file_path(kMaxPathLength); + if (!FileExists(filename) && !IsAbsolutePath(filename) && + GetPathAssumingFileIsRelativeToExec(filename, new_file_path.data(), + new_file_path.size())) { + filename = new_file_path.data(); + } -void SuppressionContext::InitIfNecessary() { - if (suppression_ctx) - return; - suppression_ctx = new(placeholder) SuppressionContext; - if (common_flags()->suppressions[0] == '\0') - return; - char *suppressions_from_file; + // Read the file. 
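// Illustrative sketch (not part of this patch): a suppressions file is a list
// of "type:template" lines, '#' starts a comment, and the set of legal types
// is now supplied by each tool instead of coming from a fixed enum. Example
// file contents (the symbol names are made up):
//
//   # ignore a known leak in a third-party library
//   leak:ThirdPartyInit
//   race:^ProducerThread$
//
// A minimal standalone parser for that line format, assuming a tool that
// registered the types {"leak", "race"}; an unknown type makes the real
// Parse() in this file call Die() instead of skipping the line:
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct SimpleSuppression { std::string type, templ; };

static std::vector<SimpleSuppression> ParseSuppressions(
    const std::string &text, const std::vector<std::string> &types) {
  std::vector<SimpleSuppression> result;
  std::istringstream in(text);
  std::string line;
  while (std::getline(in, line)) {
    if (!line.empty() && line.back() == '\r') line.pop_back();  // CRLF files.
    if (line.empty() || line[0] == '#') continue;               // Comment.
    std::string::size_type colon = line.find(':');
    if (colon == std::string::npos) continue;
    std::string type = line.substr(0, colon);
    for (const std::string &t : types) {
      if (t == type) {
        result.push_back({type, line.substr(colon + 1)});
        break;
      }
    }
  }
  return result;
}

int main() {
  std::string file = "# comment\nleak:ThirdPartyInit\nrace:^ProducerThread$\n";
  for (const auto &s : ParseSuppressions(file, {"leak", "race"}))
    std::cout << s.type << " -> " << s.templ << "\n";
  return 0;
}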
+ VPrintf(1, "%s: reading suppressions file at %s\n", + SanitizerToolName, filename); + char *file_contents; uptr buffer_size; - uptr contents_size = - ReadFileToBuffer(common_flags()->suppressions, &suppressions_from_file, - &buffer_size, 1 << 26 /* max_len */); - if (contents_size == 0) { + uptr contents_size; + if (!ReadFileToBuffer(filename, &file_contents, &buffer_size, + &contents_size)) { Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName, - common_flags()->suppressions); + filename); Die(); } - suppression_ctx->Parse(suppressions_from_file); + + Parse(file_contents); } -bool SuppressionContext::Match(const char *str, SuppressionType type, +bool SuppressionContext::Match(const char *str, const char *type, Suppression **s) { can_parse_ = false; - uptr i; - for (i = 0; i < suppressions_.size(); i++) - if (type == suppressions_[i].type && - TemplateMatch(suppressions_[i].templ, str)) - break; - if (i == suppressions_.size()) return false; - *s = &suppressions_[i]; - return true; + if (!HasSuppressionType(type)) + return false; + for (uptr i = 0; i < suppressions_.size(); i++) { + Suppression &cur = suppressions_[i]; + if (0 == internal_strcmp(cur.type, type) && TemplateMatch(cur.templ, str)) { + *s = &cur; + return true; + } + } + return false; } static const char *StripPrefix(const char *str, const char *prefix) { @@ -127,28 +110,28 @@ void SuppressionContext::Parse(const char *str) { end = line + internal_strlen(line); if (line != end && line[0] != '#') { const char *end2 = end; - while (line != end2 && (end2[-1] == ' ' || end2[-1] == '\t')) + while (line != end2 && + (end2[-1] == ' ' || end2[-1] == '\t' || end2[-1] == '\r')) end2--; int type; - for (type = 0; type < SuppressionTypeCount; type++) { - const char *next_char = StripPrefix(line, kTypeStrings[type]); + for (type = 0; type < suppression_types_num_; type++) { + const char *next_char = StripPrefix(line, suppression_types_[type]); if (next_char && *next_char == ':') { line = ++next_char; break; } } - if (type == SuppressionTypeCount) { + if (type == suppression_types_num_) { Printf("%s: failed to parse suppressions\n", SanitizerToolName); Die(); } - Suppression s; - s.type = static_cast<SuppressionType>(type); + Suppression s = {}; + s.type = suppression_types_[type]; s.templ = (char*)InternalAlloc(end2 - line + 1); internal_memcpy(s.templ, line, end2 - line); s.templ[end2 - line] = 0; - s.hit_count = 0; - s.weight = 0; suppressions_.push_back(s); + has_suppression_type_[type] = true; } if (end[0] == 0) break; @@ -160,6 +143,14 @@ uptr SuppressionContext::SuppressionCount() const { return suppressions_.size(); } +bool SuppressionContext::HasSuppressionType(const char *type) const { + for (int i = 0; i < suppression_types_num_; i++) { + if (0 == internal_strcmp(type, suppression_types_[i])) + return has_suppression_type_[i]; + } + return false; +} + const Suppression *SuppressionContext::SuppressionAt(uptr i) const { CHECK_LT(i, suppressions_.size()); return &suppressions_[i]; @@ -168,13 +159,8 @@ const Suppression *SuppressionContext::SuppressionAt(uptr i) const { void SuppressionContext::GetMatched( InternalMmapVector<Suppression *> *matched) { for (uptr i = 0; i < suppressions_.size(); i++) - if (suppressions_[i].hit_count) + if (atomic_load_relaxed(&suppressions_[i].hit_count)) matched->push_back(&suppressions_[i]); } -const char *SuppressionTypeString(SuppressionType t) { - CHECK(t < SuppressionTypeCount); - return kTypeStrings[t]; -} - } // namespace __sanitizer diff --git 
a/libsanitizer/sanitizer_common/sanitizer_suppressions.h b/libsanitizer/sanitizer_common/sanitizer_suppressions.h index 84ee834a9fd..efec926476b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_suppressions.h +++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.h @@ -5,64 +5,50 @@ // //===----------------------------------------------------------------------===// // -// Suppression parsing/matching code shared between TSan and LSan. +// Suppression parsing/matching code. // //===----------------------------------------------------------------------===// #ifndef SANITIZER_SUPPRESSIONS_H #define SANITIZER_SUPPRESSIONS_H #include "sanitizer_common.h" +#include "sanitizer_atomic.h" #include "sanitizer_internal_defs.h" namespace __sanitizer { -enum SuppressionType { - SuppressionNone, - SuppressionRace, - SuppressionMutex, - SuppressionThread, - SuppressionSignal, - SuppressionLeak, - SuppressionLib, - SuppressionDeadlock, - SuppressionVptrCheck, - SuppressionTypeCount -}; - struct Suppression { - SuppressionType type; + const char *type; char *templ; - unsigned hit_count; + atomic_uint32_t hit_count; uptr weight; }; class SuppressionContext { public: + // Create new SuppressionContext capable of parsing given suppression types. + SuppressionContext(const char *supprression_types[], + int suppression_types_num); + + void ParseFromFile(const char *filename); void Parse(const char *str); - bool Match(const char* str, SuppressionType type, Suppression **s); + + bool Match(const char *str, const char *type, Suppression **s); uptr SuppressionCount() const; + bool HasSuppressionType(const char *type) const; const Suppression *SuppressionAt(uptr i) const; void GetMatched(InternalMmapVector<Suppression *> *matched); - // Create a SuppressionContext singleton if it hasn't been created earlier. - // Not thread safe. Must be called early during initialization (but after - // runtime flags are parsed). - static void InitIfNecessary(); - // Returns a SuppressionContext singleton. - static SuppressionContext *Get(); - private: - SuppressionContext() : suppressions_(1), can_parse_(true) {} + static const int kMaxSuppressionTypes = 16; + const char **const suppression_types_; + const int suppression_types_num_; + InternalMmapVector<Suppression> suppressions_; + bool has_suppression_type_[kMaxSuppressionTypes]; bool can_parse_; - - friend class SuppressionContextTest; }; -const char *SuppressionTypeString(SuppressionType t); - -bool TemplateMatch(char *templ, const char *str); - } // namespace __sanitizer #endif // SANITIZER_SUPPRESSIONS_H diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc index 6821e5a3262..0e58d5fed6e 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc @@ -9,24 +9,63 @@ // run-time libraries. 
//===----------------------------------------------------------------------===// +#include "sanitizer_allocator_internal.h" #include "sanitizer_platform.h" #include "sanitizer_internal_defs.h" +#include "sanitizer_libc.h" #include "sanitizer_placement_new.h" -#include "sanitizer_symbolizer.h" +#include "sanitizer_symbolizer_internal.h" namespace __sanitizer { +AddressInfo::AddressInfo() { + internal_memset(this, 0, sizeof(AddressInfo)); + function_offset = kUnknown; +} + +void AddressInfo::Clear() { + InternalFree(module); + InternalFree(function); + InternalFree(file); + internal_memset(this, 0, sizeof(AddressInfo)); + function_offset = kUnknown; +} + +void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset) { + module = internal_strdup(mod_name); + module_offset = mod_offset; +} + +SymbolizedStack::SymbolizedStack() : next(nullptr), info() {} + +SymbolizedStack *SymbolizedStack::New(uptr addr) { + void *mem = InternalAlloc(sizeof(SymbolizedStack)); + SymbolizedStack *res = new(mem) SymbolizedStack(); + res->info.address = addr; + return res; +} + +void SymbolizedStack::ClearAll() { + info.Clear(); + if (next) + next->ClearAll(); + InternalFree(this); +} + +DataInfo::DataInfo() { + internal_memset(this, 0, sizeof(DataInfo)); +} + +void DataInfo::Clear() { + InternalFree(module); + InternalFree(name); + internal_memset(this, 0, sizeof(DataInfo)); +} + Symbolizer *Symbolizer::symbolizer_; StaticSpinMutex Symbolizer::init_mu_; LowLevelAllocator Symbolizer::symbolizer_allocator_; -Symbolizer *Symbolizer::Disable() { - CHECK_EQ(0, symbolizer_); - // Initialize a dummy symbolizer. - symbolizer_ = new(symbolizer_allocator_) Symbolizer; - return symbolizer_; -} - void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook, Symbolizer::EndSymbolizationHook end_hook) { CHECK(start_hook_ == 0 && end_hook_ == 0); @@ -34,7 +73,29 @@ void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook, end_hook_ = end_hook; } -Symbolizer::Symbolizer() : start_hook_(0), end_hook_(0) {} +const char *Symbolizer::ModuleNameOwner::GetOwnedCopy(const char *str) { + mu_->CheckLocked(); + + // 'str' will be the same string multiple times in a row, optimize this case. + if (last_match_ && !internal_strcmp(last_match_, str)) + return last_match_; + + // FIXME: this is linear search. + // We should optimize this further if this turns out to be a bottleneck later. + for (uptr i = 0; i < storage_.size(); ++i) { + if (!internal_strcmp(storage_[i], str)) { + last_match_ = storage_[i]; + return last_match_; + } + } + last_match_ = internal_strdup(str); + storage_.push_back(last_match_); + return last_match_; +} + +Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools) + : module_names_(&mu_), n_modules_(0), modules_fresh_(false), tools_(tools), + start_hook_(0), end_hook_(0) {} Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym) : sym_(sym) { diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h index 2ff99372da9..0a443a70115 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h @@ -17,13 +17,14 @@ #ifndef SANITIZER_SYMBOLIZER_H #define SANITIZER_SYMBOLIZER_H -#include "sanitizer_allocator_internal.h" -#include "sanitizer_internal_defs.h" -#include "sanitizer_libc.h" +#include "sanitizer_common.h" +#include "sanitizer_mutex.h" namespace __sanitizer { struct AddressInfo { + // Owns all the string members. 
Storage for them is + // (de)allocated using sanitizer internal allocator. uptr address; char *module; @@ -37,75 +38,68 @@ struct AddressInfo { int line; int column; - AddressInfo() { - internal_memset(this, 0, sizeof(AddressInfo)); - function_offset = kUnknown; - } - + AddressInfo(); // Deletes all strings and resets all fields. - void Clear() { - InternalFree(module); - InternalFree(function); - InternalFree(file); - internal_memset(this, 0, sizeof(AddressInfo)); - function_offset = kUnknown; - } + void Clear(); + void FillModuleInfo(const char *mod_name, uptr mod_offset); +}; - void FillAddressAndModuleInfo(uptr addr, const char *mod_name, - uptr mod_offset) { - address = addr; - module = internal_strdup(mod_name); - module_offset = mod_offset; - } +// Linked list of symbolized frames (each frame is described by AddressInfo). +struct SymbolizedStack { + SymbolizedStack *next; + AddressInfo info; + static SymbolizedStack *New(uptr addr); + // Deletes current, and all subsequent frames in the linked list. + // The object cannot be accessed after the call to this function. + void ClearAll(); + + private: + SymbolizedStack(); }; // For now, DataInfo is used to describe global variable. struct DataInfo { + // Owns all the string members. Storage for them is + // (de)allocated using sanitizer internal allocator. char *module; uptr module_offset; char *name; uptr start; uptr size; - DataInfo() { - internal_memset(this, 0, sizeof(DataInfo)); - } - - void Clear() { - InternalFree(module); - InternalFree(name); - internal_memset(this, 0, sizeof(DataInfo)); - } + DataInfo(); + void Clear(); }; -class Symbolizer { +class SymbolizerTool; + +class Symbolizer final { public: /// Initialize and return platform-specific implementation of symbolizer /// (if it wasn't already initialized). static Symbolizer *GetOrInit(); - // Fills at most "max_frames" elements of "frames" with descriptions - // for a given address (in all inlined functions). Returns the number - // of descriptions actually filled. - virtual uptr SymbolizePC(uptr address, AddressInfo *frames, uptr max_frames) { - return 0; - } - virtual bool SymbolizeData(uptr address, DataInfo *info) { - return false; - } - virtual bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name, - uptr *module_address) { - return false; - } - virtual bool CanReturnFileLineInfo() { - return false; + // Returns a list of symbolized frames for a given address (containing + // all inlined functions, if necessary). + SymbolizedStack *SymbolizePC(uptr address); + bool SymbolizeData(uptr address, DataInfo *info); + + // The module names Symbolizer returns are stable and unique for every given + // module. It is safe to store and compare them as pointers. + bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name, + uptr *module_address); + const char *GetModuleNameForPc(uptr pc) { + const char *module_name = nullptr; + uptr unused; + if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused)) + return module_name; + return nullptr; } + // Release internal caches (if any). - virtual void Flush() {} + void Flush(); // Attempts to demangle the provided C++ mangled name. - virtual const char *Demangle(const char *name) { - return name; - } - virtual void PrepareForSandboxing() {} + const char *Demangle(const char *name); + void PrepareForSandboxing(); // Allow user to install hooks that would be called before/after Symbolizer // does the actual file/line info fetching. 
Specific sanitizers may need this @@ -118,16 +112,53 @@ class Symbolizer { EndSymbolizationHook end_hook); private: + // GetModuleNameAndOffsetForPC has to return a string to the caller. + // Since the corresponding module might get unloaded later, we should create + // our owned copies of the strings that we can safely return. + // ModuleNameOwner does not provide any synchronization, thus calls to + // its method should be protected by |mu_|. + class ModuleNameOwner { + public: + explicit ModuleNameOwner(BlockingMutex *synchronized_by) + : storage_(kInitialCapacity), last_match_(nullptr), + mu_(synchronized_by) {} + const char *GetOwnedCopy(const char *str); + + private: + static const uptr kInitialCapacity = 1000; + InternalMmapVector<const char*> storage_; + const char *last_match_; + + BlockingMutex *mu_; + } module_names_; + /// Platform-specific function for creating a Symbolizer object. static Symbolizer *PlatformInit(); - /// Initialize the symbolizer in a disabled state. Not thread safe. - static Symbolizer *Disable(); + + bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name, + uptr *module_offset); + LoadedModule *FindModuleForAddress(uptr address); + LoadedModule modules_[kMaxNumberOfModules]; + uptr n_modules_; + // If stale, need to reload the modules before looking up addresses. + bool modules_fresh_; + + // Platform-specific default demangler, must not return nullptr. + const char *PlatformDemangle(const char *name); + void PlatformPrepareForSandboxing(); static Symbolizer *symbolizer_; static StaticSpinMutex init_mu_; - protected: - Symbolizer(); + // Mutex locked from public methods of |Symbolizer|, so that the internals + // (including individual symbolizer tools and platform-specific methods) are + // always synchronized. + BlockingMutex mu_; + + typedef IntrusiveList<SymbolizerTool>::Iterator Iterator; + IntrusiveList<SymbolizerTool> tools_; + + explicit Symbolizer(IntrusiveList<SymbolizerTool> tools); static LowLevelAllocator symbolizer_allocator_; diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h new file mode 100644 index 00000000000..a87964b4636 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h @@ -0,0 +1,149 @@ +//===-- sanitizer_symbolizer_internal.h -------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Header for internal classes and functions to be used by implementations of +// symbolizers. +// +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H +#define SANITIZER_SYMBOLIZER_INTERNAL_H + +#include "sanitizer_symbolizer.h" + +namespace __sanitizer { + +// Parsing helpers, 'str' is searched for delimiter(s) and a string or uptr +// is extracted. When extracting a string, a newly allocated (using +// InternalAlloc) and null-terminataed buffer is returned. They return a pointer +// to the next characted after the found delimiter. 
+const char *ExtractToken(const char *str, const char *delims, char **result); +const char *ExtractInt(const char *str, const char *delims, int *result); +const char *ExtractUptr(const char *str, const char *delims, uptr *result); +const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter, + char **result); + +const char *DemangleCXXABI(const char *name); + +// SymbolizerTool is an interface that is implemented by individual "tools" +// that can perform symbolication (external llvm-symbolizer, libbacktrace, +// Windows DbgHelp symbolizer, etc.). +class SymbolizerTool { + public: + // The main |Symbolizer| class implements a "fallback chain" of symbolizer + // tools. In a request to symbolize an address, if one tool returns false, + // the next tool in the chain will be tried. + SymbolizerTool *next; + + SymbolizerTool() : next(nullptr) { } + + // Can't declare pure virtual functions in sanitizer runtimes: + // __cxa_pure_virtual might be unavailable. + + // The |stack| parameter is inout. It is pre-filled with the address, + // module base and module offset values and is to be used to construct + // other stack frames. + virtual bool SymbolizePC(uptr addr, SymbolizedStack *stack) { + UNIMPLEMENTED(); + } + + // The |info| parameter is inout. It is pre-filled with the module base + // and module offset values. + virtual bool SymbolizeData(uptr addr, DataInfo *info) { + UNIMPLEMENTED(); + } + + virtual void Flush() {} + + // Return nullptr to fallback to the default platform-specific demangler. + virtual const char *Demangle(const char *name) { + return nullptr; + } +}; + +// SymbolizerProcess encapsulates communication between the tool and +// external symbolizer program, running in a different subprocess. +// SymbolizerProcess may not be used from two threads simultaneously. +class SymbolizerProcess { + public: + explicit SymbolizerProcess(const char *path, bool use_forkpty = false); + const char *SendCommand(const char *command); + + protected: + virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const { + UNIMPLEMENTED(); + } + + /// The maximum number of arguments required to invoke a tool process. + enum { kArgVMax = 6 }; + + /// Fill in an argv array to invoke the child process. + virtual void GetArgV(const char *path_to_binary, + const char *(&argv)[kArgVMax]) const { + UNIMPLEMENTED(); + } + + virtual bool ReadFromSymbolizer(char *buffer, uptr max_length); + + private: + bool Restart(); + const char *SendCommandImpl(const char *command); + bool WriteToSymbolizer(const char *buffer, uptr length); + bool StartSymbolizerSubprocess(); + + const char *path_; + fd_t input_fd_; + fd_t output_fd_; + + static const uptr kBufferSize = 16 * 1024; + char buffer_[kBufferSize]; + + static const uptr kMaxTimesRestarted = 5; + static const int kSymbolizerStartupTimeMillis = 10; + uptr times_restarted_; + bool failed_to_start_; + bool reported_invalid_path_; + bool use_forkpty_; +}; + +class LLVMSymbolizerProcess; + +// This tool invokes llvm-symbolizer in a subprocess. It should be as portable +// as the llvm-symbolizer tool is. 
+class LLVMSymbolizer : public SymbolizerTool { + public: + explicit LLVMSymbolizer(const char *path, LowLevelAllocator *allocator); + + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; + + bool SymbolizeData(uptr addr, DataInfo *info) override; + + private: + const char *SendCommand(bool is_data, const char *module_name, + uptr module_offset); + + LLVMSymbolizerProcess *symbolizer_process_; + static const uptr kBufferSize = 16 * 1024; + char buffer_[kBufferSize]; +}; + +// Parses one or more two-line strings in the following format: +// <function_name> +// <file_name>:<line_number>[:<column_number>] +// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of +// them use the same output format. Returns true if any useful debug +// information was found. +void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res); + +// Parses a two-line string in the following format: +// <symbol_name> +// <start_address> <size> +// Used by LLVMSymbolizer and InternalSymbolizer. +void ParseSymbolizeDataOutput(const char *str, DataInfo *info); + +} // namespace __sanitizer + +#endif // SANITIZER_SYMBOLIZER_INTERNAL_H diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc index 8bae57bfe66..b5bcfdbc089 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc @@ -31,6 +31,8 @@ namespace __sanitizer { +static char *DemangleAlloc(const char *name, bool always_alloc); + #if SANITIZER_LIBBACKTRACE namespace { @@ -81,44 +83,49 @@ char *CplusV3Demangle(const char *name) { } # endif // SANITIZER_CP_DEMANGLE -struct SymbolizeCodeData { - AddressInfo *frames; - uptr n_frames; - uptr max_frames; - const char *module_name; - uptr module_offset; +struct SymbolizeCodeCallbackArg { + SymbolizedStack *first; + SymbolizedStack *last; + uptr frames_symbolized; + + AddressInfo *get_new_frame(uintptr_t addr) { + CHECK(last); + if (frames_symbolized > 0) { + SymbolizedStack *cur = SymbolizedStack::New(addr); + AddressInfo *info = &cur->info; + info->FillModuleInfo(first->info.module, first->info.module_offset); + last->next = cur; + last = cur; + } + CHECK_EQ(addr, first->info.address); + CHECK_EQ(addr, last->info.address); + return &last->info; + } }; extern "C" { static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr, const char *filename, int lineno, const char *function) { - SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata; + SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata; if (function) { - AddressInfo *info = &cdata->frames[cdata->n_frames++]; - info->Clear(); - info->FillAddressAndModuleInfo(addr, cdata->module_name, - cdata->module_offset); - info->function = LibbacktraceSymbolizer::Demangle(function, true); + AddressInfo *info = cdata->get_new_frame(addr); + info->function = DemangleAlloc(function, /*always_alloc*/ true); if (filename) info->file = internal_strdup(filename); info->line = lineno; - if (cdata->n_frames == cdata->max_frames) - return 1; + cdata->frames_symbolized++; } return 0; } static void SymbolizeCodeCallback(void *vdata, uintptr_t addr, const char *symname, uintptr_t, uintptr_t) { - SymbolizeCodeData *cdata = (SymbolizeCodeData *)vdata; + SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata; if (symname) { - AddressInfo *info = &cdata->frames[0]; - info->Clear(); - info->FillAddressAndModuleInfo(addr, cdata->module_name, - 
cdata->module_offset); - info->function = LibbacktraceSymbolizer::Demangle(symname, true); - cdata->n_frames = 1; + AddressInfo *info = cdata->get_new_frame(addr); + info->function = DemangleAlloc(symname, /*always_alloc*/ true); + cdata->frames_symbolized++; } } @@ -126,7 +133,7 @@ static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname, uintptr_t symval, uintptr_t symsize) { DataInfo *info = (DataInfo *)vdata; if (symname && symval) { - info->name = LibbacktraceSymbolizer::Demangle(symname, true); + info->name = DemangleAlloc(symname, /*always_alloc*/ true); info->start = symval; info->size = symsize; } @@ -146,23 +153,18 @@ LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) { return new(*alloc) LibbacktraceSymbolizer(state); } -uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames, - uptr max_frames, - const char *module_name, - uptr module_offset) { - SymbolizeCodeData data; - data.frames = frames; - data.n_frames = 0; - data.max_frames = max_frames; - data.module_name = module_name; - data.module_offset = module_offset; +bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { + SymbolizeCodeCallbackArg data; + data.first = stack; + data.last = stack; + data.frames_symbolized = 0; backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback, ErrorCallback, &data); - if (data.n_frames) - return data.n_frames; + if (data.frames_symbolized > 0) + return true; backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback, ErrorCallback, &data); - return data.n_frames; + return (data.frames_symbolized > 0); } bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { @@ -177,12 +179,9 @@ LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) { return 0; } -uptr LibbacktraceSymbolizer::SymbolizeCode(uptr addr, AddressInfo *frames, - uptr max_frames, - const char *module_name, - uptr module_offset) { +bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { (void)state_; - return 0; + return false; } bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { @@ -191,7 +190,7 @@ bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { #endif // SANITIZER_LIBBACKTRACE -char *LibbacktraceSymbolizer::Demangle(const char *name, bool always_alloc) { +static char *DemangleAlloc(const char *name, bool always_alloc) { #if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE if (char *demangled = CplusV3Demangle(name)) return demangled; @@ -201,4 +200,8 @@ char *LibbacktraceSymbolizer::Demangle(const char *name, bool always_alloc) { return 0; } +const char *LibbacktraceSymbolizer::Demangle(const char *name) { + return DemangleAlloc(name, /*always_alloc*/ false); +} + } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h index aa75a7907df..ab1d6f99163 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libbacktrace.h @@ -14,7 +14,8 @@ #include "sanitizer_platform.h" #include "sanitizer_common.h" -#include "sanitizer_symbolizer.h" +#include "sanitizer_allocator_internal.h" +#include "sanitizer_symbolizer_internal.h" #ifndef SANITIZER_LIBBACKTRACE # define SANITIZER_LIBBACKTRACE 0 @@ -26,17 +27,16 @@ namespace __sanitizer { -class LibbacktraceSymbolizer { +class LibbacktraceSymbolizer : public SymbolizerTool { 
public: static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc); - uptr SymbolizeCode(uptr addr, AddressInfo *frames, uptr max_frames, - const char *module_name, uptr module_offset); + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; - bool SymbolizeData(uptr addr, DataInfo *info); + bool SymbolizeData(uptr addr, DataInfo *info) override; // May return NULL if demangling failed. - static char *Demangle(const char *name, bool always_alloc = false); + const char *Demangle(const char *name) override; private: explicit LibbacktraceSymbolizer(void *state) : state_(state) {} diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc index 9afd805a82c..4264b5e3ddc 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc @@ -9,18 +9,418 @@ // run-time libraries. //===----------------------------------------------------------------------===// +#include "sanitizer_allocator_internal.h" #include "sanitizer_internal_defs.h" -#include "sanitizer_symbolizer.h" +#include "sanitizer_symbolizer_internal.h" namespace __sanitizer { +const char *ExtractToken(const char *str, const char *delims, char **result) { + uptr prefix_len = internal_strcspn(str, delims); + *result = (char*)InternalAlloc(prefix_len + 1); + internal_memcpy(*result, str, prefix_len); + (*result)[prefix_len] = '\0'; + const char *prefix_end = str + prefix_len; + if (*prefix_end != '\0') prefix_end++; + return prefix_end; +} + +const char *ExtractInt(const char *str, const char *delims, int *result) { + char *buff; + const char *ret = ExtractToken(str, delims, &buff); + if (buff != 0) { + *result = (int)internal_atoll(buff); + } + InternalFree(buff); + return ret; +} + +const char *ExtractUptr(const char *str, const char *delims, uptr *result) { + char *buff; + const char *ret = ExtractToken(str, delims, &buff); + if (buff != 0) { + *result = (uptr)internal_atoll(buff); + } + InternalFree(buff); + return ret; +} + +const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter, + char **result) { + const char *found_delimiter = internal_strstr(str, delimiter); + uptr prefix_len = + found_delimiter ? found_delimiter - str : internal_strlen(str); + *result = (char *)InternalAlloc(prefix_len + 1); + internal_memcpy(*result, str, prefix_len); + (*result)[prefix_len] = '\0'; + const char *prefix_end = str + prefix_len; + if (*prefix_end != '\0') prefix_end += internal_strlen(delimiter); + return prefix_end; +} + +SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) { + BlockingMutexLock l(&mu_); + const char *module_name; + uptr module_offset; + SymbolizedStack *res = SymbolizedStack::New(addr); + if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset)) + return res; + // Always fill data about module name and offset. 
+ res->info.FillModuleInfo(module_name, module_offset); + for (auto iter = Iterator(&tools_); iter.hasNext();) { + auto *tool = iter.next(); + SymbolizerScope sym_scope(this); + if (tool->SymbolizePC(addr, res)) { + return res; + } + } + return res; +} + +bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) { + BlockingMutexLock l(&mu_); + const char *module_name; + uptr module_offset; + if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset)) + return false; + info->Clear(); + info->module = internal_strdup(module_name); + info->module_offset = module_offset; + for (auto iter = Iterator(&tools_); iter.hasNext();) { + auto *tool = iter.next(); + SymbolizerScope sym_scope(this); + if (tool->SymbolizeData(addr, info)) { + return true; + } + } + return true; +} + +bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name, + uptr *module_address) { + BlockingMutexLock l(&mu_); + const char *internal_module_name = nullptr; + if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name, + module_address)) + return false; + + if (module_name) + *module_name = module_names_.GetOwnedCopy(internal_module_name); + return true; +} + +void Symbolizer::Flush() { + BlockingMutexLock l(&mu_); + for (auto iter = Iterator(&tools_); iter.hasNext();) { + auto *tool = iter.next(); + SymbolizerScope sym_scope(this); + tool->Flush(); + } +} + +const char *Symbolizer::Demangle(const char *name) { + BlockingMutexLock l(&mu_); + for (auto iter = Iterator(&tools_); iter.hasNext();) { + auto *tool = iter.next(); + SymbolizerScope sym_scope(this); + if (const char *demangled = tool->Demangle(name)) + return demangled; + } + return PlatformDemangle(name); +} + +void Symbolizer::PrepareForSandboxing() { + BlockingMutexLock l(&mu_); + PlatformPrepareForSandboxing(); +} + +bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address, + const char **module_name, + uptr *module_offset) { + LoadedModule *module = FindModuleForAddress(address); + if (module == 0) + return false; + *module_name = module->full_name(); + *module_offset = address - module->base_address(); + return true; +} + +LoadedModule *Symbolizer::FindModuleForAddress(uptr address) { + bool modules_were_reloaded = false; + if (!modules_fresh_) { + for (uptr i = 0; i < n_modules_; i++) + modules_[i].clear(); + n_modules_ = + GetListOfModules(modules_, kMaxNumberOfModules, /* filter */ nullptr); + CHECK_GT(n_modules_, 0); + CHECK_LT(n_modules_, kMaxNumberOfModules); + modules_fresh_ = true; + modules_were_reloaded = true; + } + for (uptr i = 0; i < n_modules_; i++) { + if (modules_[i].containsAddress(address)) { + return &modules_[i]; + } + } + // Reload the modules and look up again, if we haven't tried it yet. + if (!modules_were_reloaded) { + // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors. + // It's too aggressive to reload the list of modules each time we fail + // to find a module for a given address. 
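// Illustrative sketch (not part of this patch): the Symbolizer methods above
// (SymbolizePC, SymbolizeData, Demangle, Flush) all follow the same "fallback
// chain" pattern: walk the registered tools in order and stop at the first one
// that reports success. Reduced to a standalone skeleton with made-up names:
#include <vector>

struct Frame { unsigned long long pc = 0; bool symbolized = false; };

// Stand-in for SymbolizerTool: returns true if it handled the address.
struct Tool {
  virtual ~Tool() {}
  virtual bool SymbolizePC(unsigned long long addr, Frame *out) = 0;
};

// Stand-in for Symbolizer::SymbolizePC(): the first tool that succeeds wins,
// and later tools in the chain are never consulted for this address.
static Frame Symbolize(const std::vector<Tool *> &chain,
                       unsigned long long addr) {
  Frame frame;
  frame.pc = addr;
  for (Tool *tool : chain) {
    if (tool->SymbolizePC(addr, &frame)) {
      frame.symbolized = true;
      break;
    }
  }
  return frame;  // In the runtime an unsymbolized frame still keeps module info.
}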
+ modules_fresh_ = false; + return FindModuleForAddress(address); + } + return 0; +} + Symbolizer *Symbolizer::GetOrInit() { SpinMutexLock l(&init_mu_); if (symbolizer_) return symbolizer_; - if ((symbolizer_ = PlatformInit())) - return symbolizer_; - return Disable(); + symbolizer_ = PlatformInit(); + CHECK(symbolizer_); + return symbolizer_; +} + +// For now we assume the following protocol: +// For each request of the form +// <module_name> <module_offset> +// passed to STDIN, external symbolizer prints to STDOUT response: +// <function_name> +// <file_name>:<line_number>:<column_number> +// <function_name> +// <file_name>:<line_number>:<column_number> +// ... +// <empty line> +class LLVMSymbolizerProcess : public SymbolizerProcess { + public: + explicit LLVMSymbolizerProcess(const char *path) : SymbolizerProcess(path) {} + + private: + bool ReachedEndOfOutput(const char *buffer, uptr length) const override { + // Empty line marks the end of llvm-symbolizer output. + return length >= 2 && buffer[length - 1] == '\n' && + buffer[length - 2] == '\n'; + } + + void GetArgV(const char *path_to_binary, + const char *(&argv)[kArgVMax]) const override { +#if defined(__x86_64h__) + const char* const kSymbolizerArch = "--default-arch=x86_64h"; +#elif defined(__x86_64__) + const char* const kSymbolizerArch = "--default-arch=x86_64"; +#elif defined(__i386__) + const char* const kSymbolizerArch = "--default-arch=i386"; +#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) + const char* const kSymbolizerArch = "--default-arch=powerpc64"; +#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) + const char* const kSymbolizerArch = "--default-arch=powerpc64le"; +#else + const char* const kSymbolizerArch = "--default-arch=unknown"; +#endif + + const char *const inline_flag = common_flags()->symbolize_inline_frames + ? "--inlining=true" + : "--inlining=false"; + int i = 0; + argv[i++] = path_to_binary; + argv[i++] = inline_flag; + argv[i++] = kSymbolizerArch; + argv[i++] = nullptr; + } +}; + +LLVMSymbolizer::LLVMSymbolizer(const char *path, LowLevelAllocator *allocator) + : symbolizer_process_(new(*allocator) LLVMSymbolizerProcess(path)) {} + +// Parse a <file>:<line>[:<column>] buffer. The file path may contain colons on +// Windows, so extract tokens from the right hand side first. The column info is +// also optional. +static const char *ParseFileLineInfo(AddressInfo *info, const char *str) { + char *file_line_info = 0; + str = ExtractToken(str, "\n", &file_line_info); + CHECK(file_line_info); + // Parse the last :<int>, which must be there. + char *last_colon = internal_strrchr(file_line_info, ':'); + CHECK(last_colon); + int line_or_column = internal_atoll(last_colon + 1); + // Truncate the string at the last colon and find the next-to-last colon. + *last_colon = '\0'; + last_colon = internal_strrchr(file_line_info, ':'); + if (last_colon && IsDigit(last_colon[1])) { + // If the second-to-last colon is followed by a digit, it must be the line + // number, and the previous parsed number was a column. + info->line = internal_atoll(last_colon + 1); + info->column = line_or_column; + *last_colon = '\0'; + } else { + // Otherwise, we have line info but no column info. 
+ info->line = line_or_column; + info->column = 0; + } + ExtractToken(file_line_info, "", &info->file); + InternalFree(file_line_info); + return str; +} + +// Parses one or more two-line strings in the following format: +// <function_name> +// <file_name>:<line_number>[:<column_number>] +// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of +// them use the same output format. +void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) { + bool top_frame = true; + SymbolizedStack *last = res; + while (true) { + char *function_name = 0; + str = ExtractToken(str, "\n", &function_name); + CHECK(function_name); + if (function_name[0] == '\0') { + // There are no more frames. + InternalFree(function_name); + break; + } + SymbolizedStack *cur; + if (top_frame) { + cur = res; + top_frame = false; + } else { + cur = SymbolizedStack::New(res->info.address); + cur->info.FillModuleInfo(res->info.module, res->info.module_offset); + last->next = cur; + last = cur; + } + + AddressInfo *info = &cur->info; + info->function = function_name; + str = ParseFileLineInfo(info, str); + + // Functions and filenames can be "??", in which case we write 0 + // to address info to mark that names are unknown. + if (0 == internal_strcmp(info->function, "??")) { + InternalFree(info->function); + info->function = 0; + } + if (0 == internal_strcmp(info->file, "??")) { + InternalFree(info->file); + info->file = 0; + } + } +} + +// Parses a two-line string in the following format: +// <symbol_name> +// <start_address> <size> +// Used by LLVMSymbolizer and InternalSymbolizer. +void ParseSymbolizeDataOutput(const char *str, DataInfo *info) { + str = ExtractToken(str, "\n", &info->name); + str = ExtractUptr(str, " ", &info->start); + str = ExtractUptr(str, "\n", &info->size); +} + +bool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { + if (const char *buf = SendCommand(/*is_data*/ false, stack->info.module, + stack->info.module_offset)) { + ParseSymbolizePCOutput(buf, stack); + return true; + } + return false; +} + +bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { + if (const char *buf = + SendCommand(/*is_data*/ true, info->module, info->module_offset)) { + ParseSymbolizeDataOutput(buf, info); + info->start += (addr - info->module_offset); // Add the base address. + return true; + } + return false; +} + +const char *LLVMSymbolizer::SendCommand(bool is_data, const char *module_name, + uptr module_offset) { + CHECK(module_name); + internal_snprintf(buffer_, kBufferSize, "%s\"%s\" 0x%zx\n", + is_data ? "DATA " : "", module_name, module_offset); + return symbolizer_process_->SendCommand(buffer_); +} + +SymbolizerProcess::SymbolizerProcess(const char *path, bool use_forkpty) + : path_(path), + input_fd_(kInvalidFd), + output_fd_(kInvalidFd), + times_restarted_(0), + failed_to_start_(false), + reported_invalid_path_(false), + use_forkpty_(use_forkpty) { + CHECK(path_); + CHECK_NE(path_[0], '\0'); +} + +const char *SymbolizerProcess::SendCommand(const char *command) { + for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) { + // Start or restart symbolizer if we failed to send command to it. 
+ if (const char *res = SendCommandImpl(command)) + return res; + Restart(); + } + if (!failed_to_start_) { + Report("WARNING: Failed to use and restart external symbolizer!\n"); + failed_to_start_ = true; + } + return 0; +} + +const char *SymbolizerProcess::SendCommandImpl(const char *command) { + if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd) + return 0; + if (!WriteToSymbolizer(command, internal_strlen(command))) + return 0; + if (!ReadFromSymbolizer(buffer_, kBufferSize)) + return 0; + return buffer_; +} + +bool SymbolizerProcess::Restart() { + if (input_fd_ != kInvalidFd) + CloseFile(input_fd_); + if (output_fd_ != kInvalidFd) + CloseFile(output_fd_); + return StartSymbolizerSubprocess(); +} + +bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) { + if (max_length == 0) + return true; + uptr read_len = 0; + while (true) { + uptr just_read = 0; + bool success = ReadFromFile(input_fd_, buffer + read_len, + max_length - read_len - 1, &just_read); + // We can't read 0 bytes, as we don't expect external symbolizer to close + // its stdout. + if (!success || just_read == 0) { + Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_); + return false; + } + read_len += just_read; + if (ReachedEndOfOutput(buffer, read_len)) + break; + } + buffer[read_len] = '\0'; + return true; +} + +bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) { + if (length == 0) + return true; + uptr write_len = 0; + bool success = WriteToFile(output_fd_, buffer, length, &write_len); + if (!success || write_len != length) { + Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_); + return false; + } + return true; } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc new file mode 100644 index 00000000000..d3495348e75 --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc @@ -0,0 +1,151 @@ +//===-- sanitizer_symbolizer_mac.cc ---------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries. +// +// Implementation of Mac-specific "atos" symbolizer. +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_MAC + +#include "sanitizer_allocator_internal.h" +#include "sanitizer_mac.h" +#include "sanitizer_symbolizer_mac.h" + +namespace __sanitizer { + +#include <dlfcn.h> +#include <errno.h> +#include <stdlib.h> +#include <sys/wait.h> +#include <unistd.h> +#include <util.h> + +bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { + Dl_info info; + int result = dladdr((const void *)addr, &info); + if (!result) return false; + const char *demangled = DemangleCXXABI(info.dli_sname); + stack->info.function = internal_strdup(demangled); + return true; +} + +bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { + return false; +} + +class AtosSymbolizerProcess : public SymbolizerProcess { + public: + explicit AtosSymbolizerProcess(const char *path, pid_t parent_pid) + : SymbolizerProcess(path, /*use_forkpty*/ true) { + // Put the string command line argument in the object so that it outlives + // the call to GetArgV. 
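+    // E.g. for a parent pid of 12345 (illustrative), pid_str_ holds "12345"
+    // and GetArgV later yields the command line "atos -p 12345" (with "-d"
+    // appended on Mavericks).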
+ internal_snprintf(pid_str_, sizeof(pid_str_), "%d", parent_pid); + } + + private: + bool ReachedEndOfOutput(const char *buffer, uptr length) const override { + return (length >= 1 && buffer[length - 1] == '\n'); + } + + void GetArgV(const char *path_to_binary, + const char *(&argv)[kArgVMax]) const override { + int i = 0; + argv[i++] = path_to_binary; + argv[i++] = "-p"; + argv[i++] = &pid_str_[0]; + if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) { + // On Mavericks atos prints a deprecation warning which we suppress by + // passing -d. The warning isn't present on other OSX versions, even the + // newer ones. + argv[i++] = "-d"; + } + argv[i++] = nullptr; + } + + char pid_str_[16]; +}; + +static const char *kAtosErrorMessages[] = { + "atos cannot examine process", + "unable to get permission to examine process", + "An admin user name and password is required", + "could not load inserted library", + "architecture mismatch between analysis process", +}; + +static bool IsAtosErrorMessage(const char *str) { + for (uptr i = 0; i < ARRAY_SIZE(kAtosErrorMessages); i++) { + if (internal_strstr(str, kAtosErrorMessages[i])) { + return true; + } + } + return false; +} + +static bool ParseCommandOutput(const char *str, SymbolizedStack *res) { + // Trim ending newlines. + char *trim; + ExtractTokenUpToDelimiter(str, "\n", &trim); + + // The line from `atos` is in one of these formats: + // myfunction (in library.dylib) (sourcefile.c:17) + // myfunction (in library.dylib) + 0x1fe + // 0xdeadbeef (in library.dylib) + 0x1fe + // 0xdeadbeef (in library.dylib) + // 0xdeadbeef + + if (IsAtosErrorMessage(trim)) { + Report("atos returned an error: %s\n", trim); + InternalFree(trim); + return false; + } + + const char *rest = trim; + char *function_name; + rest = ExtractTokenUpToDelimiter(rest, " (in ", &function_name); + if (internal_strncmp(function_name, "0x", 2) != 0) + res->info.function = function_name; + else + InternalFree(function_name); + rest = ExtractTokenUpToDelimiter(rest, ") ", &res->info.module); + + if (rest[0] == '(') { + rest++; + rest = ExtractTokenUpToDelimiter(rest, ":", &res->info.file); + char *extracted_line_number; + rest = ExtractTokenUpToDelimiter(rest, ")", &extracted_line_number); + res->info.line = internal_atoll(extracted_line_number); + InternalFree(extracted_line_number); + } + + InternalFree(trim); + return true; +} + +AtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator) + : process_(new(*allocator) AtosSymbolizerProcess(path, getpid())) {} + +bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) { + if (!process_) return false; + char command[32]; + internal_snprintf(command, sizeof(command), "0x%zx\n", addr); + const char *buf = process_->SendCommand(command); + if (!buf) return false; + if (!ParseCommandOutput(buf, stack)) { + process_ = nullptr; + return false; + } + return true; +} + +bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) { return false; } + +} // namespace __sanitizer + +#endif // SANITIZER_MAC diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h new file mode 100644 index 00000000000..240c538d93d --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.h @@ -0,0 +1,46 @@ +//===-- sanitizer_symbolizer_mac.h ------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This file is shared between various sanitizers' runtime libraries. +// +// Header for Mac-specific "atos" symbolizer. +//===----------------------------------------------------------------------===// + +#ifndef SANITIZER_SYMBOLIZER_MAC_H +#define SANITIZER_SYMBOLIZER_MAC_H + +#include "sanitizer_platform.h" +#if SANITIZER_MAC + +#include "sanitizer_symbolizer_internal.h" + +namespace __sanitizer { + +class DlAddrSymbolizer : public SymbolizerTool { + public: + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; + bool SymbolizeData(uptr addr, DataInfo *info) override; +}; + +class AtosSymbolizerProcess; + +class AtosSymbolizer : public SymbolizerTool { + public: + explicit AtosSymbolizer(const char *path, LowLevelAllocator *allocator); + + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; + bool SymbolizeData(uptr addr, DataInfo *info) override; + + private: + AtosSymbolizerProcess *process_; +}; + +} // namespace __sanitizer + +#endif // SANITIZER_MAC + +#endif // SANITIZER_SYMBOLIZER_MAC_H diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc index 5cc21d3f2bf..a67ae36a4d8 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc @@ -18,15 +18,21 @@ #include "sanitizer_internal_defs.h" #include "sanitizer_linux.h" #include "sanitizer_placement_new.h" +#include "sanitizer_posix.h" #include "sanitizer_procmaps.h" -#include "sanitizer_symbolizer.h" +#include "sanitizer_symbolizer_internal.h" #include "sanitizer_symbolizer_libbacktrace.h" +#include "sanitizer_symbolizer_mac.h" #include <errno.h> #include <stdlib.h> #include <sys/wait.h> #include <unistd.h> +#if SANITIZER_MAC +#include <util.h> // for forkpty() +#endif // SANITIZER_MAC + // C++ demangling function, as required by Itanium C++ ABI. This is weak, // because we do not require a C++ ABI library to be linked to a program // using sanitizers; if it's not present, we'll just use the mangled name. @@ -39,7 +45,7 @@ namespace __cxxabiv1 { namespace __sanitizer { // Attempts to demangle the name via __cxa_demangle from __cxxabiv1. -static const char *DemangleCXXABI(const char *name) { +const char *DemangleCXXABI(const char *name) { // FIXME: __cxa_demangle aggressively insists on allocating memory. // There's not much we can do about that, short of providing our // own demangler (libc++abi's implementation could be adapted so that @@ -53,150 +59,47 @@ static const char *DemangleCXXABI(const char *name) { return name; } -// Extracts the prefix of "str" that consists of any characters not -// present in "delims" string, and copies this prefix to "result", allocating -// space for it. -// Returns a pointer to "str" after skipping extracted prefix and first -// delimiter char. -static const char *ExtractToken(const char *str, const char *delims, - char **result) { - uptr prefix_len = internal_strcspn(str, delims); - *result = (char*)InternalAlloc(prefix_len + 1); - internal_memcpy(*result, str, prefix_len); - (*result)[prefix_len] = '\0'; - const char *prefix_end = str + prefix_len; - if (*prefix_end != '\0') prefix_end++; - return prefix_end; -} - -// Same as ExtractToken, but converts extracted token to integer. 
-static const char *ExtractInt(const char *str, const char *delims, - int *result) { - char *buff; - const char *ret = ExtractToken(str, delims, &buff); - if (buff != 0) { - *result = (int)internal_atoll(buff); - } - InternalFree(buff); - return ret; -} - -static const char *ExtractUptr(const char *str, const char *delims, - uptr *result) { - char *buff; - const char *ret = ExtractToken(str, delims, &buff); - if (buff != 0) { - *result = (uptr)internal_atoll(buff); - } - InternalFree(buff); - return ret; -} - -class ExternalSymbolizerInterface { - public: - // Can't declare pure virtual functions in sanitizer runtimes: - // __cxa_pure_virtual might be unavailable. - virtual char *SendCommand(bool is_data, const char *module_name, - uptr module_offset) { - UNIMPLEMENTED(); - } -}; - -// SymbolizerProcess encapsulates communication between the tool and -// external symbolizer program, running in a different subprocess. -// SymbolizerProcess may not be used from two threads simultaneously. -class SymbolizerProcess : public ExternalSymbolizerInterface { - public: - explicit SymbolizerProcess(const char *path) - : path_(path), - input_fd_(kInvalidFd), - output_fd_(kInvalidFd), - times_restarted_(0), - failed_to_start_(false), - reported_invalid_path_(false) { - CHECK(path_); - CHECK_NE(path_[0], '\0'); - } - - char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { - for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) { - // Start or restart symbolizer if we failed to send command to it. - if (char *res = SendCommandImpl(is_data, module_name, module_offset)) - return res; - Restart(); - } - if (!failed_to_start_) { - Report("WARNING: Failed to use and restart external symbolizer!\n"); - failed_to_start_ = true; - } - return 0; - } - - private: - bool Restart() { - if (input_fd_ != kInvalidFd) - internal_close(input_fd_); - if (output_fd_ != kInvalidFd) - internal_close(output_fd_); - return StartSymbolizerSubprocess(); - } - - char *SendCommandImpl(bool is_data, const char *module_name, - uptr module_offset) { - if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd) - return 0; - CHECK(module_name); - if (!RenderInputCommand(buffer_, kBufferSize, is_data, module_name, - module_offset)) - return 0; - if (!writeToSymbolizer(buffer_, internal_strlen(buffer_))) - return 0; - if (!readFromSymbolizer(buffer_, kBufferSize)) - return 0; - return buffer_; - } - - bool readFromSymbolizer(char *buffer, uptr max_length) { - if (max_length == 0) - return true; - uptr read_len = 0; - while (true) { - uptr just_read = internal_read(input_fd_, buffer + read_len, - max_length - read_len - 1); - // We can't read 0 bytes, as we don't expect external symbolizer to close - // its stdout. 
- if (just_read == 0 || just_read == (uptr)-1) { - Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_); - return false; - } - read_len += just_read; - if (ReachedEndOfOutput(buffer, read_len)) - break; +bool SymbolizerProcess::StartSymbolizerSubprocess() { + if (!FileExists(path_)) { + if (!reported_invalid_path_) { + Report("WARNING: invalid path to external symbolizer!\n"); + reported_invalid_path_ = true; } - buffer[read_len] = '\0'; - return true; - } - - bool writeToSymbolizer(const char *buffer, uptr length) { - if (length == 0) - return true; - uptr write_len = internal_write(output_fd_, buffer, length); - if (write_len == 0 || write_len == (uptr)-1) { - Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_); - return false; - } - return true; + return false; } - bool StartSymbolizerSubprocess() { - if (!FileExists(path_)) { - if (!reported_invalid_path_) { - Report("WARNING: invalid path to external symbolizer!\n"); - reported_invalid_path_ = true; - } + int pid; + if (use_forkpty_) { +#if SANITIZER_MAC + fd_t fd = kInvalidFd; + // Use forkpty to disable buffering in the new terminal. + pid = forkpty(&fd, 0, 0, 0); + if (pid == -1) { + // forkpty() failed. + Report("WARNING: failed to fork external symbolizer (errno: %d)\n", + errno); return false; + } else if (pid == 0) { + // Child subprocess. + const char *argv[kArgVMax]; + GetArgV(path_, argv); + execv(path_, const_cast<char **>(&argv[0])); + internal__exit(1); } + // Continue execution in parent process. + input_fd_ = output_fd_ = fd; + + // Disable echo in the new terminal, disable CR. + struct termios termflags; + tcgetattr(fd, &termflags); + termflags.c_oflag &= ~ONLCR; + termflags.c_lflag &= ~ECHO; + tcsetattr(fd, TCSANOW, &termflags); +#else // SANITIZER_MAC + UNIMPLEMENTED(); +#endif // SANITIZER_MAC + } else { int *infd = NULL; int *outfd = NULL; // The client program may close its stdin and/or stdout and/or stderr @@ -233,7 +136,7 @@ class SymbolizerProcess : public ExternalSymbolizerInterface { CHECK(outfd); // Real fork() may call user callbacks registered with pthread_atfork(). - int pid = internal_fork(); + pid = internal_fork(); if (pid == -1) { // Fork() failed. internal_close(infd[0]); @@ -255,7 +158,9 @@ class SymbolizerProcess : public ExternalSymbolizerInterface { internal_close(infd[1]); for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd); - ExecuteWithDefaultArgs(path_); + const char *argv[kArgVMax]; + GetArgV(path_, argv); + execv(path_, const_cast<char **>(&argv[0])); internal__exit(1); } @@ -264,96 +169,20 @@ class SymbolizerProcess : public ExternalSymbolizerInterface { internal_close(infd[1]); input_fd_ = infd[0]; output_fd_ = outfd[1]; - - // Check that symbolizer subprocess started successfully. - int pid_status; - SleepForMillis(kSymbolizerStartupTimeMillis); - int exited_pid = waitpid(pid, &pid_status, WNOHANG); - if (exited_pid != 0) { - // Either waitpid failed, or child has already exited. 
- Report("WARNING: external symbolizer didn't start up correctly!\n"); - return false; - } - - return true; - } - - virtual bool RenderInputCommand(char *buffer, uptr max_length, bool is_data, - const char *module_name, - uptr module_offset) const { - UNIMPLEMENTED(); - } - - virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const { - UNIMPLEMENTED(); } - virtual void ExecuteWithDefaultArgs(const char *path_to_binary) const { - UNIMPLEMENTED(); - } - - const char *path_; - int input_fd_; - int output_fd_; - - static const uptr kBufferSize = 16 * 1024; - char buffer_[kBufferSize]; - - static const uptr kMaxTimesRestarted = 5; - static const int kSymbolizerStartupTimeMillis = 10; - uptr times_restarted_; - bool failed_to_start_; - bool reported_invalid_path_; -}; - -// For now we assume the following protocol: -// For each request of the form -// <module_name> <module_offset> -// passed to STDIN, external symbolizer prints to STDOUT response: -// <function_name> -// <file_name>:<line_number>:<column_number> -// <function_name> -// <file_name>:<line_number>:<column_number> -// ... -// <empty line> -class LLVMSymbolizerProcess : public SymbolizerProcess { - public: - explicit LLVMSymbolizerProcess(const char *path) : SymbolizerProcess(path) {} - - private: - bool RenderInputCommand(char *buffer, uptr max_length, bool is_data, - const char *module_name, uptr module_offset) const { - internal_snprintf(buffer, max_length, "%s\"%s\" 0x%zx\n", - is_data ? "DATA " : "", module_name, module_offset); - return true; - } - - bool ReachedEndOfOutput(const char *buffer, uptr length) const { - // Empty line marks the end of llvm-symbolizer output. - return length >= 2 && buffer[length - 1] == '\n' && - buffer[length - 2] == '\n'; + // Check that symbolizer subprocess started successfully. + int pid_status; + SleepForMillis(kSymbolizerStartupTimeMillis); + int exited_pid = waitpid(pid, &pid_status, WNOHANG); + if (exited_pid != 0) { + // Either waitpid failed, or child has already exited. + Report("WARNING: external symbolizer didn't start up correctly!\n"); + return false; } - void ExecuteWithDefaultArgs(const char *path_to_binary) const { -#if defined(__x86_64__) - const char* const kSymbolizerArch = "--default-arch=x86_64"; -#elif defined(__i386__) - const char* const kSymbolizerArch = "--default-arch=i386"; -#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__) - const char* const kSymbolizerArch = "--default-arch=powerpc64"; -#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__) - const char* const kSymbolizerArch = "--default-arch=powerpc64le"; -#else - const char* const kSymbolizerArch = "--default-arch=unknown"; -#endif - - const char *const inline_flag = common_flags()->symbolize_inline_frames - ? 
"--inlining=true" - : "--inlining=false"; - execl(path_to_binary, path_to_binary, inline_flag, kSymbolizerArch, - (char *)0); - } -}; + return true; +} class Addr2LineProcess : public SymbolizerProcess { public: @@ -363,44 +192,76 @@ class Addr2LineProcess : public SymbolizerProcess { const char *module_name() const { return module_name_; } private: - bool RenderInputCommand(char *buffer, uptr max_length, bool is_data, - const char *module_name, uptr module_offset) const { - if (is_data) - return false; - CHECK_EQ(0, internal_strcmp(module_name, module_name_)); - internal_snprintf(buffer, max_length, "0x%zx\n", module_offset); - return true; + void GetArgV(const char *path_to_binary, + const char *(&argv)[kArgVMax]) const override { + int i = 0; + argv[i++] = path_to_binary; + argv[i++] = "-iCfe"; + argv[i++] = module_name_; + argv[i++] = nullptr; } - bool ReachedEndOfOutput(const char *buffer, uptr length) const { - // Output should consist of two lines. - int num_lines = 0; - for (uptr i = 0; i < length; ++i) { - if (buffer[i] == '\n') - num_lines++; - if (num_lines >= 2) - return true; - } - return false; - } + bool ReachedEndOfOutput(const char *buffer, uptr length) const override; - void ExecuteWithDefaultArgs(const char *path_to_binary) const { - execl(path_to_binary, path_to_binary, "-Cfe", module_name_, (char *)0); + bool ReadFromSymbolizer(char *buffer, uptr max_length) override { + if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length)) + return false; + // We should cut out output_terminator_ at the end of given buffer, + // appended by addr2line to mark the end of its meaningful output. + // We cannot scan buffer from it's beginning, because it is legal for it + // to start with output_terminator_ in case given offset is invalid. So, + // scanning from second character. + char *garbage = internal_strstr(buffer + 1, output_terminator_); + // This should never be NULL since buffer must end up with + // output_terminator_. + CHECK(garbage); + // Trim the buffer. + garbage[0] = '\0'; + return true; } const char *module_name_; // Owned, leaked. + static const char output_terminator_[]; }; -class Addr2LinePool : public ExternalSymbolizerInterface { +const char Addr2LineProcess::output_terminator_[] = "??\n??:0\n"; + +bool Addr2LineProcess::ReachedEndOfOutput(const char *buffer, + uptr length) const { + const size_t kTerminatorLen = sizeof(output_terminator_) - 1; + // Skip, if we read just kTerminatorLen bytes, because Addr2Line output + // should consist at least of two pairs of lines: + // 1. First one, corresponding to given offset to be symbolized + // (may be equal to output_terminator_, if offset is not valid). + // 2. Second one for output_terminator_, itself to mark the end of output. + if (length <= kTerminatorLen) return false; + // Addr2Line output should end up with output_terminator_. 
+ return !internal_memcmp(buffer + length - kTerminatorLen, + output_terminator_, kTerminatorLen); +} + +class Addr2LinePool : public SymbolizerTool { public: explicit Addr2LinePool(const char *addr2line_path, LowLevelAllocator *allocator) : addr2line_path_(addr2line_path), allocator_(allocator), addr2line_pool_(16) {} - char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { - if (is_data) - return 0; + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override { + if (const char *buf = + SendCommand(stack->info.module, stack->info.module_offset)) { + ParseSymbolizePCOutput(buf, stack); + return true; + } + return false; + } + + bool SymbolizeData(uptr addr, DataInfo *info) override { + return false; + } + + private: + const char *SendCommand(const char *module_name, uptr module_offset) { Addr2LineProcess *addr2line = 0; for (uptr i = 0; i < addr2line_pool_.size(); ++i) { if (0 == @@ -414,13 +275,19 @@ class Addr2LinePool : public ExternalSymbolizerInterface { new(*allocator_) Addr2LineProcess(addr2line_path_, module_name); addr2line_pool_.push_back(addr2line); } - return addr2line->SendCommand(is_data, module_name, module_offset); + CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name())); + char buffer[kBufferSize]; + internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n", + module_offset, dummy_address_); + return addr2line->SendCommand(buffer); } - private: + static const uptr kBufferSize = 64; const char *addr2line_path_; LowLevelAllocator *allocator_; InternalMmapVector<Addr2LineProcess*> addr2line_pool_; + static const uptr dummy_address_ = + FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX); }; #if SANITIZER_SUPPORTS_WEAK_HOOKS @@ -438,10 +305,8 @@ int __sanitizer_symbolize_demangle(const char *Name, char *Buffer, int MaxLength); } // extern "C" -class InternalSymbolizer { +class InternalSymbolizer : public SymbolizerTool { public: - typedef bool (*SanitizerSymbolizeFn)(const char*, u64, char*, int); - static InternalSymbolizer *get(LowLevelAllocator *alloc) { if (__sanitizer_symbolize_code != 0 && __sanitizer_symbolize_data != 0) { @@ -450,20 +315,29 @@ class InternalSymbolizer { return 0; } - char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { - SanitizerSymbolizeFn symbolize_fn = is_data ? __sanitizer_symbolize_data - : __sanitizer_symbolize_code; - if (symbolize_fn(module_name, module_offset, buffer_, kBufferSize)) - return buffer_; - return 0; + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override { + bool result = __sanitizer_symbolize_code( + stack->info.module, stack->info.module_offset, buffer_, kBufferSize); + if (result) ParseSymbolizePCOutput(buffer_, stack); + return result; + } + + bool SymbolizeData(uptr addr, DataInfo *info) override { + bool result = __sanitizer_symbolize_data(info->module, info->module_offset, + buffer_, kBufferSize); + if (result) { + ParseSymbolizeDataOutput(buffer_, info); + info->start += (addr - info->module_offset); // Add the base address. 
+ } + return result; } - void Flush() { + void Flush() override { if (__sanitizer_symbolize_flush) __sanitizer_symbolize_flush(); } - const char *Demangle(const char *name) { + const char *Demangle(const char *name) override { if (__sanitizer_symbolize_demangle) { for (uptr res_length = 1024; res_length <= InternalSizeClassMap::kMaxSize;) { @@ -490,272 +364,101 @@ class InternalSymbolizer { }; #else // SANITIZER_SUPPORTS_WEAK_HOOKS -class InternalSymbolizer { +class InternalSymbolizer : public SymbolizerTool { public: static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; } - char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { - return 0; - } - void Flush() { } - const char *Demangle(const char *name) { return name; } }; #endif // SANITIZER_SUPPORTS_WEAK_HOOKS -class POSIXSymbolizer : public Symbolizer { - public: - POSIXSymbolizer(ExternalSymbolizerInterface *external_symbolizer, - InternalSymbolizer *internal_symbolizer, - LibbacktraceSymbolizer *libbacktrace_symbolizer) - : Symbolizer(), - external_symbolizer_(external_symbolizer), - internal_symbolizer_(internal_symbolizer), - libbacktrace_symbolizer_(libbacktrace_symbolizer) {} - - uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) { - BlockingMutexLock l(&mu_); - if (max_frames == 0) - return 0; - const char *module_name; - uptr module_offset; - if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset)) - return 0; - // First, try to use libbacktrace symbolizer (if it's available). - if (libbacktrace_symbolizer_ != 0) { - mu_.CheckLocked(); - uptr res = libbacktrace_symbolizer_->SymbolizeCode( - addr, frames, max_frames, module_name, module_offset); - if (res > 0) - return res; - } - const char *str = SendCommand(false, module_name, module_offset); - if (str == 0) { - // Symbolizer was not initialized or failed. Fill only data - // about module name and offset. - AddressInfo *info = &frames[0]; - info->Clear(); - info->FillAddressAndModuleInfo(addr, module_name, module_offset); - return 1; - } - uptr frame_id = 0; - for (frame_id = 0; frame_id < max_frames; frame_id++) { - AddressInfo *info = &frames[frame_id]; - char *function_name = 0; - str = ExtractToken(str, "\n", &function_name); - CHECK(function_name); - if (function_name[0] == '\0') { - // There are no more frames. - break; - } - info->Clear(); - info->FillAddressAndModuleInfo(addr, module_name, module_offset); - info->function = function_name; - // Parse <file>:<line>:<column> buffer. - char *file_line_info = 0; - str = ExtractToken(str, "\n", &file_line_info); - CHECK(file_line_info); - const char *line_info = ExtractToken(file_line_info, ":", &info->file); - line_info = ExtractInt(line_info, ":", &info->line); - line_info = ExtractInt(line_info, "", &info->column); - InternalFree(file_line_info); - - // Functions and filenames can be "??", in which case we write 0 - // to address info to mark that names are unknown. - if (0 == internal_strcmp(info->function, "??")) { - InternalFree(info->function); - info->function = 0; - } - if (0 == internal_strcmp(info->file, "??")) { - InternalFree(info->file); - info->file = 0; - } - } - if (frame_id == 0) { - // Make sure we return at least one frame. 
- AddressInfo *info = &frames[0]; - info->Clear(); - info->FillAddressAndModuleInfo(addr, module_name, module_offset); - frame_id = 1; - } - return frame_id; - } - - bool SymbolizeData(uptr addr, DataInfo *info) { - BlockingMutexLock l(&mu_); - LoadedModule *module = FindModuleForAddress(addr); - if (module == 0) - return false; - const char *module_name = module->full_name(); - uptr module_offset = addr - module->base_address(); - info->Clear(); - info->module = internal_strdup(module_name); - info->module_offset = module_offset; - // First, try to use libbacktrace symbolizer (if it's available). - if (libbacktrace_symbolizer_ != 0) { - mu_.CheckLocked(); - if (libbacktrace_symbolizer_->SymbolizeData(addr, info)) - return true; - } - const char *str = SendCommand(true, module_name, module_offset); - if (str == 0) - return true; - str = ExtractToken(str, "\n", &info->name); - str = ExtractUptr(str, " ", &info->start); - str = ExtractUptr(str, "\n", &info->size); - info->start += module->base_address(); - return true; - } - - bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name, - uptr *module_address) { - BlockingMutexLock l(&mu_); - return FindModuleNameAndOffsetForAddress(pc, module_name, module_address); - } - - bool CanReturnFileLineInfo() { - return internal_symbolizer_ != 0 || external_symbolizer_ != 0 || - libbacktrace_symbolizer_ != 0; - } +const char *Symbolizer::PlatformDemangle(const char *name) { + return DemangleCXXABI(name); +} - void Flush() { - BlockingMutexLock l(&mu_); - if (internal_symbolizer_ != 0) { - SymbolizerScope sym_scope(this); - internal_symbolizer_->Flush(); +void Symbolizer::PlatformPrepareForSandboxing() {} + +static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) { + const char *path = common_flags()->external_symbolizer_path; + const char *binary_name = path ? StripModuleName(path) : ""; + if (path && path[0] == '\0') { + VReport(2, "External symbolizer is explicitly disabled.\n"); + return nullptr; + } else if (!internal_strcmp(binary_name, "llvm-symbolizer")) { + VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path); + return new(*allocator) LLVMSymbolizer(path, allocator); + } else if (!internal_strcmp(binary_name, "atos")) { +#if SANITIZER_MAC + VReport(2, "Using atos at user-specified path: %s\n", path); + return new(*allocator) AtosSymbolizer(path, allocator); +#else // SANITIZER_MAC + Report("ERROR: Using `atos` is only supported on Darwin.\n"); + Die(); +#endif // SANITIZER_MAC + } else if (!internal_strcmp(binary_name, "addr2line")) { + VReport(2, "Using addr2line at user-specified path: %s\n", path); + return new(*allocator) Addr2LinePool(path, allocator); + } else if (path) { + Report("ERROR: External symbolizer path is set to '%s' which isn't " + "a known symbolizer. 
Please set the path to the llvm-symbolizer " + "binary or other known tool.\n", path); + Die(); + } + + // Otherwise symbolizer program is unknown, let's search $PATH + CHECK(path == nullptr); + if (const char *found_path = FindPathToBinary("llvm-symbolizer")) { + VReport(2, "Using llvm-symbolizer found at: %s\n", found_path); + return new(*allocator) LLVMSymbolizer(found_path, allocator); + } +#if SANITIZER_MAC + if (const char *found_path = FindPathToBinary("atos")) { + VReport(2, "Using atos found at: %s\n", found_path); + return new(*allocator) AtosSymbolizer(found_path, allocator); + } +#endif // SANITIZER_MAC + if (common_flags()->allow_addr2line) { + if (const char *found_path = FindPathToBinary("addr2line")) { + VReport(2, "Using addr2line found at: %s\n", found_path); + return new(*allocator) Addr2LinePool(found_path, allocator); } } + return nullptr; +} - const char *Demangle(const char *name) { - BlockingMutexLock l(&mu_); - // Run hooks even if we don't use internal symbolizer, as cxxabi - // demangle may call system functions. - SymbolizerScope sym_scope(this); - // Try to use libbacktrace demangler (if available). - if (libbacktrace_symbolizer_ != 0) { - if (const char *demangled = libbacktrace_symbolizer_->Demangle(name)) - return demangled; - } - if (internal_symbolizer_ != 0) - return internal_symbolizer_->Demangle(name); - return DemangleCXXABI(name); +static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list, + LowLevelAllocator *allocator) { + if (!common_flags()->symbolize) { + VReport(2, "Symbolizer is disabled.\n"); + return; } - - void PrepareForSandboxing() { -#if SANITIZER_LINUX && !SANITIZER_ANDROID - BlockingMutexLock l(&mu_); - // Cache /proc/self/exe on Linux. - CacheBinaryName(); -#endif + if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) { + VReport(2, "Using internal symbolizer.\n"); + list->push_back(tool); + return; } - - private: - char *SendCommand(bool is_data, const char *module_name, uptr module_offset) { - mu_.CheckLocked(); - // First, try to use internal symbolizer. - if (internal_symbolizer_) { - SymbolizerScope sym_scope(this); - return internal_symbolizer_->SendCommand(is_data, module_name, - module_offset); - } - // Otherwise, fall back to external symbolizer. - if (external_symbolizer_) { - SymbolizerScope sym_scope(this); - return external_symbolizer_->SendCommand(is_data, module_name, - module_offset); - } - return 0; + if (SymbolizerTool *tool = LibbacktraceSymbolizer::get(allocator)) { + VReport(2, "Using libbacktrace symbolizer.\n"); + list->push_back(tool); + return; } - LoadedModule *FindModuleForAddress(uptr address) { - mu_.CheckLocked(); - bool modules_were_reloaded = false; - if (modules_ == 0 || !modules_fresh_) { - modules_ = (LoadedModule*)(symbolizer_allocator_.Allocate( - kMaxNumberOfModuleContexts * sizeof(LoadedModule))); - CHECK(modules_); - n_modules_ = GetListOfModules(modules_, kMaxNumberOfModuleContexts, - /* filter */ 0); - CHECK_GT(n_modules_, 0); - CHECK_LT(n_modules_, kMaxNumberOfModuleContexts); - modules_fresh_ = true; - modules_were_reloaded = true; - } - for (uptr i = 0; i < n_modules_; i++) { - if (modules_[i].containsAddress(address)) { - return &modules_[i]; - } - } - // Reload the modules and look up again, if we haven't tried it yet. - if (!modules_were_reloaded) { - // FIXME: set modules_fresh_ from dlopen()/dlclose() interceptors. - // It's too aggressive to reload the list of modules each time we fail - // to find a module for a given address. 
- modules_fresh_ = false; - return FindModuleForAddress(address); - } - return 0; + if (SymbolizerTool *tool = ChooseExternalSymbolizer(allocator)) { + list->push_back(tool); + } else { + VReport(2, "No internal or external symbolizer found.\n"); } - bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name, - uptr *module_offset) { - mu_.CheckLocked(); - LoadedModule *module = FindModuleForAddress(address); - if (module == 0) - return false; - *module_name = module->full_name(); - *module_offset = address - module->base_address(); - return true; - } - - // 16K loaded modules should be enough for everyone. - static const uptr kMaxNumberOfModuleContexts = 1 << 14; - LoadedModule *modules_; // Array of module descriptions is leaked. - uptr n_modules_; - // If stale, need to reload the modules before looking up addresses. - bool modules_fresh_; - BlockingMutex mu_; - - ExternalSymbolizerInterface *external_symbolizer_; // Leaked. - InternalSymbolizer *const internal_symbolizer_; // Leaked. - LibbacktraceSymbolizer *libbacktrace_symbolizer_; // Leaked. -}; +#if SANITIZER_MAC + VReport(2, "Using dladdr symbolizer.\n"); + list->push_back(new(*allocator) DlAddrSymbolizer()); +#endif // SANITIZER_MAC +} Symbolizer *Symbolizer::PlatformInit() { - if (!common_flags()->symbolize) { - return new(symbolizer_allocator_) POSIXSymbolizer(0, 0, 0); - } - InternalSymbolizer* internal_symbolizer = - InternalSymbolizer::get(&symbolizer_allocator_); - ExternalSymbolizerInterface *external_symbolizer = 0; - LibbacktraceSymbolizer *libbacktrace_symbolizer = 0; - - if (!internal_symbolizer) { - libbacktrace_symbolizer = - LibbacktraceSymbolizer::get(&symbolizer_allocator_); - if (!libbacktrace_symbolizer) { - const char *path_to_external = common_flags()->external_symbolizer_path; - if (path_to_external && path_to_external[0] == '\0') { - // External symbolizer is explicitly disabled. Do nothing. - } else { - // Find path to llvm-symbolizer if it's not provided. - if (!path_to_external) - path_to_external = FindPathToBinary("llvm-symbolizer"); - if (path_to_external) { - external_symbolizer = new(symbolizer_allocator_) - LLVMSymbolizerProcess(path_to_external); - } else if (common_flags()->allow_addr2line) { - // If llvm-symbolizer is not found, try to use addr2line. 
- if (const char *addr2line_path = FindPathToBinary("addr2line")) { - external_symbolizer = new(symbolizer_allocator_) - Addr2LinePool(addr2line_path, &symbolizer_allocator_); - } - } - } - } - } - - return new(symbolizer_allocator_) POSIXSymbolizer( - external_symbolizer, internal_symbolizer, libbacktrace_symbolizer); + IntrusiveList<SymbolizerTool> list; + list.clear(); + ChooseSymbolizerTools(&list, &symbolizer_allocator_); + return new(symbolizer_allocator_) Symbolizer(list); } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc index a1ed4e9a7b7..bb59f803285 100644 --- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc +++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc @@ -16,108 +16,264 @@ #include <dbghelp.h> #pragma comment(lib, "dbghelp.lib") -#include "sanitizer_symbolizer.h" +#include "sanitizer_symbolizer_internal.h" namespace __sanitizer { -class WinSymbolizer : public Symbolizer { +namespace { + +class WinSymbolizerTool : public SymbolizerTool { public: - WinSymbolizer() : initialized_(false) {} - - uptr SymbolizePC(uptr addr, AddressInfo *frames, uptr max_frames) { - if (max_frames == 0) - return 0; - - BlockingMutexLock l(&dbghelp_mu_); - if (!initialized_) { - if (!TrySymInitialize()) { - // OK, maybe the client app has called SymInitialize already. - // That's a bit unfortunate for us as all the DbgHelp functions are - // single-threaded and we can't coordinate with the app. - // FIXME: Can we stop the other threads at this point? - // Anyways, we have to reconfigure stuff to make sure that SymInitialize - // has all the appropriate options set. - // Cross our fingers and reinitialize DbgHelp. - Report("*** WARNING: Failed to initialize DbgHelp! ***\n"); - Report("*** Most likely this means that the app is already ***\n"); - Report("*** using DbgHelp, possibly with incompatible flags. ***\n"); - Report("*** Due to technical reasons, symbolization might crash ***\n"); - Report("*** or produce wrong results. 
***\n"); - SymCleanup(GetCurrentProcess()); - TrySymInitialize(); - } - initialized_ = true; - } + bool SymbolizePC(uptr addr, SymbolizedStack *stack) override; + bool SymbolizeData(uptr addr, DataInfo *info) override { + return false; + } + const char *Demangle(const char *name) override; +}; - // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx - char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)]; - PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer; - symbol->SizeOfStruct = sizeof(SYMBOL_INFO); - symbol->MaxNameLen = MAX_SYM_NAME; - DWORD64 offset = 0; - BOOL got_objname = SymFromAddr(GetCurrentProcess(), - (DWORD64)addr, &offset, symbol); - if (!got_objname) - return 0; - - DWORD unused; - IMAGEHLP_LINE64 line_info; - line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64); - BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr, - &unused, &line_info); - AddressInfo *info = &frames[0]; - info->Clear(); - info->function = internal_strdup(symbol->Name); - info->function_offset = (uptr)offset; - if (got_fileline) { - info->file = internal_strdup(line_info.FileName); - info->line = line_info.LineNumber; - } +bool is_dbghelp_initialized = false; - IMAGEHLP_MODULE64 mod_info; - internal_memset(&mod_info, 0, sizeof(mod_info)); - mod_info.SizeOfStruct = sizeof(mod_info); - if (SymGetModuleInfo64(GetCurrentProcess(), addr, &mod_info)) - info->FillAddressAndModuleInfo(addr, mod_info.ImageName, - addr - (uptr)mod_info.BaseOfImage); - return 1; - } +bool TrySymInitialize() { + SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES); + return SymInitialize(GetCurrentProcess(), 0, TRUE); + // FIXME: We don't call SymCleanup() on exit yet - should we? +} - bool CanReturnFileLineInfo() { - return true; +// Initializes DbgHelp library, if it's not yet initialized. Calls to this +// function should be synchronized with respect to other calls to DbgHelp API +// (e.g. from WinSymbolizerTool). +void InitializeDbgHelpIfNeeded() { + if (is_dbghelp_initialized) + return; + if (!TrySymInitialize()) { + // OK, maybe the client app has called SymInitialize already. + // That's a bit unfortunate for us as all the DbgHelp functions are + // single-threaded and we can't coordinate with the app. + // FIXME: Can we stop the other threads at this point? + // Anyways, we have to reconfigure stuff to make sure that SymInitialize + // has all the appropriate options set. + // Cross our fingers and reinitialize DbgHelp. + Report("*** WARNING: Failed to initialize DbgHelp! ***\n"); + Report("*** Most likely this means that the app is already ***\n"); + Report("*** using DbgHelp, possibly with incompatible flags. ***\n"); + Report("*** Due to technical reasons, symbolization might crash ***\n"); + Report("*** or produce wrong results. ***\n"); + SymCleanup(GetCurrentProcess()); + TrySymInitialize(); } + is_dbghelp_initialized = true; - const char *Demangle(const char *name) { - CHECK(initialized_); - static char demangle_buffer[1000]; - if (name[0] == '\01' && - UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer), - UNDNAME_NAME_ONLY)) - return demangle_buffer; - else - return name; + // When an executable is run from a location different from the one where it + // was originally built, we may not see the nearby PDB files. + // To work around this, let's append the directory of the main module + // to the symbol search path. All the failures below are not fatal. 
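+  // E.g. (illustrative) a search path of ".;C:\Symbols" for a main module at
+  // C:\app\foo.exe becomes ".;C:\Symbols;C:\app" after the steps below.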
+ const size_t kSymPathSize = 2048; + static wchar_t path_buffer[kSymPathSize + 1 + MAX_PATH]; + if (!SymGetSearchPathW(GetCurrentProcess(), path_buffer, kSymPathSize)) { + Report("*** WARNING: Failed to SymGetSearchPathW ***\n"); + return; + } + size_t sz = wcslen(path_buffer); + if (sz) { + CHECK_EQ(0, wcscat_s(path_buffer, L";")); + sz++; + } + DWORD res = GetModuleFileNameW(NULL, path_buffer + sz, MAX_PATH); + if (res == 0 || res == MAX_PATH) { + Report("*** WARNING: Failed to getting the EXE directory ***\n"); + return; } + // Write the zero character in place of the last backslash to get the + // directory of the main module at the end of path_buffer. + wchar_t *last_bslash = wcsrchr(path_buffer + sz, L'\\'); + CHECK_NE(last_bslash, 0); + *last_bslash = L'\0'; + if (!SymSetSearchPathW(GetCurrentProcess(), path_buffer)) { + Report("*** WARNING: Failed to SymSetSearchPathW\n"); + return; + } +} - // FIXME: Implement GetModuleNameAndOffsetForPC(). +} // namespace - private: - bool TrySymInitialize() { - SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES); - return SymInitialize(GetCurrentProcess(), 0, TRUE); - // FIXME: We don't call SymCleanup() on exit yet - should we? +bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) { + InitializeDbgHelpIfNeeded(); + + // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx + char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)]; + PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer; + symbol->SizeOfStruct = sizeof(SYMBOL_INFO); + symbol->MaxNameLen = MAX_SYM_NAME; + DWORD64 offset = 0; + BOOL got_objname = SymFromAddr(GetCurrentProcess(), + (DWORD64)addr, &offset, symbol); + if (!got_objname) + return false; + + DWORD unused; + IMAGEHLP_LINE64 line_info; + line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64); + BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr, + &unused, &line_info); + frame->info.function = internal_strdup(symbol->Name); + frame->info.function_offset = (uptr)offset; + if (got_fileline) { + frame->info.file = internal_strdup(line_info.FileName); + frame->info.line = line_info.LineNumber; } + // Only consider this a successful symbolization attempt if we got file info. + // Otherwise, try llvm-symbolizer. + return got_fileline; +} + +const char *WinSymbolizerTool::Demangle(const char *name) { + CHECK(is_dbghelp_initialized); + static char demangle_buffer[1000]; + if (name[0] == '\01' && + UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer), + UNDNAME_NAME_ONLY)) + return demangle_buffer; + else + return name; +} - // All DbgHelp functions are single threaded, so we should use a mutex to - // serialize accesses. - BlockingMutex dbghelp_mu_; - bool initialized_; +const char *Symbolizer::PlatformDemangle(const char *name) { + return name; +} + +void Symbolizer::PlatformPrepareForSandboxing() { + // Do nothing. +} + +namespace { +struct ScopedHandle { + ScopedHandle() : h_(nullptr) {} + explicit ScopedHandle(HANDLE h) : h_(h) {} + ~ScopedHandle() { + if (h_) + ::CloseHandle(h_); + } + HANDLE get() { return h_; } + HANDLE *receive() { return &h_; } + HANDLE release() { + HANDLE h = h_; + h_ = nullptr; + return h; + } + HANDLE h_; }; +} // namespace + +bool SymbolizerProcess::StartSymbolizerSubprocess() { + // Create inherited pipes for stdin and stdout. 
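+  // Illustrative dataflow: the parent writes requests to stdin_write (the
+  // child's stdin) and reads replies from stdout_read (the child's stdout);
+  // the other two pipe ends are inherited by the child created below.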
+ ScopedHandle stdin_read, stdin_write; + ScopedHandle stdout_read, stdout_write; + SECURITY_ATTRIBUTES attrs; + attrs.nLength = sizeof(SECURITY_ATTRIBUTES); + attrs.bInheritHandle = TRUE; + attrs.lpSecurityDescriptor = nullptr; + if (!::CreatePipe(stdin_read.receive(), stdin_write.receive(), &attrs, 0) || + !::CreatePipe(stdout_read.receive(), stdout_write.receive(), &attrs, 0)) { + VReport(2, "WARNING: %s CreatePipe failed (error code: %d)\n", + SanitizerToolName, path_, GetLastError()); + return false; + } + + // Don't inherit the writing end of stdin or the reading end of stdout. + if (!SetHandleInformation(stdin_write.get(), HANDLE_FLAG_INHERIT, 0) || + !SetHandleInformation(stdout_read.get(), HANDLE_FLAG_INHERIT, 0)) { + VReport(2, "WARNING: %s SetHandleInformation failed (error code: %d)\n", + SanitizerToolName, path_, GetLastError()); + return false; + } + + // Compute the command line. Wrap double quotes around everything. + const char *argv[kArgVMax]; + GetArgV(path_, argv); + InternalScopedString command_line(kMaxPathLength * 3); + for (int i = 0; argv[i]; i++) { + const char *arg = argv[i]; + int arglen = internal_strlen(arg); + // Check that tool command lines are simple and that complete escaping is + // unnecessary. + CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported"); + CHECK(!internal_strstr(arg, "\\\\") && + "double backslashes in args unsupported"); + CHECK(arglen > 0 && arg[arglen - 1] != '\\' && + "args ending in backslash and empty args unsupported"); + command_line.append("\"%s\" ", arg); + } + VReport(3, "Launching symbolizer command: %s\n", command_line.data()); + + // Launch llvm-symbolizer with stdin and stdout redirected. + STARTUPINFOA si; + memset(&si, 0, sizeof(si)); + si.cb = sizeof(si); + si.dwFlags |= STARTF_USESTDHANDLES; + si.hStdInput = stdin_read.get(); + si.hStdOutput = stdout_write.get(); + PROCESS_INFORMATION pi; + memset(&pi, 0, sizeof(pi)); + if (!CreateProcessA(path_, // Executable + command_line.data(), // Command line + nullptr, // Process handle not inheritable + nullptr, // Thread handle not inheritable + TRUE, // Set handle inheritance to TRUE + 0, // Creation flags + nullptr, // Use parent's environment block + nullptr, // Use parent's starting directory + &si, &pi)) { + VReport(2, "WARNING: %s failed to create process for %s (error code: %d)\n", + SanitizerToolName, path_, GetLastError()); + return false; + } + + // Process creation succeeded, so transfer handle ownership into the fields. + input_fd_ = stdout_read.release(); + output_fd_ = stdin_write.release(); + + // The llvm-symbolizer process is responsible for quitting itself when the + // stdin pipe is closed, so we don't need these handles. Close them to prevent + // leaks. If we ever want to try to kill the symbolizer process from the + // parent, we'll want to hang on to these handles. + CloseHandle(pi.hProcess); + CloseHandle(pi.hThread); + return true; +} + +static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list, + LowLevelAllocator *allocator) { + if (!common_flags()->symbolize) { + VReport(2, "Symbolizer is disabled.\n"); + return; + } + + // Add llvm-symbolizer in case the binary has dwarf. + const char *user_path = common_flags()->external_symbolizer_path; + const char *path = + user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe"); + if (path) { + VReport(2, "Using llvm-symbolizer at %spath: %s\n", + user_path ? 
"user-specified " : "", path); + list->push_back(new(*allocator) LLVMSymbolizer(path, allocator)); + } else { + if (user_path && user_path[0] == '\0') { + VReport(2, "External symbolizer is explicitly disabled.\n"); + } else { + VReport(2, "External symbolizer is not present.\n"); + } + } + + // Add the dbghelp based symbolizer. + list->push_back(new(*allocator) WinSymbolizerTool()); +} Symbolizer *Symbolizer::PlatformInit() { - static bool called_once = false; - CHECK(!called_once && "Shouldn't create more than one symbolizer"); - called_once = true; - return new(symbolizer_allocator_) WinSymbolizer(); + IntrusiveList<SymbolizerTool> list; + list.clear(); + ChooseSymbolizerTools(&list, &symbolizer_allocator_); + + return new(symbolizer_allocator_) Symbolizer(list); } } // namespace __sanitizer diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc index 7667b753a8b..3bc50d972a4 100644 --- a/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc +++ b/libsanitizer/sanitizer_common/sanitizer_syscall_generic.inc @@ -9,13 +9,13 @@ // //===----------------------------------------------------------------------===// -#if SANITIZER_FREEBSD +#if SANITIZER_FREEBSD || SANITIZER_MAC # define SYSCALL(name) SYS_ ## name #else # define SYSCALL(name) __NR_ ## name #endif -#if SANITIZER_FREEBSD && defined(__x86_64__) +#if (SANITIZER_FREEBSD || SANITIZER_MAC) && defined(__x86_64__) # define internal_syscall __syscall # else # define internal_syscall syscall diff --git a/libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc new file mode 100644 index 00000000000..64d6322651a --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_syscall_linux_aarch64.inc @@ -0,0 +1,136 @@ +//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implementations of internal_syscall and internal_iserror for Linux/aarch64. 
+// +//===----------------------------------------------------------------------===// + +#define SYSCALL(name) __NR_ ## name + +static uptr __internal_syscall(u64 nr) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0"); + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8) + : "memory", "cc"); + return x0; +} +#define __internal_syscall0(n) \ + (__internal_syscall)(n) + +static uptr __internal_syscall(u64 nr, u64 arg1) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0") = arg1; + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8), "0"(x0) + : "memory", "cc"); + return x0; +} +#define __internal_syscall1(n, a1) \ + (__internal_syscall)(n, (u64)(a1)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0") = arg1; + register u64 x1 asm("x1") = arg2; + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8), "0"(x0), "r"(x1) + : "memory", "cc"); + return x0; +} +#define __internal_syscall2(n, a1, a2) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0") = arg1; + register u64 x1 asm("x1") = arg2; + register u64 x2 asm("x2") = arg3; + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8), "0"(x0), "r"(x1), "r"(x2) + : "memory", "cc"); + return x0; +} +#define __internal_syscall3(n, a1, a2, a3) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0") = arg1; + register u64 x1 asm("x1") = arg2; + register u64 x2 asm("x2") = arg3; + register u64 x3 asm("x3") = arg4; + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3) + : "memory", "cc"); + return x0; +} +#define __internal_syscall4(n, a1, a2, a3, a4) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4, long arg5) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0") = arg1; + register u64 x1 asm("x1") = arg2; + register u64 x2 asm("x2") = arg3; + register u64 x3 asm("x3") = arg4; + register u64 x4 asm("x4") = arg5; + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4) + : "memory", "cc"); + return x0; +} +#define __internal_syscall5(n, a1, a2, a3, a4, a5) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ + (u64)(a5)) + +static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3, + u64 arg4, long arg5, long arg6) { + register u64 x8 asm("x8") = nr; + register u64 x0 asm("x0") = arg1; + register u64 x1 asm("x1") = arg2; + register u64 x2 asm("x2") = arg3; + register u64 x3 asm("x3") = arg4; + register u64 x4 asm("x4") = arg5; + register u64 x5 asm("x5") = arg6; + asm volatile("svc 0" + : "=r"(x0) + : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5) + : "memory", "cc"); + return x0; +} +#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \ + (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \ + (u64)(a5), (long)(a6)) + +#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n +#define __SYSCALL_NARGS(...) \ + __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, ) +#define __SYSCALL_CONCAT_X(a, b) a##b +#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b) +#define __SYSCALL_DISP(b, ...) 
\ + __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__) + +#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__) + +// Helper function used to avoid cobbler errno. +bool internal_iserror(uptr retval, int *rverrno) { + if (retval >= (uptr)-4095) { + if (rverrno) + *rverrno = -retval; + return true; + } + return false; +} diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h index d5a741bbfc4..8d5f1ea3988 100644 --- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.h +++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.h @@ -77,7 +77,8 @@ class ThreadRegistry { ThreadRegistry(ThreadContextFactory factory, u32 max_threads, u32 thread_quarantine_size, u32 max_reuse = 0); - void GetNumberOfThreads(uptr *total = 0, uptr *running = 0, uptr *alive = 0); + void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr, + uptr *alive = nullptr); uptr GetMaxAliveThreads(); void Lock() { mtx_.Lock(); } @@ -140,6 +141,6 @@ class ThreadRegistry { typedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock; -} // namespace __sanitizer +} // namespace __sanitizer -#endif // SANITIZER_THREAD_REGISTRY_H +#endif // SANITIZER_THREAD_REGISTRY_H diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc index af828045b59..e05c7be0799 100644 --- a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc +++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc @@ -76,7 +76,8 @@ void DTLS_Destroy() { DTLS_Deallocate(dtls.dtv, s); } -DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res) { +DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res, + uptr static_tls_begin, uptr static_tls_end) { if (!common_flags()->intercept_tls_get_addr) return 0; TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void); uptr dso_id = arg->dso_id; @@ -93,6 +94,11 @@ DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res) { tls_size = dtls.last_memalign_size; VPrintf(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n", tls_beg, tls_size); + } else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) { + // This is the static TLS block which was initialized / unpoisoned at thread + // creation. + VPrintf(2, "__tls_get_addr: static tls: %p\n", tls_beg); + tls_size = 0; } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) { // We may want to check gnu_get_libc_version(). Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1; diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h index 8dc629f630e..e4f8c0c535e 100644 --- a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h +++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.h @@ -48,7 +48,8 @@ struct DTLS { // Returns pointer and size of a linker-allocated TLS block. // Each block is returned exactly once. -DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res); +DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin, + uptr static_tls_end); void DTLS_on_libc_memalign(void *ptr, uptr size); DTLS *DTLS_Get(); void DTLS_Destroy(); // Make sure to call this before the thread is destroyed. 
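For reference: the new sanitizer_syscall_linux_aarch64.inc above wraps the raw "svc 0" instruction in one __internal_syscall overload per arity and then selects the right overload by counting macro arguments — __SYSCALL_NARGS expands to the number of arguments that follow the syscall number, and __SYSCALL_CONCAT pastes that count onto the __internal_syscall name. Error returns follow the usual Linux convention of -errno in the range [-4095, -1], which is what the internal_iserror helper decodes. Below is a minimal standalone sketch of the same counting trick (compilable with GCC or Clang); the names DISP, NARGS, CONCAT and do_syscallN are illustrative stand-ins, not part of the runtime.

// Standalone sketch of the argument-counting dispatch used by
// __SYSCALL_DISP above.  NARGS expands to the number of arguments after
// the first one, CONCAT glues that count onto a base name, so
// DISP(do_syscall, 64, 1) expands to do_syscall1(64, 1).
#include <cstdio>

#define NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define NARGS(...) NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define CONCAT_X(a, b) a##b
#define CONCAT(a, b) CONCAT_X(a, b)
#define DISP(base, ...) CONCAT(base, NARGS(__VA_ARGS__))(__VA_ARGS__)

// Hypothetical stand-ins for the per-arity __internal_syscall overloads.
static long do_syscall1(long nr, long a1) {
  std::printf("syscall %ld with 1 arg: %ld\n", nr, a1);
  return 0;
}
static long do_syscall2(long nr, long a1, long a2) {
  std::printf("syscall %ld with 2 args: %ld %ld\n", nr, a1, a2);
  return 0;
}

int main() {
  DISP(do_syscall, 64, 1);     // expands to do_syscall1(64, 1)
  DISP(do_syscall, 64, 1, 2);  // expands to do_syscall2(64, 1, 2)
  return 0;
}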
diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc index b2ca931e9d9..408c21c913b 100644 --- a/libsanitizer/sanitizer_common/sanitizer_unwind_posix_libcdep.cc +++ b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc @@ -1,4 +1,4 @@ -//===-- sanitizer_unwind_posix.cc ----------------------------------------===// +//===-- sanitizer_unwind_linux_libcdep.cc ---------------------------------===// // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. @@ -6,11 +6,11 @@ //===----------------------------------------------------------------------===// // // This file contains the unwind.h-based (aka "slow") stack unwinding routines -// available to the tools on Linux, Android, FreeBSD and OS X. +// available to the tools on Linux, Android, and FreeBSD. //===----------------------------------------------------------------------===// #include "sanitizer_platform.h" -#if SANITIZER_POSIX +#if SANITIZER_FREEBSD || SANITIZER_LINUX #include "sanitizer_common.h" #include "sanitizer_stacktrace.h" @@ -80,7 +80,7 @@ void SanitizerInitializeUnwinder() { #endif uptr Unwind_GetIP(struct _Unwind_Context *ctx) { -#ifdef __arm__ +#if defined(__arm__) && !SANITIZER_MAC uptr val; _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE, 15 /* r15 = PC */, _UVRSD_UINT32, &val); @@ -94,7 +94,7 @@ uptr Unwind_GetIP(struct _Unwind_Context *ctx) { struct UnwindTraceArg { BufferedStackTrace *stack; - uptr max_depth; + u32 max_depth; }; _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { @@ -106,14 +106,19 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) { return UNWIND_CONTINUE; } -void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { +void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { CHECK_GE(max_depth, 2); size = 0; UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)}; _Unwind_Backtrace(Unwind_Trace, &arg); // We need to pop a few frames so that pc is on top. uptr to_pop = LocatePcInTrace(pc); - // trace_buffer[0] belongs to the current function so we always pop it. + // trace_buffer[0] belongs to the current function so we always pop it, + // unless there is only 1 frame in the stack trace (1 frame is always better + // than 0!). + // 1-frame stacks don't normally happen, but this depends on the actual + // unwinder implementation (libgcc, libunwind, etc) which is outside of our + // control. 
if (to_pop == 0 && size > 1) to_pop = 1; PopStackFrames(to_pop); @@ -121,7 +126,7 @@ void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { } void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, - uptr max_depth) { + u32 max_depth) { CHECK_GE(max_depth, 2); if (!unwind_backtrace_signal_arch) { SlowUnwindStack(pc, max_depth); @@ -148,4 +153,4 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, } // namespace __sanitizer -#endif // SANITIZER_POSIX +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc index fbccef19cb1..bb84249b243 100644 --- a/libsanitizer/sanitizer_common/sanitizer_win.cc +++ b/libsanitizer/sanitizer_common/sanitizer_win.cc @@ -18,6 +18,7 @@ #include <windows.h> #include <dbghelp.h> #include <io.h> +#include <psapi.h> #include <stdlib.h> #include "sanitizer_common.h" @@ -32,7 +33,9 @@ namespace __sanitizer { // --------------------- sanitizer_common.h uptr GetPageSize() { - return 1U << 14; // FIXME: is this configurable? + // FIXME: there is an API for getting the system page size (GetSystemInfo or + // GetNativeSystemInfo), but if we use it here we get test failures elsewhere. + return 1U << 14; } uptr GetMmapGranularity() { @@ -46,7 +49,7 @@ uptr GetMaxVirtualAddress() { } bool FileExists(const char *filename) { - UNIMPLEMENTED(); + return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES; } uptr internal_getpid() { @@ -80,16 +83,15 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, void *MmapOrDie(uptr size, const char *mem_type) { void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); - if (rv == 0) { - Report("ERROR: %s failed to " - "allocate 0x%zx (%zd) bytes of %s (error code: %d)\n", - SanitizerToolName, size, size, mem_type, GetLastError()); - CHECK("unable to mmap" && 0); - } + if (rv == 0) + ReportMmapFailureAndDie(size, mem_type, GetLastError()); return rv; } void UnmapOrDie(void *addr, uptr size) { + if (!size || !addr) + return; + if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) { Report("ERROR: %s failed to " "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n", @@ -98,9 +100,10 @@ void UnmapOrDie(void *addr, uptr size) { } } -void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { +void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) { // FIXME: is this really "NoReserve"? On Win32 this does not matter much, // but on Win64 it does. 
+ (void)name; // unsupported void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); if (p == 0) @@ -119,26 +122,49 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) { return MmapOrDie(size, mem_type); } -void *Mprotect(uptr fixed_addr, uptr size) { - return VirtualAlloc((LPVOID)fixed_addr, size, - MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS); +void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) { + (void)name; // unsupported + void *res = VirtualAlloc((LPVOID)fixed_addr, size, + MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS); + if (res == 0) + Report("WARNING: %s failed to " + "mprotect %p (%zd) bytes at %p (error code: %d)\n", + SanitizerToolName, size, size, fixed_addr, GetLastError()); + return res; +} + +bool MprotectNoAccess(uptr addr, uptr size) { + DWORD old_protection; + return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection); } + void FlushUnneededShadowMemory(uptr addr, uptr size) { // This is almost useless on 32-bits. - // FIXME: add madvice-analog when we move to 64-bits. + // FIXME: add madvise-analog when we move to 64-bits. +} + +void NoHugePagesInRegion(uptr addr, uptr size) { + // FIXME: probably similar to FlushUnneededShadowMemory. +} + +void DontDumpShadowMemory(uptr addr, uptr length) { + // This is almost useless on 32-bits. + // FIXME: add madvise-analog when we move to 64-bits. } bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { - // FIXME: shall we do anything here on Windows? - return true; + MEMORY_BASIC_INFORMATION mbi; + CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi))); + return mbi.Protect == PAGE_NOACCESS && + (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end; } void *MapFileToMemory(const char *file_name, uptr *buff_size) { UNIMPLEMENTED(); } -void *MapWritableFileToMemory(void *addr, uptr size, uptr fd, uptr offset) { +void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) { UNIMPLEMENTED(); } @@ -185,9 +211,50 @@ u32 GetUid() { UNIMPLEMENTED(); } +namespace { +struct ModuleInfo { + const char *filepath; + uptr base_address; + uptr end_address; +}; + +int CompareModulesBase(const void *pl, const void *pr) { + const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr; + if (l->base_address < r->base_address) + return -1; + return l->base_address > r->base_address; +} +} // namespace + +#ifndef SANITIZER_GO void DumpProcessMap() { - UNIMPLEMENTED(); + Report("Dumping process modules:\n"); + InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules); + uptr num_modules = + GetListOfModules(modules.data(), kMaxNumberOfModules, nullptr); + + InternalScopedBuffer<ModuleInfo> module_infos(num_modules); + for (size_t i = 0; i < num_modules; ++i) { + module_infos[i].filepath = modules[i].full_name(); + module_infos[i].base_address = modules[i].base_address(); + module_infos[i].end_address = modules[i].ranges().next()->end; + } + qsort(module_infos.data(), num_modules, sizeof(ModuleInfo), + CompareModulesBase); + + for (size_t i = 0; i < num_modules; ++i) { + const ModuleInfo &mi = module_infos[i]; + if (mi.end_address != 0) { + Printf("\t%p-%p %s\n", mi.base_address, mi.end_address, + mi.filepath[0] ? mi.filepath : "[no name]"); + } else if (mi.filepath[0]) { + Printf("\t??\?-??? %s\n", mi.filepath); + } else { + Printf("\t???\n"); + } + } } +#endif void DisableCoreDumperIfNecessary() { // Do nothing. 
@@ -198,8 +265,9 @@ void ReExec() { } void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) { - (void)args; - // Nothing here for now. +#if !SANITIZER_GO + CovPrepareForSandboxing(args); +#endif } bool StackSizeIsUnlimited() { @@ -218,9 +286,12 @@ void SetAddressSpaceUnlimited() { UNIMPLEMENTED(); } -char *FindPathToBinary(const char *name) { - // Nothing here for now. - return 0; +bool IsPathSeparator(const char c) { + return c == '\\' || c == '/'; +} + +bool IsAbsolutePath(const char *path) { + UNIMPLEMENTED(); } void SleepForSeconds(int seconds) { @@ -236,123 +307,234 @@ u64 NanoTime() { } void Abort() { - abort(); - internal__exit(-1); // abort is not NORETURN on Windows. + if (::IsDebuggerPresent()) + __debugbreak(); + internal__exit(3); +} + +// Read the file to extract the ImageBase field from the PE header. If ASLR is +// disabled and this virtual address is available, the loader will typically +// load the image at this address. Therefore, we call it the preferred base. Any +// addresses in the DWARF typically assume that the object has been loaded at +// this address. +static uptr GetPreferredBase(const char *modname) { + fd_t fd = OpenFile(modname, RdOnly, nullptr); + if (fd == kInvalidFd) + return 0; + FileCloser closer(fd); + + // Read just the DOS header. + IMAGE_DOS_HEADER dos_header; + uptr bytes_read; + if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) || + bytes_read != sizeof(dos_header)) + return 0; + + // The file should start with the right signature. + if (dos_header.e_magic != IMAGE_DOS_SIGNATURE) + return 0; + + // The layout at e_lfanew is: + // "PE\0\0" + // IMAGE_FILE_HEADER + // IMAGE_OPTIONAL_HEADER + // Seek to e_lfanew and read all that data. + char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)]; + if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) == + INVALID_SET_FILE_POINTER) + return 0; + if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) || + bytes_read != sizeof(buf)) + return 0; + + // Check for "PE\0\0" before the PE header. + char *pe_sig = &buf[0]; + if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0) + return 0; + + // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted. + IMAGE_OPTIONAL_HEADER *pe_header = + (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER)); + + // Check for more magic in the PE header. + if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC) + return 0; + + // Finally, return the ImageBase. + return (uptr)pe_header->ImageBase; } uptr GetListOfModules(LoadedModule *modules, uptr max_modules, string_predicate_t filter) { - UNIMPLEMENTED(); + HANDLE cur_process = GetCurrentProcess(); + + // Query the list of modules. Start by assuming there are no more than 256 + // modules and retry if that's not sufficient. + HMODULE *hmodules = 0; + uptr modules_buffer_size = sizeof(HMODULE) * 256; + DWORD bytes_required; + while (!hmodules) { + hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__); + CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size, + &bytes_required)); + if (bytes_required > modules_buffer_size) { + // Either there turned out to be more than 256 hmodules, or new hmodules + // could have loaded since the last try. Retry. + UnmapOrDie(hmodules, modules_buffer_size); + hmodules = 0; + modules_buffer_size = bytes_required; + } + } + + // |num_modules| is the number of modules actually present, + // |count| is the number of modules we return. 
+ size_t nun_modules = bytes_required / sizeof(HMODULE), + count = 0; + for (size_t i = 0; i < nun_modules && count < max_modules; ++i) { + HMODULE handle = hmodules[i]; + MODULEINFO mi; + if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi))) + continue; + + // Get the UTF-16 path and convert to UTF-8. + wchar_t modname_utf16[kMaxPathLength]; + int modname_utf16_len = + GetModuleFileNameW(handle, modname_utf16, kMaxPathLength); + if (modname_utf16_len == 0) + modname_utf16[0] = '\0'; + char module_name[kMaxPathLength]; + int module_name_len = + ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1, + &module_name[0], kMaxPathLength, NULL, NULL); + module_name[module_name_len] = '\0'; + + if (filter && !filter(module_name)) + continue; + + uptr base_address = (uptr)mi.lpBaseOfDll; + uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage; + + // Adjust the base address of the module so that we get a VA instead of an + // RVA when computing the module offset. This helps llvm-symbolizer find the + // right DWARF CU. In the common case that the image is loaded at it's + // preferred address, we will now print normal virtual addresses. + uptr preferred_base = GetPreferredBase(&module_name[0]); + uptr adjusted_base = base_address - preferred_base; + + LoadedModule *cur_module = &modules[count]; + cur_module->set(module_name, adjusted_base); + // We add the whole module as one single address range. + cur_module->addAddressRange(base_address, end_address, /*executable*/ true); + count++; + } + UnmapOrDie(hmodules, modules_buffer_size); + + return count; }; #ifndef SANITIZER_GO +// We can't use atexit() directly at __asan_init time as the CRT is not fully +// initialized at this point. Place the functions into a vector and use +// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers). 
+InternalMmapVectorNoCtor<void (*)(void)> atexit_functions; + int Atexit(void (*function)(void)) { - return atexit(function); + atexit_functions.push_back(function); + return 0; } -#endif -// ------------------ sanitizer_libc.h -uptr internal_mmap(void *addr, uptr length, int prot, int flags, - int fd, u64 offset) { - UNIMPLEMENTED(); +static int RunAtexit() { + int ret = 0; + for (uptr i = 0; i < atexit_functions.size(); ++i) { + ret |= atexit(atexit_functions[i]); + } + return ret; } -uptr internal_munmap(void *addr, uptr length) { - UNIMPLEMENTED(); -} +#pragma section(".CRT$XID", long, read) // NOLINT +__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit; +#endif -uptr internal_close(fd_t fd) { - UNIMPLEMENTED(); +// ------------------ sanitizer_libc.h +fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) { + fd_t res; + if (mode == RdOnly) { + res = CreateFile(filename, GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr); + } else if (mode == WrOnly) { + res = CreateFile(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS, + FILE_ATTRIBUTE_NORMAL, nullptr); + } else { + UNIMPLEMENTED(); + } + CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd); + CHECK(res != kStderrFd || kStderrFd == kInvalidFd); + if (res == kInvalidFd && last_error) + *last_error = GetLastError(); + return res; } -int internal_isatty(fd_t fd) { - return _isatty(fd); +void CloseFile(fd_t fd) { + CloseHandle(fd); } -uptr internal_open(const char *filename, int flags) { - UNIMPLEMENTED(); -} +bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read, + error_t *error_p) { + CHECK(fd != kInvalidFd); -uptr internal_open(const char *filename, int flags, u32 mode) { - UNIMPLEMENTED(); -} + // bytes_read can't be passed directly to ReadFile: + // uptr is unsigned long long on 64-bit Windows. + unsigned long num_read_long; -uptr OpenFile(const char *filename, bool write) { - UNIMPLEMENTED(); + bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr); + if (!success && error_p) + *error_p = GetLastError(); + if (bytes_read) + *bytes_read = num_read_long; + return success; } -uptr internal_read(fd_t fd, void *buf, uptr count) { - UNIMPLEMENTED(); +bool SupportsColoredOutput(fd_t fd) { + // FIXME: support colored output. + return false; } -uptr internal_write(fd_t fd, const void *buf, uptr count) { - if (fd != kStderrFd) - UNIMPLEMENTED(); +bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written, + error_t *error_p) { + CHECK(fd != kInvalidFd); - static HANDLE output_stream = 0; - // Abort immediately if we know printing is not possible. - if (output_stream == INVALID_HANDLE_VALUE) - return 0; + // Handle null optional parameters. + error_t dummy_error; + error_p = error_p ? error_p : &dummy_error; + uptr dummy_bytes_written; + bytes_written = bytes_written ? bytes_written : &dummy_bytes_written; - // If called for the first time, try to use stderr to output stuff, - // falling back to stdout if anything goes wrong. - bool fallback_to_stdout = false; - if (output_stream == 0) { - output_stream = GetStdHandle(STD_ERROR_HANDLE); - // We don't distinguish "no such handle" from error. - if (output_stream == 0) - output_stream = INVALID_HANDLE_VALUE; - - if (output_stream == INVALID_HANDLE_VALUE) { - // Retry with stdout? 
- output_stream = GetStdHandle(STD_OUTPUT_HANDLE); - if (output_stream == 0) - output_stream = INVALID_HANDLE_VALUE; - if (output_stream == INVALID_HANDLE_VALUE) - return 0; - } else { - // Successfully got an stderr handle. However, if WriteFile() fails, - // we can still try to fallback to stdout. - fallback_to_stdout = true; + // Initialize output parameters in case we fail. + *error_p = 0; + *bytes_written = 0; + + // Map the conventional Unix fds 1 and 2 to Windows handles. They might be + // closed, in which case this will fail. + if (fd == kStdoutFd || fd == kStderrFd) { + fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE); + if (fd == 0) { + *error_p = ERROR_INVALID_HANDLE; + return false; } } - DWORD ret; - if (WriteFile(output_stream, buf, count, &ret, 0)) - return ret; - - // Re-try with stdout if using a valid stderr handle fails. - if (fallback_to_stdout) { - output_stream = GetStdHandle(STD_OUTPUT_HANDLE); - if (output_stream == 0) - output_stream = INVALID_HANDLE_VALUE; - if (output_stream != INVALID_HANDLE_VALUE) - return internal_write(fd, buf, count); + DWORD bytes_written_32; + if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) { + *error_p = GetLastError(); + return false; + } else { + *bytes_written = bytes_written_32; + return true; } - return 0; -} - -uptr internal_stat(const char *path, void *buf) { - UNIMPLEMENTED(); -} - -uptr internal_lstat(const char *path, void *buf) { - UNIMPLEMENTED(); } -uptr internal_fstat(fd_t fd, void *buf) { - UNIMPLEMENTED(); -} - -uptr internal_filesize(fd_t fd) { - UNIMPLEMENTED(); -} - -uptr internal_dup2(int oldfd, int newfd) { - UNIMPLEMENTED(); -} - -uptr internal_readlink(const char *path, char *buf, uptr bufsize) { +bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) { UNIMPLEMENTED(); } @@ -369,10 +551,13 @@ uptr internal_ftruncate(fd_t fd, uptr size) { UNIMPLEMENTED(); } -uptr internal_rename(const char *oldpath, const char *newpath) { - UNIMPLEMENTED(); +uptr GetRSS() { + return 0; } +void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; } +void internal_join_thread(void *th) { } + // ---------------------- BlockingMutex ---------------- {{{1 const uptr LOCK_UNINITIALIZED = 0; const uptr LOCK_READY = (uptr)-1; @@ -442,7 +627,7 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, } #if !SANITIZER_GO -void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { +void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) { CHECK_GE(max_depth, 2); // FIXME: CaptureStackBackTrace might be too slow for us. // FIXME: Compare with StackWalk64. @@ -458,7 +643,7 @@ void BufferedStackTrace::SlowUnwindStack(uptr pc, uptr max_depth) { } void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, - uptr max_depth) { + u32 max_depth) { CONTEXT ctx = *(CONTEXT *)context; STACKFRAME64 stack_frame; memset(&stack_frame, 0, sizeof(stack_frame)); @@ -486,15 +671,10 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context, } #endif // #if !SANITIZER_GO -void MaybeOpenReportFile() { - // Windows doesn't have native fork, and we don't support Cygwin or other - // environments that try to fake it, so the initial report_fd will always be - // correct. 
-} - -void RawWrite(const char *buffer) { - uptr length = (uptr)internal_strlen(buffer); - if (length != internal_write(report_fd, buffer, length)) { +void ReportFile::Write(const char *buffer, uptr length) { + SpinMutexLock l(mu); + ReopenIfNecessary(); + if (!WriteToFile(fd, buffer, length)) { // stderr may be closed, but we may be able to print to the debugger // instead. This is the case when launching a program from Visual Studio, // and the following routine should write to its console. @@ -521,10 +701,62 @@ bool IsDeadlySignal(int signum) { } bool IsAccessibleMemoryRange(uptr beg, uptr size) { - // FIXME: Actually implement this function. + SYSTEM_INFO si; + GetNativeSystemInfo(&si); + uptr page_size = si.dwPageSize; + uptr page_mask = ~(page_size - 1); + + for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask; + page <= end;) { + MEMORY_BASIC_INFORMATION info; + if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info)) + return false; + + if (info.Protect == 0 || info.Protect == PAGE_NOACCESS || + info.Protect == PAGE_EXECUTE) + return false; + + if (info.RegionSize == 0) + return false; + + page += info.RegionSize; + } + return true; } +SignalContext SignalContext::Create(void *siginfo, void *context) { + EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD*)siginfo; + CONTEXT *context_record = (CONTEXT*)context; + + uptr pc = (uptr)exception_record->ExceptionAddress; +#ifdef _WIN64 + uptr bp = (uptr)context_record->Rbp; + uptr sp = (uptr)context_record->Rsp; +#else + uptr bp = (uptr)context_record->Ebp; + uptr sp = (uptr)context_record->Esp; +#endif + uptr access_addr = exception_record->ExceptionInformation[1]; + + return SignalContext(context, access_addr, pc, sp, bp); +} + +uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) { + // FIXME: Actually implement this function. + CHECK_GT(buf_len, 0); + buf[0] = 0; + return 0; +} + +uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) { + return ReadBinaryName(buf, buf_len); +} + +void CheckVMASize() { + // Do nothing. +} + } // namespace __sanitizer #endif // _WIN32 diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am index abfafb783fd..5c732cb50c3 100644 --- a/libsanitizer/tsan/Makefile.am +++ b/libsanitizer/tsan/Makefile.am @@ -3,7 +3,7 @@ AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include # May be used by toolexeclibdir. 
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) -DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DCAN_SANITIZE_UB=0 AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS) AM_CXXFLAGS += -std=gnu++11 @@ -25,6 +25,7 @@ tsan_files = \ tsan_mman.cc \ tsan_mutex.cc \ tsan_mutexset.cc \ + tsan_new_delete.cc \ tsan_platform_linux.cc \ tsan_platform_mac.cc \ tsan_platform_windows.cc \ diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in index 8b20d285901..bd196073e03 100644 --- a/libsanitizer/tsan/Makefile.in +++ b/libsanitizer/tsan/Makefile.in @@ -108,11 +108,11 @@ am__objects_1 = tsan_clock.lo tsan_fd.lo tsan_flags.lo \ tsan_ignoreset.lo tsan_interceptors.lo tsan_interface_ann.lo \ tsan_interface_atomic.lo tsan_interface.lo \ tsan_interface_java.lo tsan_md5.lo tsan_mman.lo tsan_mutex.lo \ - tsan_mutexset.lo tsan_platform_linux.lo tsan_platform_mac.lo \ - tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \ - tsan_rtl_mutex.lo tsan_rtl_report.lo tsan_rtl_thread.lo \ - tsan_stack_trace.lo tsan_stat.lo tsan_suppressions.lo \ - tsan_symbolize.lo tsan_sync.lo + tsan_mutexset.lo tsan_new_delete.lo tsan_platform_linux.lo \ + tsan_platform_mac.lo tsan_platform_windows.lo tsan_report.lo \ + tsan_rtl.lo tsan_rtl_mutex.lo tsan_rtl_report.lo \ + tsan_rtl_thread.lo tsan_stack_trace.lo tsan_stat.lo \ + tsan_suppressions.lo tsan_symbolize.lo tsan_sync.lo am_libtsan_la_OBJECTS = $(am__objects_1) libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS) libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ @@ -177,7 +177,7 @@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ -DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DCAN_SANITIZE_UB=0 DEPDIR = @DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ @@ -321,6 +321,7 @@ tsan_files = \ tsan_mman.cc \ tsan_mutex.cc \ tsan_mutexset.cc \ + tsan_new_delete.cc \ tsan_platform_linux.cc \ tsan_platform_mac.cc \ tsan_platform_windows.cc \ @@ -475,6 +476,7 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mman.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutex.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_mutexset.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_new_delete.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_linux.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_mac.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_platform_windows.Plo@am__quote@ diff --git a/libsanitizer/tsan/tsan_clock.cc b/libsanitizer/tsan/tsan_clock.cc index a84caa952a6..d925c9cfd83 100644 --- a/libsanitizer/tsan/tsan_clock.cc +++ b/libsanitizer/tsan/tsan_clock.cc @@ -80,7 +80,7 @@ // We don't have ThreadState in these methods, so this is an ugly hack that // works only in C++. 
-#ifndef TSAN_GO +#ifndef SANITIZER_GO # define CPP_STAT_INC(typ) StatInc(cur_thread(), typ) #else # define CPP_STAT_INC(typ) (void)0 @@ -102,8 +102,8 @@ ThreadClock::ThreadClock(unsigned tid, unsigned reused) } void ThreadClock::acquire(ClockCache *c, const SyncClock *src) { - DCHECK(nclk_ <= kMaxTid); - DCHECK(src->size_ <= kMaxTid); + DCHECK_LE(nclk_, kMaxTid); + DCHECK_LE(src->size_, kMaxTid); CPP_STAT_INC(StatClockAcquire); // Check if it's empty -> no need to do anything. @@ -213,8 +213,8 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) const { } void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const { - DCHECK(nclk_ <= kMaxTid); - DCHECK(dst->size_ <= kMaxTid); + DCHECK_LE(nclk_, kMaxTid); + DCHECK_LE(dst->size_, kMaxTid); CPP_STAT_INC(StatClockStore); // Check if we need to resize dst. diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h index 076de73c9be..21b68356e7b 100644 --- a/libsanitizer/tsan/tsan_defs.h +++ b/libsanitizer/tsan/tsan_defs.h @@ -15,14 +15,24 @@ #include "sanitizer_common/sanitizer_internal_defs.h" #include "sanitizer_common/sanitizer_libc.h" #include "tsan_stat.h" +#include "ubsan/ubsan_platform.h" -#ifndef TSAN_DEBUG -#define TSAN_DEBUG 0 -#endif // TSAN_DEBUG +// Setup defaults for compile definitions. +#ifndef TSAN_NO_HISTORY +# define TSAN_NO_HISTORY 0 +#endif + +#ifndef TSAN_COLLECT_STATS +# define TSAN_COLLECT_STATS 0 +#endif + +#ifndef TSAN_CONTAINS_UBSAN +# define TSAN_CONTAINS_UBSAN (CAN_SANITIZE_UB && !defined(SANITIZER_GO)) +#endif namespace __tsan { -#ifdef TSAN_GO +#ifdef SANITIZER_GO const bool kGoMode = true; const bool kCppMode = false; const char *const kTsanOptionsEnv = "GORACE"; @@ -37,23 +47,17 @@ const char *const kTsanOptionsEnv = "TSAN_OPTIONS"; const int kTidBits = 13; const unsigned kMaxTid = 1 << kTidBits; +#ifndef SANITIZER_GO const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit. +#else +const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory. +#endif const int kClkBits = 42; const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1; const uptr kShadowStackSize = 64 * 1024; -#ifdef TSAN_SHADOW_COUNT -# if TSAN_SHADOW_COUNT == 2 \ - || TSAN_SHADOW_COUNT == 4 || TSAN_SHADOW_COUNT == 8 -const uptr kShadowCnt = TSAN_SHADOW_COUNT; -# else -# error "TSAN_SHADOW_COUNT must be one of 2,4,8" -# endif -#else // Count of shadow values in a shadow cell. -#define TSAN_SHADOW_COUNT 4 const uptr kShadowCnt = 4; -#endif // That many user bytes are mapped onto a single shadow cell. const uptr kShadowCell = 8; @@ -71,22 +75,16 @@ const uptr kMetaShadowCell = 8; // Size of a single meta shadow value (u32). const uptr kMetaShadowSize = 4; -#if defined(TSAN_NO_HISTORY) && TSAN_NO_HISTORY +#if TSAN_NO_HISTORY const bool kCollectHistory = false; #else const bool kCollectHistory = true; #endif -#if defined(TSAN_COLLECT_STATS) && TSAN_COLLECT_STATS -const bool kCollectStats = true; -#else -const bool kCollectStats = false; -#endif - // The following "build consistency" machinery ensures that all source files // are built in the same configuration. Inconsistent builds lead to // hard to debug crashes. 
-#if TSAN_DEBUG +#if SANITIZER_DEBUG void build_consistency_debug(); #else void build_consistency_release(); @@ -98,18 +96,8 @@ void build_consistency_stats(); void build_consistency_nostats(); #endif -#if TSAN_SHADOW_COUNT == 1 -void build_consistency_shadow1(); -#elif TSAN_SHADOW_COUNT == 2 -void build_consistency_shadow2(); -#elif TSAN_SHADOW_COUNT == 4 -void build_consistency_shadow4(); -#else -void build_consistency_shadow8(); -#endif - static inline void USED build_consistency() { -#if TSAN_DEBUG +#if SANITIZER_DEBUG build_consistency_debug(); #else build_consistency_release(); @@ -119,15 +107,6 @@ static inline void USED build_consistency() { #else build_consistency_nostats(); #endif -#if TSAN_SHADOW_COUNT == 1 - build_consistency_shadow1(); -#elif TSAN_SHADOW_COUNT == 2 - build_consistency_shadow2(); -#elif TSAN_SHADOW_COUNT == 4 - build_consistency_shadow4(); -#else - build_consistency_shadow8(); -#endif } template<typename T> diff --git a/libsanitizer/tsan/tsan_fd.cc b/libsanitizer/tsan/tsan_fd.cc index 10582035f24..8f75a28200d 100644 --- a/libsanitizer/tsan/tsan_fd.cc +++ b/libsanitizer/tsan/tsan_fd.cc @@ -89,7 +89,8 @@ static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) { } // pd must be already ref'ed. -static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) { +static void init(ThreadState *thr, uptr pc, int fd, FdSync *s, + bool write = true) { FdDesc *d = fddesc(thr, pc, fd); // As a matter of fact, we don't intercept all close calls. // See e.g. libc __res_iclose(). @@ -107,8 +108,13 @@ static void init(ThreadState *thr, uptr pc, int fd, FdSync *s) { } d->creation_tid = thr->tid; d->creation_stack = CurrentStackId(thr, pc); - // To catch races between fd usage and open. - MemoryRangeImitateWrite(thr, pc, (uptr)d, 8); + if (write) { + // To catch races between fd usage and open. + MemoryRangeImitateWrite(thr, pc, (uptr)d, 8); + } else { + // See the dup-related comment in FdClose. + MemoryRead(thr, pc, (uptr)d, kSizeLog8); + } } void FdInit() { @@ -179,13 +185,25 @@ void FdAccess(ThreadState *thr, uptr pc, int fd) { MemoryRead(thr, pc, (uptr)d, kSizeLog8); } -void FdClose(ThreadState *thr, uptr pc, int fd) { +void FdClose(ThreadState *thr, uptr pc, int fd, bool write) { DPrintf("#%d: FdClose(%d)\n", thr->tid, fd); if (bogusfd(fd)) return; FdDesc *d = fddesc(thr, pc, fd); - // To catch races between fd usage and close. - MemoryWrite(thr, pc, (uptr)d, kSizeLog8); + if (write) { + // To catch races between fd usage and close. + MemoryWrite(thr, pc, (uptr)d, kSizeLog8); + } else { + // This path is used only by dup2/dup3 calls. + // We do read instead of write because there is a number of legitimate + // cases where write would lead to false positives: + // 1. Some software dups a closed pipe in place of a socket before closing + // the socket (to prevent races actually). + // 2. Some daemons dup /dev/null in place of stdin/stdout. + // On the other hand we have not seen cases when write here catches real + // bugs. + MemoryRead(thr, pc, (uptr)d, kSizeLog8); + } // We need to clear it, because if we do not intercept any call out there // that creates fd, we will hit false postives. 
MemoryResetRange(thr, pc, (uptr)d, 8); @@ -202,15 +220,15 @@ void FdFileCreate(ThreadState *thr, uptr pc, int fd) { init(thr, pc, fd, &fdctx.filesync); } -void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd) { +void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) { DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd); if (bogusfd(oldfd) || bogusfd(newfd)) return; // Ignore the case when user dups not yet connected socket. FdDesc *od = fddesc(thr, pc, oldfd); MemoryRead(thr, pc, (uptr)od, kSizeLog8); - FdClose(thr, pc, newfd); - init(thr, pc, newfd, ref(od->sync)); + FdClose(thr, pc, newfd, write); + init(thr, pc, newfd, ref(od->sync), write); } void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) { diff --git a/libsanitizer/tsan/tsan_fd.h b/libsanitizer/tsan/tsan_fd.h index 57ae62cfe8b..4d9236c9903 100644 --- a/libsanitizer/tsan/tsan_fd.h +++ b/libsanitizer/tsan/tsan_fd.h @@ -40,9 +40,9 @@ void FdInit(); void FdAcquire(ThreadState *thr, uptr pc, int fd); void FdRelease(ThreadState *thr, uptr pc, int fd); void FdAccess(ThreadState *thr, uptr pc, int fd); -void FdClose(ThreadState *thr, uptr pc, int fd); +void FdClose(ThreadState *thr, uptr pc, int fd, bool write = true); void FdFileCreate(ThreadState *thr, uptr pc, int fd); -void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd); +void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write); void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd); void FdEventCreate(ThreadState *thr, uptr pc, int fd); void FdSignalCreate(ThreadState *thr, uptr pc, int fd); diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc index 6059f2771ba..3e89375187d 100644 --- a/libsanitizer/tsan/tsan_flags.cc +++ b/libsanitizer/tsan/tsan_flags.cc @@ -10,10 +10,12 @@ //===----------------------------------------------------------------------===// #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" #include "sanitizer_common/sanitizer_libc.h" #include "tsan_flags.h" #include "tsan_rtl.h" #include "tsan_mman.h" +#include "ubsan/ubsan_flags.h" namespace __tsan { @@ -31,80 +33,67 @@ const char *WEAK __tsan_default_options() { } #endif -static void ParseFlags(Flags *f, const char *env) { - ParseFlag(env, &f->enable_annotations, "enable_annotations", ""); - ParseFlag(env, &f->suppress_equal_stacks, "suppress_equal_stacks", ""); - ParseFlag(env, &f->suppress_equal_addresses, "suppress_equal_addresses", ""); - ParseFlag(env, &f->report_bugs, "report_bugs", ""); - ParseFlag(env, &f->report_thread_leaks, "report_thread_leaks", ""); - ParseFlag(env, &f->report_destroy_locked, "report_destroy_locked", ""); - ParseFlag(env, &f->report_mutex_bugs, "report_mutex_bugs", ""); - ParseFlag(env, &f->report_signal_unsafe, "report_signal_unsafe", ""); - ParseFlag(env, &f->report_atomic_races, "report_atomic_races", ""); - ParseFlag(env, &f->force_seq_cst_atomics, "force_seq_cst_atomics", ""); - ParseFlag(env, &f->print_benign, "print_benign", ""); - ParseFlag(env, &f->exitcode, "exitcode", ""); - ParseFlag(env, &f->halt_on_error, "halt_on_error", ""); - ParseFlag(env, &f->atexit_sleep_ms, "atexit_sleep_ms", ""); - ParseFlag(env, &f->profile_memory, "profile_memory", ""); - ParseFlag(env, &f->flush_memory_ms, "flush_memory_ms", ""); - ParseFlag(env, &f->flush_symbolizer_ms, "flush_symbolizer_ms", ""); - ParseFlag(env, &f->memory_limit_mb, "memory_limit_mb", ""); - ParseFlag(env, &f->stop_on_start, "stop_on_start", ""); - ParseFlag(env, 
&f->running_on_valgrind, "running_on_valgrind", ""); - ParseFlag(env, &f->history_size, "history_size", ""); - ParseFlag(env, &f->io_sync, "io_sync", ""); - ParseFlag(env, &f->die_after_fork, "die_after_fork", ""); +void Flags::SetDefaults() { +#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "tsan_flags.inc" +#undef TSAN_FLAG + // DDFlags + second_deadlock_stack = false; +} +void RegisterTsanFlags(FlagParser *parser, Flags *f) { +#define TSAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "tsan_flags.inc" +#undef TSAN_FLAG // DDFlags - ParseFlag(env, &f->second_deadlock_stack, "second_deadlock_stack", ""); + RegisterFlag(parser, "second_deadlock_stack", + "Report where each mutex is locked in deadlock reports", + &f->second_deadlock_stack); } void InitializeFlags(Flags *f, const char *env) { - internal_memset(f, 0, sizeof(*f)); - - // Default values. - f->enable_annotations = true; - f->suppress_equal_stacks = true; - f->suppress_equal_addresses = true; - f->report_bugs = true; - f->report_thread_leaks = true; - f->report_destroy_locked = true; - f->report_mutex_bugs = true; - f->report_signal_unsafe = true; - f->report_atomic_races = true; - f->force_seq_cst_atomics = false; - f->print_benign = false; - f->exitcode = 66; - f->halt_on_error = false; - f->atexit_sleep_ms = 1000; - f->profile_memory = ""; - f->flush_memory_ms = 0; - f->flush_symbolizer_ms = 5000; - f->memory_limit_mb = 0; - f->stop_on_start = false; - f->running_on_valgrind = false; - f->history_size = kGoMode ? 1 : 2; // There are a lot of goroutines in Go. - f->io_sync = 1; - f->die_after_fork = true; + SetCommonFlagsDefaults(); + { + // Override some common flags defaults. + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.allow_addr2line = true; +#ifndef SANITIZER_GO + cf.detect_deadlocks = true; +#endif + cf.print_suppressions = false; + cf.stack_trace_format = " #%n %f %S %M"; + cf.exitcode = 66; + OverrideCommonFlags(cf); + } - // DDFlags - f->second_deadlock_stack = false; + f->SetDefaults(); + + FlagParser parser; + RegisterTsanFlags(&parser, f); + RegisterCommonFlags(&parser); - CommonFlags *cf = common_flags(); - SetCommonFlagsDefaults(cf); - // Override some common flags defaults. - cf->allow_addr2line = true; - cf->detect_deadlocks = true; - cf->print_suppressions = false; - cf->stack_trace_format = " #%n %f %S %M"; +#if TSAN_CONTAINS_UBSAN + __ubsan::Flags *uf = __ubsan::flags(); + uf->SetDefaults(); + + FlagParser ubsan_parser; + __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); + RegisterCommonFlags(&ubsan_parser); +#endif // Let a frontend override. - ParseFlags(f, __tsan_default_options()); - ParseCommonFlagsFromString(cf, __tsan_default_options()); + parser.ParseString(__tsan_default_options()); +#if TSAN_CONTAINS_UBSAN + const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); + ubsan_parser.ParseString(ubsan_default_options); +#endif // Override from command line. - ParseFlags(f, env); - ParseCommonFlagsFromString(cf, env); + parser.ParseString(env); +#if TSAN_CONTAINS_UBSAN + ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS")); +#endif // Sanity check. 
if (!f->report_bugs) { @@ -113,7 +102,11 @@ void InitializeFlags(Flags *f, const char *env) { f->report_signal_unsafe = false; } - if (cf->help) PrintFlagDescriptions(); + SetVerbosity(common_flags()->verbosity); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) parser.PrintFlagDescriptions(); if (f->history_size < 0 || f->history_size > 7) { Printf("ThreadSanitizer: incorrect value for history_size" diff --git a/libsanitizer/tsan/tsan_flags.h b/libsanitizer/tsan/tsan_flags.h index 182d9b298af..3d58ff3793e 100644 --- a/libsanitizer/tsan/tsan_flags.h +++ b/libsanitizer/tsan/tsan_flags.h @@ -18,65 +18,12 @@ namespace __tsan { struct Flags : DDFlags { - // Enable dynamic annotations, otherwise they are no-ops. - bool enable_annotations; - // Suppress a race report if we've already output another race report - // with the same stack. - bool suppress_equal_stacks; - // Suppress a race report if we've already output another race report - // on the same address. - bool suppress_equal_addresses; - // Turns off bug reporting entirely (useful for benchmarking). - bool report_bugs; - // Report thread leaks at exit? - bool report_thread_leaks; - // Report destruction of a locked mutex? - bool report_destroy_locked; - // Report incorrect usages of mutexes and mutex annotations? - bool report_mutex_bugs; - // Report violations of async signal-safety - // (e.g. malloc() call from a signal handler). - bool report_signal_unsafe; - // Report races between atomic and plain memory accesses. - bool report_atomic_races; - // If set, all atomics are effectively sequentially consistent (seq_cst), - // regardless of what user actually specified. - bool force_seq_cst_atomics; - // Print matched "benign" races at exit. - bool print_benign; - // Override exit status if something was reported. - int exitcode; - // Exit after first reported error. - bool halt_on_error; - // Sleep in main thread before exiting for that many ms - // (useful to catch "at exit" races). - int atexit_sleep_ms; - // If set, periodically write memory profile to that file. - const char *profile_memory; - // Flush shadow memory every X ms. - int flush_memory_ms; - // Flush symbolizer caches every X ms. - int flush_symbolizer_ms; - // Resident memory limit in MB to aim at. - // If the process consumes more memory, then TSan will flush shadow memory. - int memory_limit_mb; - // Stops on start until __tsan_resume() is called (for debugging). - bool stop_on_start; - // Controls whether RunningOnValgrind() returns true or false. - bool running_on_valgrind; - // Per-thread history size, controls how many previous memory accesses - // are remembered per thread. Possible values are [0..7]. - // history_size=0 amounts to 32K memory accesses. Each next value doubles - // the amount of memory accesses, up to history_size=7 that amounts to - // 4M memory accesses. The default value is 2 (128K memory accesses). - int history_size; - // Controls level of synchronization implied by IO operations. - // 0 - no synchronization - // 1 - reasonable level of synchronization (write->read) - // 2 - global synchronization of all IO operations - int io_sync; - // Die after multi-threaded fork if the child creates new threads. 
- bool die_after_fork; +#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "tsan_flags.inc" +#undef TSAN_FLAG + + void SetDefaults(); + void ParseFromString(const char *str); }; Flags *flags(); diff --git a/libsanitizer/tsan/tsan_flags.inc b/libsanitizer/tsan/tsan_flags.inc new file mode 100644 index 00000000000..822e560b622 --- /dev/null +++ b/libsanitizer/tsan/tsan_flags.inc @@ -0,0 +1,76 @@ +//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// TSan runtime flags. +// +//===----------------------------------------------------------------------===// +#ifndef TSAN_FLAG +# error "Define TSAN_FLAG prior to including this file!" +#endif + +// TSAN_FLAG(Type, Name, DefaultValue, Description) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +TSAN_FLAG(bool, enable_annotations, true, + "Enable dynamic annotations, otherwise they are no-ops.") +// Suppress a race report if we've already output another race report +// with the same stack. +TSAN_FLAG(bool, suppress_equal_stacks, true, + "Suppress a race report if we've already output another race report " + "with the same stack.") +TSAN_FLAG(bool, suppress_equal_addresses, true, + "Suppress a race report if we've already output another race report " + "on the same address.") + +TSAN_FLAG(bool, report_bugs, true, + "Turns off bug reporting entirely (useful for benchmarking).") +TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?") +TSAN_FLAG(bool, report_destroy_locked, true, + "Report destruction of a locked mutex?") +TSAN_FLAG(bool, report_mutex_bugs, true, + "Report incorrect usages of mutexes and mutex annotations?") +TSAN_FLAG(bool, report_signal_unsafe, true, + "Report violations of async signal-safety " + "(e.g. malloc() call from a signal handler).") +TSAN_FLAG(bool, report_atomic_races, true, + "Report races between atomic and plain memory accesses.") +TSAN_FLAG( + bool, force_seq_cst_atomics, false, + "If set, all atomics are effectively sequentially consistent (seq_cst), " + "regardless of what user actually specified.") +TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.") +TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.") +TSAN_FLAG(int, atexit_sleep_ms, 1000, + "Sleep in main thread before exiting for that many ms " + "(useful to catch \"at exit\" races).") +TSAN_FLAG(const char *, profile_memory, "", + "If set, periodically write memory profile to that file.") +TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.") +TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.") +TSAN_FLAG( + int, memory_limit_mb, 0, + "Resident memory limit in MB to aim at." + "If the process consumes more memory, then TSan will flush shadow memory.") +TSAN_FLAG(bool, stop_on_start, false, + "Stops on start until __tsan_resume() is called (for debugging).") +TSAN_FLAG(bool, running_on_valgrind, false, + "Controls whether RunningOnValgrind() returns true or false.") +TSAN_FLAG( + int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go. + "Per-thread history size, controls how many previous memory accesses " + "are remembered per thread. Possible values are [0..7]. " + "history_size=0 amounts to 32K memory accesses. 
Each next value doubles " + "the amount of memory accesses, up to history_size=7 that amounts to " + "4M memory accesses. The default value is 2 (128K memory accesses).") +TSAN_FLAG(int, io_sync, 1, + "Controls level of synchronization implied by IO operations. " + "0 - no synchronization " + "1 - reasonable level of synchronization (write->read)" + "2 - global synchronization of all IO operations.") +TSAN_FLAG(bool, die_after_fork, true, + "Die after multi-threaded fork if the child creates new threads.") +TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.") diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc index 7f1b6e45a8d..16465b96b6c 100644 --- a/libsanitizer/tsan/tsan_interceptors.cc +++ b/libsanitizer/tsan/tsan_interceptors.cc @@ -18,6 +18,7 @@ #include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "interception/interception.h" +#include "tsan_interceptors.h" #include "tsan_interface.h" #include "tsan_platform.h" #include "tsan_suppressions.h" @@ -29,25 +30,46 @@ using namespace __tsan; // NOLINT #if SANITIZER_FREEBSD #define __errno_location __error -#define __libc_malloc __malloc #define __libc_realloc __realloc #define __libc_calloc __calloc -#define __libc_free __free #define stdout __stdoutp #define stderr __stderrp #endif +#if SANITIZER_LINUX || SANITIZER_FREEBSD +#define PTHREAD_CREATE_DETACHED 1 +#elif SANITIZER_MAC +#define PTHREAD_CREATE_DETACHED 2 +#endif + + +#ifdef __mips__ +const int kSigCount = 129; +#else const int kSigCount = 65; +#endif struct my_siginfo_t { // The size is determined by looking at sizeof of real siginfo_t on linux. u64 opaque[128 / sizeof(u64)]; }; +#ifdef __mips__ +struct ucontext_t { + u64 opaque[768 / sizeof(u64) + 1]; +}; +#else struct ucontext_t { // The size is determined by looking at sizeof of real ucontext_t on linux. u64 opaque[936 / sizeof(u64) + 1]; }; +#endif + +#if defined(__x86_64__) || defined(__mips__) +#define PTHREAD_ABI_BASE "GLIBC_2.3.2" +#elif defined(__aarch64__) +#define PTHREAD_ABI_BASE "GLIBC_2.17" +#endif extern "C" int pthread_attr_init(void *attr); extern "C" int pthread_attr_destroy(void *attr); @@ -66,10 +88,9 @@ extern "C" void *pthread_self(); extern "C" void _exit(int status); extern "C" int *__errno_location(); extern "C" int fileno_unlocked(void *stream); -extern "C" void *__libc_malloc(uptr size); extern "C" void *__libc_calloc(uptr size, uptr n); extern "C" void *__libc_realloc(void *ptr, uptr size); -extern "C" void __libc_free(void *ptr); +extern "C" int dirfd(void *dirp); #if !SANITIZER_FREEBSD extern "C" int mallopt(int param, int value); #endif @@ -86,8 +107,13 @@ const int SIGFPE = 8; const int SIGSEGV = 11; const int SIGPIPE = 13; const int SIGTERM = 15; +#ifdef __mips__ +const int SIGBUS = 10; +const int SIGSYS = 12; +#else const int SIGBUS = 7; const int SIGSYS = 31; +#endif void *const MAP_FAILED = (void*)-1; const int PTHREAD_BARRIER_SERIAL_THREAD = -1; const int MAP_FIXED = 0x10; @@ -99,21 +125,27 @@ typedef long long_t; // NOLINT # define F_TLOCK 2 /* Test and lock a region for exclusive use. */ # define F_TEST 3 /* Test a region for other processes locks. 
*/ -typedef void (*sighandler_t)(int sig); - #define errno (*__errno_location()) +typedef void (*sighandler_t)(int sig); +typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx); + struct sigaction_t { +#ifdef __mips__ + u32 sa_flags; +#endif union { sighandler_t sa_handler; - void (*sa_sigaction)(int sig, my_siginfo_t *siginfo, void *uctx); + sigactionhandler_t sa_sigaction; }; #if SANITIZER_FREEBSD int sa_flags; __sanitizer_sigset_t sa_mask; #else __sanitizer_sigset_t sa_mask; +#ifndef __mips__ int sa_flags; +#endif void (*sa_restorer)(); #endif }; @@ -121,12 +153,19 @@ struct sigaction_t { const sighandler_t SIG_DFL = (sighandler_t)0; const sighandler_t SIG_IGN = (sighandler_t)1; const sighandler_t SIG_ERR = (sighandler_t)-1; +#if SANITIZER_FREEBSD +const int SA_SIGINFO = 0x40; +const int SIG_SETMASK = 3; +#elif defined(__mips__) +const int SA_SIGINFO = 8; +const int SIG_SETMASK = 3; +#else const int SA_SIGINFO = 4; const int SIG_SETMASK = 2; +#endif -namespace std { -struct nothrow_t {}; -} // namespace std +#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \ + (!cur_thread()->is_inited) static sigaction_t sigactions[kSigCount]; @@ -138,7 +177,7 @@ struct SignalDesc { ucontext_t ctx; }; -struct SignalContext { +struct ThreadSignalContext { int int_signal_send; atomic_uintptr_t in_blocking_func; atomic_uintptr_t have_pending_signals; @@ -153,16 +192,22 @@ static LibIgnore *libignore() { } void InitializeLibIgnore() { - libignore()->Init(*SuppressionContext::Get()); + const SuppressionContext &supp = *Suppressions(); + const uptr n = supp.SuppressionCount(); + for (uptr i = 0; i < n; i++) { + const Suppression *s = supp.SuppressionAt(i); + if (0 == internal_strcmp(s->type, kSuppressionLib)) + libignore()->AddIgnoredLibrary(s->templ); + } libignore()->OnLibraryLoaded(0); } } // namespace __tsan -static SignalContext *SigCtx(ThreadState *thr) { - SignalContext *ctx = (SignalContext*)thr->signal_ctx; +static ThreadSignalContext *SigCtx(ThreadState *thr) { + ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx; if (ctx == 0 && !thr->is_dead) { - ctx = (SignalContext*)MmapOrDie(sizeof(*ctx), "SignalContext"); + ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext"); MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx)); thr->signal_ctx = ctx; } @@ -171,16 +216,6 @@ static SignalContext *SigCtx(ThreadState *thr) { static unsigned g_thread_finalize_key; -class ScopedInterceptor { - public: - ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc); - ~ScopedInterceptor(); - private: - ThreadState *const thr_; - const uptr pc_; - bool in_ignored_lib_; -}; - ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc) : thr_(thr) @@ -210,14 +245,6 @@ ScopedInterceptor::~ScopedInterceptor() { } } -#define SCOPED_INTERCEPTOR_RAW(func, ...) \ - ThreadState *thr = cur_thread(); \ - const uptr caller_pc = GET_CALLER_PC(); \ - ScopedInterceptor si(thr, #func, caller_pc); \ - const uptr pc = StackTrace::GetCurrentPc(); \ - (void)pc; \ -/**/ - #define SCOPED_TSAN_INTERCEPTOR(func, ...) \ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ if (REAL(func) == 0) { \ @@ -236,6 +263,13 @@ ScopedInterceptor::~ScopedInterceptor() { # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver) #endif +#define READ_STRING_OF_LEN(thr, pc, s, len, n) \ + MemoryAccessRange((thr), (pc), (uptr)(s), \ + common_flags()->strict_string_checks ? 
(len) + 1 : (n), false) + +#define READ_STRING(thr, pc, s, n) \ + READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n)) + #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name)) struct BlockingCall { @@ -263,7 +297,7 @@ struct BlockingCall { } ThreadState *thr; - SignalContext *ctx; + ThreadSignalContext *ctx; }; TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) { @@ -375,7 +409,7 @@ static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) { } static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) { - if (thr->shadow_stack_pos == 0) // called from libc guts during bootstrap + if (!thr->is_inited) // called from libc guts during bootstrap return; // Cleanup old bufs. JmpBufGarbageCollect(thr, sp); @@ -384,7 +418,7 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) { buf->sp = sp; buf->mangled_sp = mangled_sp; buf->shadow_stack_pos = thr->shadow_stack_pos; - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); buf->int_signal_send = sctx ? sctx->int_signal_send : 0; buf->in_blocking_func = sctx ? atomic_load(&sctx->in_blocking_func, memory_order_relaxed) : @@ -407,7 +441,7 @@ static void LongJmp(ThreadState *thr, uptr *env) { // Unwind the stack. while (thr->shadow_stack_pos > buf->shadow_stack_pos) FuncExit(thr); - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); if (sctx) { sctx->int_signal_send = buf->int_signal_send; atomic_store(&sctx->in_blocking_func, buf->in_blocking_func, @@ -503,14 +537,10 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { if (cur_thread()->in_symbolizer) return __libc_calloc(size, n); - if (__sanitizer::CallocShouldReturnNullDueToOverflow(size, n)) - return AllocatorReturnNull(); void *p = 0; { SCOPED_INTERCEPTOR_RAW(calloc, size, n); - p = user_alloc(thr, pc, n * size); - if (p) - internal_memset(p, 0, n * size); + p = user_calloc(thr, pc, size, n); } invoke_malloc_hook(p, n * size); return p; @@ -554,73 +584,6 @@ TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { return user_alloc_usable_size(p); } -#define OPERATOR_NEW_BODY(mangled_name) \ - if (cur_thread()->in_symbolizer) \ - return __libc_malloc(size); \ - void *p = 0; \ - { \ - SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ - p = user_alloc(thr, pc, size); \ - } \ - invoke_malloc_hook(p, size); \ - return p; - -SANITIZER_INTERFACE_ATTRIBUTE -void *operator new(__sanitizer::uptr size); -void *operator new(__sanitizer::uptr size) { - OPERATOR_NEW_BODY(_Znwm); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *operator new[](__sanitizer::uptr size); -void *operator new[](__sanitizer::uptr size) { - OPERATOR_NEW_BODY(_Znam); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *operator new(__sanitizer::uptr size, std::nothrow_t const&); -void *operator new(__sanitizer::uptr size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void *operator new[](__sanitizer::uptr size, std::nothrow_t const&); -void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t); -} - -#define OPERATOR_DELETE_BODY(mangled_name) \ - if (ptr == 0) return; \ - if (cur_thread()->in_symbolizer) \ - return __libc_free(ptr); \ - invoke_free_hook(ptr); \ - SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \ - user_free(thr, pc, ptr); - -SANITIZER_INTERFACE_ATTRIBUTE -void operator delete(void *ptr) throw(); -void operator delete(void *ptr) throw() { - OPERATOR_DELETE_BODY(_ZdlPv); -} - 
-SANITIZER_INTERFACE_ATTRIBUTE -void operator delete[](void *ptr) throw(); -void operator delete[](void *ptr) throw() { - OPERATOR_DELETE_BODY(_ZdaPv); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void operator delete(void *ptr, std::nothrow_t const&); -void operator delete(void *ptr, std::nothrow_t const&) { - OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t); -} - -SANITIZER_INTERFACE_ATTRIBUTE -void operator delete[](void *ptr, std::nothrow_t const&); -void operator delete[](void *ptr, std::nothrow_t const&) { - OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t); -} - TSAN_INTERCEPTOR(uptr, strlen, const char *s) { SCOPED_TSAN_INTERCEPTOR(strlen, s); uptr len = internal_strlen(s); @@ -629,29 +592,22 @@ TSAN_INTERCEPTOR(uptr, strlen, const char *s) { } TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) { - SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size); - MemoryAccessRange(thr, pc, (uptr)dst, size, true); + // On FreeBSD we get here from libthr internals on thread initialization. + if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { + SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size); + MemoryAccessRange(thr, pc, (uptr)dst, size, true); + } return internal_memset(dst, v, size); } TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) { - SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size); - MemoryAccessRange(thr, pc, (uptr)dst, size, true); - MemoryAccessRange(thr, pc, (uptr)src, size, false); - return internal_memcpy(dst, src, size); -} - -TSAN_INTERCEPTOR(int, memcmp, const void *s1, const void *s2, uptr n) { - SCOPED_TSAN_INTERCEPTOR(memcmp, s1, s2, n); - int res = 0; - uptr len = 0; - for (; len < n; len++) { - if ((res = ((unsigned char*)s1)[len] - ((unsigned char*)s2)[len])) - break; + // On FreeBSD we get here from libthr internals on thread initialization. + if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { + SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size); + MemoryAccessRange(thr, pc, (uptr)dst, size, true); + MemoryAccessRange(thr, pc, (uptr)src, size, false); } - MemoryAccessRange(thr, pc, (uptr)s1, len < n ? len + 1 : n, false); - MemoryAccessRange(thr, pc, (uptr)s2, len < n ? len + 1 : n, false); - return res; + return internal_memcpy(dst, src, size); } TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) { @@ -664,8 +620,9 @@ TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) { TSAN_INTERCEPTOR(char*, strchr, char *s, int c) { SCOPED_TSAN_INTERCEPTOR(strchr, s, c); char *res = REAL(strchr)(s, c); - uptr len = res ? (char*)res - (char*)s + 1 : internal_strlen(s) + 1; - MemoryAccessRange(thr, pc, (uptr)s, len, false); + uptr len = internal_strlen(s); + uptr n = res ? 
(char*)res - (char*)s + 1 : len + 1; + READ_STRING_OF_LEN(thr, pc, s, len, n); return res; } @@ -673,7 +630,7 @@ TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) { SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c); char *res = REAL(strchrnul)(s, c); uptr len = (char*)res - (char*)s + 1; - MemoryAccessRange(thr, pc, (uptr)s, len, false); + READ_STRING(thr, pc, s, len); return res; } @@ -699,16 +656,6 @@ TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) { return REAL(strncpy)(dst, src, n); } -TSAN_INTERCEPTOR(const char*, strstr, const char *s1, const char *s2) { - SCOPED_TSAN_INTERCEPTOR(strstr, s1, s2); - const char *res = REAL(strstr)(s1, s2); - uptr len1 = internal_strlen(s1); - uptr len2 = internal_strlen(s2); - MemoryAccessRange(thr, pc, (uptr)s1, len1 + 1, false); - MemoryAccessRange(thr, pc, (uptr)s2, len2 + 1, false); - return res; -} - TSAN_INTERCEPTOR(char*, strdup, const char *str) { SCOPED_TSAN_INTERCEPTOR(strdup, str); // strdup will call malloc, so no instrumentation is required here. @@ -764,7 +711,11 @@ TSAN_INTERCEPTOR(void*, mmap64, void *addr, long_t sz, int prot, TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) { SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz); - DontNeedShadowFor((uptr)addr, sz); + if (sz != 0) { + // If sz == 0, munmap will return EINVAL and don't unmap any memory. + DontNeedShadowFor((uptr)addr, sz); + ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz); + } int res = REAL(munmap)(addr, sz); return res; } @@ -846,7 +797,7 @@ static void thread_finalize(void *v) { { ThreadState *thr = cur_thread(); ThreadFinish(thr); - SignalContext *sctx = thr->signal_ctx; + ThreadSignalContext *sctx = thr->signal_ctx; if (sctx) { thr->signal_ctx = 0; UnmapOrDie(sctx, sizeof(*sctx)); @@ -872,15 +823,15 @@ extern "C" void *__tsan_thread_start_func(void *arg) { ScopedIgnoreInterceptors ignore; ThreadIgnoreBegin(thr, 0); if (pthread_setspecific(g_thread_finalize_key, - (void *)kPthreadDestructorIterations)) { + (void *)GetPthreadDestructorIterations())) { Printf("ThreadSanitizer: failed to set thread key\n"); Die(); } ThreadIgnoreEnd(thr, 0); while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0) pthread_yield(); - atomic_store(&p->tid, 0, memory_order_release); ThreadStart(thr, tid, GetTid()); + atomic_store(&p->tid, 0, memory_order_release); } void *res = callback(param); // Prevent the callback from being tail called, @@ -926,8 +877,16 @@ TSAN_INTERCEPTOR(int, pthread_create, ThreadIgnoreEnd(thr, pc); } if (res == 0) { - int tid = ThreadCreate(thr, pc, *(uptr*)th, detached); + int tid = ThreadCreate(thr, pc, *(uptr*)th, + detached == PTHREAD_CREATE_DETACHED); CHECK_NE(tid, 0); + // Synchronization on p.tid serves two purposes: + // 1. ThreadCreate must finish before the new thread starts. + // Otherwise the new thread can call pthread_detach, but the pthread_t + // identifier is not yet registered in ThreadRegistry by ThreadCreate. + // 2. ThreadStart must finish before this thread continues. + // Otherwise, this thread can call pthread_detach and reset thr->sync + // before the new thread got a chance to acquire from it in ThreadStart. 
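
The comment above spells out the two ordering requirements that the handshake on p.tid enforces between pthread_create and the new thread. A standalone sketch of the same pattern in portable C++ (illustrative only; the interceptor itself uses the sanitizer's own atomic wrappers and pthread_yield):

    #include <atomic>
    #include <thread>

    static std::atomic<int> tid_box{0};

    static void child_thread() {
      // Phase 1: wait until the parent has registered us (ThreadCreate done).
      while (tid_box.load(std::memory_order_acquire) == 0)
        std::this_thread::yield();
      // ... ThreadStart-equivalent initialization would run here ...
      // Phase 2: tell the parent that startup finished.
      tid_box.store(0, std::memory_order_release);
    }

    int main() {
      std::thread t(child_thread);
      int tid = 42;                                   // stand-in for ThreadCreate's id
      tid_box.store(tid, std::memory_order_release);  // phase 1: publish the id
      while (tid_box.load(std::memory_order_acquire) != 0)
        std::this_thread::yield();                    // phase 2: wait for startup
      // Only now is it safe for this thread to race ahead (e.g. call detach).
      t.join();
      return 0;
    }
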
atomic_store(&p.tid, tid, memory_order_release); while (atomic_load(&p.tid, memory_order_acquire) != 0) pthread_yield(); @@ -949,6 +908,8 @@ TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) { return res; } +DEFINE_REAL_PTHREAD_FUNCTIONS + TSAN_INTERCEPTOR(int, pthread_detach, void *th) { SCOPED_TSAN_INTERCEPTOR(pthread_detach, th); int tid = ThreadTid(thr, pc, (uptr)th); @@ -998,13 +959,25 @@ static void *init_cond(void *c, bool force = false) { } struct CondMutexUnlockCtx { + ScopedInterceptor *si; ThreadState *thr; uptr pc; void *m; }; static void cond_mutex_unlock(CondMutexUnlockCtx *arg) { + // pthread_cond_wait interceptor has enabled async signal delivery + // (see BlockingCall below). Disable async signals since we are running + // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run + // since the thread is cancelled, so we have to manually execute them + // (the thread still can run some user code due to pthread_cleanup_push). + ThreadSignalContext *ctx = SigCtx(arg->thr); + CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1); + atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed); MutexLock(arg->thr, arg->pc, (uptr)arg->m); + // Undo BlockingCall ctor effects. + arg->thr->ignore_interceptors--; + arg->si->~ScopedInterceptor(); } INTERCEPTOR(int, pthread_cond_init, void *c, void *a) { @@ -1017,14 +990,19 @@ INTERCEPTOR(int, pthread_cond_init, void *c, void *a) { INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m); - MutexUnlock(thr, pc, (uptr)m); MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); - CondMutexUnlockCtx arg = {thr, pc, m}; + MutexUnlock(thr, pc, (uptr)m); + CondMutexUnlockCtx arg = {&si, thr, pc, m}; + int res = 0; // This ensures that we handle mutex lock even in case of pthread_cancel. // See test/tsan/cond_cancel.cc. - int res = call_pthread_cancel_with_cleanup( - (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait), - cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg); + { + // Enable signal delivery while the thread is blocked. + BlockingCall bc(thr); + res = call_pthread_cancel_with_cleanup( + (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait), + cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg); + } if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m); MutexLock(thr, pc, (uptr)m); @@ -1034,14 +1012,18 @@ INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) { INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) { void *cond = init_cond(c); SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime); - MutexUnlock(thr, pc, (uptr)m); MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false); - CondMutexUnlockCtx arg = {thr, pc, m}; + MutexUnlock(thr, pc, (uptr)m); + CondMutexUnlockCtx arg = {&si, thr, pc, m}; + int res = 0; // This ensures that we handle mutex lock even in case of pthread_cancel. // See test/tsan/cond_cancel.cc. 
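
The cleanup-handler machinery above exists because pthread_cond_wait is a cancellation point and C++ destructors of locals do not run when a thread is cancelled. The same problem in plain POSIX terms, as a minimal sketch (names are illustrative, not taken from the source):

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
    static int ready = 0;

    static void unlock_on_cancel(void *arg) {
      // On cancellation pthread_cond_wait re-acquires the mutex before
      // unwinding, so the cleanup handler must release it.
      pthread_mutex_unlock((pthread_mutex_t *)arg);
    }

    static void *waiter(void *) {
      pthread_mutex_lock(&mu);
      pthread_cleanup_push(unlock_on_cancel, &mu);
      while (!ready)
        pthread_cond_wait(&cv, &mu);   // cancellation point
      pthread_cleanup_pop(0);
      pthread_mutex_unlock(&mu);
      return 0;
    }

    int main() {
      pthread_t th;
      pthread_create(&th, 0, waiter, 0);
      sleep(1);
      pthread_cancel(th);              // waiter unwinds through unlock_on_cancel
      pthread_join(th, 0);
      return 0;
    }
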
- int res = call_pthread_cancel_with_cleanup( - REAL(pthread_cond_timedwait), cond, m, abstime, - (void(*)(void *arg))cond_mutex_unlock, &arg); + { + BlockingCall bc(thr); + res = call_pthread_cancel_with_cleanup( + REAL(pthread_cond_timedwait), cond, m, abstime, + (void(*)(void *arg))cond_mutex_unlock, &arg); + } if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m); MutexLock(thr, pc, (uptr)m); @@ -1290,64 +1272,10 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) { return 0; } -TSAN_INTERCEPTOR(int, sem_init, void *s, int pshared, unsigned value) { - SCOPED_TSAN_INTERCEPTOR(sem_init, s, pshared, value); - int res = REAL(sem_init)(s, pshared, value); - return res; -} - -TSAN_INTERCEPTOR(int, sem_destroy, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_destroy, s); - int res = REAL(sem_destroy)(s); - return res; -} - -TSAN_INTERCEPTOR(int, sem_wait, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_wait, s); - int res = BLOCK_REAL(sem_wait)(s); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -TSAN_INTERCEPTOR(int, sem_trywait, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_trywait, s); - int res = BLOCK_REAL(sem_trywait)(s); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -TSAN_INTERCEPTOR(int, sem_timedwait, void *s, void *abstime) { - SCOPED_TSAN_INTERCEPTOR(sem_timedwait, s, abstime); - int res = BLOCK_REAL(sem_timedwait)(s, abstime); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - -TSAN_INTERCEPTOR(int, sem_post, void *s) { - SCOPED_TSAN_INTERCEPTOR(sem_post, s); - Release(thr, pc, (uptr)s); - int res = REAL(sem_post)(s); - return res; -} - -TSAN_INTERCEPTOR(int, sem_getvalue, void *s, int *sval) { - SCOPED_TSAN_INTERCEPTOR(sem_getvalue, s, sval); - int res = REAL(sem_getvalue)(s, sval); - if (res == 0) { - Acquire(thr, pc, (uptr)s); - } - return res; -} - #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__xstat)(version, path, buf); } #define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat) @@ -1358,9 +1286,11 @@ TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) { TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) { #if SANITIZER_FREEBSD SCOPED_TSAN_INTERCEPTOR(stat, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(stat)(path, buf); #else SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__xstat)(0, path, buf); #endif } @@ -1368,6 +1298,7 @@ TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__xstat64)(version, path, buf); } #define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64) @@ -1378,6 +1309,7 @@ TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__xstat64)(0, path, buf); } #define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64) @@ -1388,6 +1320,7 @@ TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf); + 
READ_STRING(thr, pc, path, 0); return REAL(__lxstat)(version, path, buf); } #define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat) @@ -1398,9 +1331,11 @@ TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) { TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) { #if SANITIZER_FREEBSD SCOPED_TSAN_INTERCEPTOR(lstat, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(lstat)(path, buf); #else SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__lxstat)(0, path, buf); #endif } @@ -1408,6 +1343,7 @@ TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__lxstat64)(version, path, buf); } #define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64) @@ -1418,6 +1354,7 @@ TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) { SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf); + READ_STRING(thr, pc, path, 0); return REAL(__lxstat64)(0, path, buf); } #define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64) @@ -1477,6 +1414,7 @@ TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) { TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode); + READ_STRING(thr, pc, name, 0); int fd = REAL(open)(name, flags, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); @@ -1486,6 +1424,7 @@ TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) { SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode); + READ_STRING(thr, pc, name, 0); int fd = REAL(open64)(name, flags, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); @@ -1498,6 +1437,7 @@ TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) { TSAN_INTERCEPTOR(int, creat, const char *name, int mode) { SCOPED_TSAN_INTERCEPTOR(creat, name, mode); + READ_STRING(thr, pc, name, 0); int fd = REAL(creat)(name, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); @@ -1507,6 +1447,7 @@ TSAN_INTERCEPTOR(int, creat, const char *name, int mode) { #if !SANITIZER_FREEBSD TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) { SCOPED_TSAN_INTERCEPTOR(creat64, name, mode); + READ_STRING(thr, pc, name, 0); int fd = REAL(creat64)(name, mode); if (fd >= 0) FdFileCreate(thr, pc, fd); @@ -1521,7 +1462,7 @@ TSAN_INTERCEPTOR(int, dup, int oldfd) { SCOPED_TSAN_INTERCEPTOR(dup, oldfd); int newfd = REAL(dup)(oldfd); if (oldfd >= 0 && newfd >= 0 && newfd != oldfd) - FdDup(thr, pc, oldfd, newfd); + FdDup(thr, pc, oldfd, newfd, true); return newfd; } @@ -1529,7 +1470,7 @@ TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) { SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd); int newfd2 = REAL(dup2)(oldfd, newfd); if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) - FdDup(thr, pc, oldfd, newfd2); + FdDup(thr, pc, oldfd, newfd2, false); return newfd2; } @@ -1537,7 +1478,7 @@ TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) { SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags); int newfd2 = REAL(dup3)(oldfd, newfd, flags); if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd) - FdDup(thr, pc, oldfd, newfd2); + FdDup(thr, pc, oldfd, newfd2, false); return newfd2; } @@ -1797,9 +1738,16 @@ TSAN_INTERCEPTOR(uptr, fwrite, const void *p, 
uptr size, uptr nmemb, void *f) { return REAL(fwrite)(p, size, nmemb, f); } +static void FlushStreams() { + // Flushing all the streams here may freeze the process if a child thread is + // performing file stream operations at the same time. + REAL(fflush)(stdout); + REAL(fflush)(stderr); +} + TSAN_INTERCEPTOR(void, abort, int fake) { SCOPED_TSAN_INTERCEPTOR(abort, fake); - REAL(fflush)(0); + FlushStreams(); REAL(abort)(fake); } @@ -1816,12 +1764,11 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) { return res; } -TSAN_INTERCEPTOR(void*, opendir, char *path) { - SCOPED_TSAN_INTERCEPTOR(opendir, path); - void *res = REAL(opendir)(path); - if (res != 0) - Acquire(thr, pc, Dir2addr(path)); - return res; +TSAN_INTERCEPTOR(int, closedir, void *dirp) { + SCOPED_TSAN_INTERCEPTOR(closedir, dirp); + int fd = dirfd(dirp); + FdClose(thr, pc, fd); + return REAL(closedir)(dirp); } #if !SANITIZER_FREEBSD @@ -1865,15 +1812,18 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, // Ensure that the handler does not spoil errno. const int saved_errno = errno; errno = 99; - // Need to remember pc before the call, because the handler can reset it. - uptr pc = sigact ? + // This code races with sigaction. Be careful to not read sa_sigaction twice. + // Also need to remember pc for reporting before the call, + // because the handler can reset it. + volatile uptr pc = sigact ? (uptr)sigactions[sig].sa_sigaction : (uptr)sigactions[sig].sa_handler; - pc += 1; // return address is expected, OutputReport() will undo this - if (sigact) - sigactions[sig].sa_sigaction(sig, info, uctx); - else - sigactions[sig].sa_handler(sig); + if (pc != (uptr)SIG_DFL && pc != (uptr)SIG_IGN) { + if (sigact) + ((sigactionhandler_t)pc)(sig, info, uctx); + else + ((sighandler_t)pc)(sig); + } // We do not detect errno spoiling for SIGTERM, // because some SIGTERM handlers do spoil errno but reraise SIGTERM, // tsan reports false positive in such case. @@ -1883,10 +1833,12 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, // signal; and it looks too fragile to intercept all ways to reraise a signal. if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) { VarSizeStackTrace stack; - ObtainCurrentStack(thr, pc, &stack); + // StackTrace::GetNestInstructionPc(pc) is used because return address is + // expected, OutputReport() will undo this. + ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeErrnoInSignal); - if (!IsFiredSuppression(ctx, rep, stack)) { + if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { rep.AddStack(stack, true); OutputReport(thr, rep); } @@ -1895,7 +1847,7 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire, } void ProcessPendingSignals(ThreadState *thr) { - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); if (sctx == 0 || atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0) return; @@ -1903,26 +1855,23 @@ void ProcessPendingSignals(ThreadState *thr) { atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); // These are too big for stack. 
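
CallUserSignalHandler above seeds errno with a sentinel value (99) before invoking the user handler and reports ErrnoInSignal if the handler returns with a different value. For reference, this is what a handler that passes the check looks like, as a small sketch:

    #include <errno.h>
    #include <signal.h>
    #include <unistd.h>

    static void on_sigusr1(int) {
      int saved = errno;                     // errno is shared with the interrupted code
      (void)write(2, "got SIGUSR1\n", 12);   // write() may overwrite errno
      errno = saved;                         // restore before returning
    }

    int main() {
      signal(SIGUSR1, on_sigusr1);
      raise(SIGUSR1);
      return 0;
    }
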
static THREADLOCAL __sanitizer_sigset_t emptyset, oldset; - REAL(sigfillset)(&emptyset); - pthread_sigmask(SIG_SETMASK, &emptyset, &oldset); + CHECK_EQ(0, REAL(sigfillset)(&emptyset)); + CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &emptyset, &oldset)); for (int sig = 0; sig < kSigCount; sig++) { SignalDesc *signal = &sctx->pending_signals[sig]; if (signal->armed) { signal->armed = false; - if (sigactions[sig].sa_handler != SIG_DFL - && sigactions[sig].sa_handler != SIG_IGN) { - CallUserSignalHandler(thr, false, true, signal->sigaction, - sig, &signal->siginfo, &signal->ctx); - } + CallUserSignalHandler(thr, false, true, signal->sigaction, sig, + &signal->siginfo, &signal->ctx); } } - pthread_sigmask(SIG_SETMASK, &oldset, 0); + CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &oldset, 0)); atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); } } // namespace __tsan -static bool is_sync_signal(SignalContext *sctx, int sig) { +static bool is_sync_signal(ThreadSignalContext *sctx, int sig) { return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS || // If we are sending signal to ourselves, we must process it now. @@ -1932,7 +1881,7 @@ static bool is_sync_signal(SignalContext *sctx, int sig) { void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, my_siginfo_t *info, void *ctx) { ThreadState *thr = cur_thread(); - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); if (sig < 0 || sig >= kSigCount) { VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig); return; @@ -1995,7 +1944,19 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) { internal_memcpy(old, &sigactions[sig], sizeof(*old)); if (act == 0) return 0; - internal_memcpy(&sigactions[sig], act, sizeof(*act)); + // Copy act into sigactions[sig]. + // Can't use struct copy, because compiler can emit call to memcpy. + // Can't use internal_memcpy, because it copies byte-by-byte, + // and signal handler reads the sa_handler concurrently. It it can read + // some bytes from old value and some bytes from new value. + // Use volatile to prevent insertion of memcpy. 
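
The field-by-field copy below, done through volatile lvalues, is what keeps a concurrently running signal handler from observing a half-updated sa_handler: both plain struct assignment and internal_memcpy may be lowered to a byte-wise copy. A reduced sketch of the idea (the same intent could also be expressed with a relaxed atomic store):

    typedef void (*handler_fn)(int);

    struct handler_slot {
      handler_fn handler;
      int flags;
    };

    // Force one store per field of its natural width; volatile stops the
    // compiler from turning the copy into a memcpy call, and on supported
    // targets an aligned pointer-sized store is a single instruction.
    static void publish(handler_slot *dst, const handler_slot *src) {
      *(volatile handler_fn *)&dst->handler =
          *(const volatile handler_fn *)&src->handler;
      *(volatile int *)&dst->flags = src->flags;
    }
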
+ sigactions[sig].sa_handler = *(volatile sighandler_t*)&act->sa_handler; + sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags; + internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask, + sizeof(sigactions[sig].sa_mask)); +#if !SANITIZER_FREEBSD + sigactions[sig].sa_restorer = act->sa_restorer; +#endif sigaction_t newact; internal_memcpy(&newact, act, sizeof(newact)); REAL(sigfillset)(&newact.sa_mask); @@ -2029,7 +1990,7 @@ TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) { TSAN_INTERCEPTOR(int, raise, int sig) { SCOPED_TSAN_INTERCEPTOR(raise, sig); - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; sctx->int_signal_send = sig; @@ -2041,7 +2002,7 @@ TSAN_INTERCEPTOR(int, raise, int sig) { TSAN_INTERCEPTOR(int, kill, int pid, int sig) { SCOPED_TSAN_INTERCEPTOR(kill, pid, sig); - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; if (pid == (int)internal_getpid()) { @@ -2057,7 +2018,7 @@ TSAN_INTERCEPTOR(int, kill, int pid, int sig) { TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig); - SignalContext *sctx = SigCtx(thr); + ThreadSignalContext *sctx = SigCtx(thr); CHECK_NE(sctx, 0); int prev = sctx->int_signal_send; if (tid == pthread_self()) { @@ -2126,9 +2087,53 @@ TSAN_INTERCEPTOR(int, vfork, int fake) { return WRAP(fork)(fake); } +typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size, + void *data); +struct dl_iterate_phdr_data { + ThreadState *thr; + uptr pc; + dl_iterate_phdr_cb_t cb; + void *data; +}; + +static bool IsAppNotRodata(uptr addr) { + return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata; +} + +static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, + void *data) { + dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data; + // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later + // accessible in dl_iterate_phdr callback. But we don't see synchronization + // inside of dynamic linker, so we "unpoison" it here in order to not + // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough + // because some libc functions call __libc_dlopen. + if (info && IsAppNotRodata((uptr)info->dlpi_name)) + MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, + internal_strlen(info->dlpi_name)); + int res = cbdata->cb(info, size, cbdata->data); + // Perform the check one more time in case info->dlpi_name was overwritten + // by user callback. 
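
For reference, this is the glibc API being wrapped: the interceptor above forwards to a user callback of this shape after resetting the shadow of info->dlpi_name, which lives in dynamic-linker memory written without any synchronization tsan can see. A minimal caller-side example:

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE 1
    #endif
    #include <link.h>
    #include <stdio.h>

    static int print_module(struct dl_phdr_info *info, size_t size, void *data) {
      // info->dlpi_name points into dynamic-linker-owned memory; under tsan the
      // wrapper above clears its shadow before and after this callback runs.
      printf("%s loaded at 0x%lx\n",
             info->dlpi_name[0] ? info->dlpi_name : "(main program)",
             (unsigned long)info->dlpi_addr);
      return 0;                        // returning non-zero stops the iteration
    }

    int main() {
      dl_iterate_phdr(print_module, NULL);
      return 0;
    }
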
+ if (info && IsAppNotRodata((uptr)info->dlpi_name)) + MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, + internal_strlen(info->dlpi_name)); + return res; +} + +TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) { + SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data); + dl_iterate_phdr_data cbdata; + cbdata.thr = thr; + cbdata.pc = pc; + cbdata.cb = cb; + cbdata.data = data; + int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata); + return res; +} + static int OnExit(ThreadState *thr) { int status = Finalize(thr); - REAL(fflush)(0); + FlushStreams(); return status; } @@ -2161,6 +2166,16 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, #undef SANITIZER_INTERCEPT_FGETPWENT #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS +// __tls_get_addr can be called with mis-aligned stack due to: +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066 +// There are two potential issues: +// 1. Sanitizer code contains a MOVDQA spill (it does not seem to be the case +// right now). or 2. ProcessPendingSignal calls user handler which contains +// MOVDQA spill (this happens right now). +// Since the interceptor only initializes memory for msan, the simplest solution +// is to disable the interceptor in tsan (other sanitizers do not call +// signal handlers from COMMON_INTERCEPTOR_ENTER). +#undef SANITIZER_INTERCEPT_TLS_GET_ADDR #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) @@ -2199,12 +2214,21 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, if (fd >= 0) FdClose(thr, pc, fd); \ } -#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res) \ +#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ libignore()->OnLibraryLoaded(filename) #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \ libignore()->OnLibraryUnloaded() +#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \ + Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u) + +#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \ + Release(((TsanInterceptorContext *) ctx)->thr, pc, u) + +#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ + Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path)) + #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd) @@ -2244,6 +2268,14 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc, HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ ((TsanInterceptorContext *)ctx)->pc, msg) +#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ + if (TsanThread *t = GetCurrentThread()) { \ + *begin = t->tls_begin(); \ + *end = t->tls_end(); \ + } else { \ + *begin = *end = 0; \ + } + #include "sanitizer_common/sanitizer_common_interceptors.inc" #define TSAN_SYSCALL() \ @@ -2364,12 +2396,9 @@ static void finalize(void *arg) { ThreadState *thr = cur_thread(); int status = Finalize(thr); // Make sure the output is not lost. - // Flushing all the streams here may freeze the process if a child thread is - // performing file stream operations at the same time. - REAL(fflush)(stdout); - REAL(fflush)(stderr); + FlushStreams(); if (status) - REAL(_exit)(status); + Die(); } static void unreachable() { @@ -2381,7 +2410,6 @@ void InitializeInterceptors() { // We need to setup it early, because functions like dlsym() can call it. REAL(memset) = internal_memset; REAL(memcpy) = internal_memcpy; - REAL(memcmp) = internal_memcmp; // Instruct libc malloc to consume less memory. 
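
The mallopt() calls that the trailing comment refers to sit outside this hunk's context lines, so the exact parameters are not visible here. As a generic illustration of the kind of tuning glibc exposes for this purpose (values chosen for the example, not taken from the source):

    #include <malloc.h>

    static void shrink_glibc_malloc_footprint() {
      mallopt(M_TRIM_THRESHOLD, 0);          // return freed memory to the kernel eagerly
      mallopt(M_MMAP_THRESHOLD, 64 * 1024);  // satisfy large requests via mmap instead
    }
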
#if !SANITIZER_FREEBSD @@ -2420,25 +2448,23 @@ void InitializeInterceptors() { TSAN_INTERCEPT(memset); TSAN_INTERCEPT(memcpy); TSAN_INTERCEPT(memmove); - TSAN_INTERCEPT(memcmp); TSAN_INTERCEPT(strchr); TSAN_INTERCEPT(strchrnul); TSAN_INTERCEPT(strrchr); TSAN_INTERCEPT(strcpy); // NOLINT TSAN_INTERCEPT(strncpy); - TSAN_INTERCEPT(strstr); TSAN_INTERCEPT(strdup); TSAN_INTERCEPT(pthread_create); TSAN_INTERCEPT(pthread_join); TSAN_INTERCEPT(pthread_detach); - TSAN_INTERCEPT_VER(pthread_cond_init, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_signal, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_broadcast, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_wait, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_timedwait, "GLIBC_2.3.2"); - TSAN_INTERCEPT_VER(pthread_cond_destroy, "GLIBC_2.3.2"); + TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); TSAN_INTERCEPT(pthread_mutex_init); TSAN_INTERCEPT(pthread_mutex_destroy); @@ -2467,14 +2493,6 @@ void InitializeInterceptors() { TSAN_INTERCEPT(pthread_once); - TSAN_INTERCEPT(sem_init); - TSAN_INTERCEPT(sem_destroy); - TSAN_INTERCEPT(sem_wait); - TSAN_INTERCEPT(sem_trywait); - TSAN_INTERCEPT(sem_timedwait); - TSAN_INTERCEPT(sem_post); - TSAN_INTERCEPT(sem_getvalue); - TSAN_INTERCEPT(stat); TSAN_MAYBE_INTERCEPT___XSTAT; TSAN_MAYBE_INTERCEPT_STAT64; @@ -2523,7 +2541,7 @@ void InitializeInterceptors() { TSAN_INTERCEPT(abort); TSAN_INTERCEPT(puts); TSAN_INTERCEPT(rmdir); - TSAN_INTERCEPT(opendir); + TSAN_INTERCEPT(closedir); TSAN_MAYBE_INTERCEPT_EPOLL_CTL; TSAN_MAYBE_INTERCEPT_EPOLL_WAIT; @@ -2542,6 +2560,7 @@ void InitializeInterceptors() { TSAN_INTERCEPT(fork); TSAN_INTERCEPT(vfork); + TSAN_INTERCEPT(dl_iterate_phdr); TSAN_INTERCEPT(on_exit); TSAN_INTERCEPT(__cxa_atexit); TSAN_INTERCEPT(_exit); @@ -2562,19 +2581,4 @@ void InitializeInterceptors() { FdInit(); } -void *internal_start_thread(void(*func)(void *arg), void *arg) { - // Start the thread with signals blocked, otherwise it can steal user signals. - __sanitizer_sigset_t set, old; - internal_sigfillset(&set); - internal_sigprocmask(SIG_SETMASK, &set, &old); - void *th; - REAL(pthread_create)(&th, 0, (void*(*)(void *arg))func, arg); - internal_sigprocmask(SIG_SETMASK, &old, 0); - return th; -} - -void internal_join_thread(void *th) { - REAL(pthread_join)(th, 0); -} - } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h new file mode 100644 index 00000000000..4a246c647a0 --- /dev/null +++ b/libsanitizer/tsan/tsan_interceptors.h @@ -0,0 +1,35 @@ +#ifndef TSAN_INTERCEPTORS_H +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "tsan_rtl.h" + +namespace __tsan { + +class ScopedInterceptor { + public: + ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc); + ~ScopedInterceptor(); + private: + ThreadState *const thr_; + const uptr pc_; + bool in_ignored_lib_; +}; + +} // namespace __tsan + +#define SCOPED_INTERCEPTOR_RAW(func, ...) 
\ + ThreadState *thr = cur_thread(); \ + const uptr caller_pc = GET_CALLER_PC(); \ + ScopedInterceptor si(thr, #func, caller_pc); \ + const uptr pc = StackTrace::GetCurrentPc(); \ + (void)pc; \ +/**/ + +#if SANITIZER_FREEBSD +#define __libc_free __free +#define __libc_malloc __malloc +#endif + +extern "C" void __libc_free(void *ptr); +extern "C" void *__libc_malloc(uptr size); + +#endif // TSAN_INTERCEPTORS_H diff --git a/libsanitizer/tsan/tsan_interface.cc b/libsanitizer/tsan/tsan_interface.cc index e056bd465bf..ee9a627076b 100644 --- a/libsanitizer/tsan/tsan_interface.cc +++ b/libsanitizer/tsan/tsan_interface.cc @@ -36,56 +36,89 @@ void __tsan_write16(void *addr) { MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8); } -u16 __tsan_unaligned_read2(const uu16 *addr) { +void __tsan_read16_pc(void *addr, void *pc) { + MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8); + MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8); +} + +void __tsan_write16_pc(void *addr, void *pc) { + MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8); + MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8); +} + +// __tsan_unaligned_read/write calls are emitted by compiler. + +void __tsan_unaligned_read2(const void *addr) { UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false); - return *addr; } -u32 __tsan_unaligned_read4(const uu32 *addr) { +void __tsan_unaligned_read4(const void *addr) { UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false); - return *addr; } -u64 __tsan_unaligned_read8(const uu64 *addr) { +void __tsan_unaligned_read8(const void *addr) { UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false); - return *addr; } -void __tsan_unaligned_write2(uu16 *addr, u16 v) { +void __tsan_unaligned_read16(const void *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false); +} + +void __tsan_unaligned_write2(void *addr) { UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false); - *addr = v; } -void __tsan_unaligned_write4(uu32 *addr, u32 v) { +void __tsan_unaligned_write4(void *addr) { UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false); - *addr = v; } -void __tsan_unaligned_write8(uu64 *addr, u64 v) { +void __tsan_unaligned_write8(void *addr) { UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false); - *addr = v; } +void __tsan_unaligned_write16(void *addr) { + UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false); +} + +// __sanitizer_unaligned_load/store are for user instrumentation. 
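
The comment above marks the dividing line: the __tsan_unaligned_* entry points are emitted by the compiler and only report the access, while the __sanitizer_unaligned_* wrappers defined next are for manual instrumentation and also perform the load or store. A minimal caller-side sketch:

    #include <sanitizer/common_interface_defs.h>

    // Read a 4-byte field at an odd offset inside a packed buffer. The helper
    // reports the unaligned 32-bit read to the runtime and returns the loaded
    // value, so the access is both checked and performed in one call.
    static unsigned read_unaligned_u32(const unsigned char *buf, unsigned offset) {
      return __sanitizer_unaligned_load32(buf + offset);
    }
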
+ extern "C" { SANITIZER_INTERFACE_ATTRIBUTE -uint16_t __sanitizer_unaligned_load16(void *addr) - ALIAS("__tsan_unaligned_read2"); +u16 __sanitizer_unaligned_load16(const uu16 *addr) { + __tsan_unaligned_read2(addr); + return *addr; +} + SANITIZER_INTERFACE_ATTRIBUTE -uint32_t __sanitizer_unaligned_load32(void *addr) - ALIAS("__tsan_unaligned_read4"); +u32 __sanitizer_unaligned_load32(const uu32 *addr) { + __tsan_unaligned_read4(addr); + return *addr; +} + SANITIZER_INTERFACE_ATTRIBUTE -uint64_t __sanitizer_unaligned_load64(void *addr) - ALIAS("__tsan_unaligned_read8"); +u64 __sanitizer_unaligned_load64(const uu64 *addr) { + __tsan_unaligned_read8(addr); + return *addr; +} + SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_unaligned_store16(void *addr, uint16_t v) - ALIAS("__tsan_unaligned_write2"); +void __sanitizer_unaligned_store16(uu16 *addr, u16 v) { + __tsan_unaligned_write2(addr); + *addr = v; +} + SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_unaligned_store32(void *addr, uint32_t v) - ALIAS("__tsan_unaligned_write4"); +void __sanitizer_unaligned_store32(uu32 *addr, u32 v) { + __tsan_unaligned_write4(addr); + *addr = v; +} + SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_unaligned_store64(void *addr, uint64_t v) - ALIAS("__tsan_unaligned_write8"); +void __sanitizer_unaligned_store64(uu64 *addr, u64 v) { + __tsan_unaligned_write8(addr); + *addr = v; } +} // extern "C" void __tsan_acquire(void *addr) { Acquire(cur_thread(), CALLERPC, (uptr)addr); diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h index 63de7b2ab5c..4ff6fa1831e 100644 --- a/libsanitizer/tsan/tsan_interface.h +++ b/libsanitizer/tsan/tsan_interface.h @@ -39,12 +39,27 @@ SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr); SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr); SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr); -SANITIZER_INTERFACE_ATTRIBUTE u16 __tsan_unaligned_read2(const uu16 *addr); -SANITIZER_INTERFACE_ATTRIBUTE u32 __tsan_unaligned_read4(const uu32 *addr); -SANITIZER_INTERFACE_ATTRIBUTE u64 __tsan_unaligned_read8(const uu64 *addr); -SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(uu16 *addr, u16 v); -SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(uu32 *addr, u32 v); -SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(uu64 *addr, u64 v); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16_pc(void *addr, void *pc); + +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void 
__tsan_write4_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8_pc(void *addr, void *pc); +SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16_pc(void *addr, void *pc); SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p); SANITIZER_INTERFACE_ATTRIBUTE diff --git a/libsanitizer/tsan/tsan_interface_ann.cc b/libsanitizer/tsan/tsan_interface_ann.cc index b44e56898d6..19ff4050abc 100644 --- a/libsanitizer/tsan/tsan_interface_ann.cc +++ b/libsanitizer/tsan/tsan_interface_ann.cc @@ -61,8 +61,8 @@ static const int kMaxDescLen = 128; struct ExpectRace { ExpectRace *next; ExpectRace *prev; - int hitcount; - int addcount; + atomic_uintptr_t hitcount; + atomic_uintptr_t addcount; uptr addr; uptr size; char *file; @@ -88,7 +88,8 @@ static void AddExpectRace(ExpectRace *list, ExpectRace *race = list->next; for (; race != list; race = race->next) { if (race->addr == addr && race->size == size) { - race->addcount++; + atomic_store_relaxed(&race->addcount, + atomic_load_relaxed(&race->addcount) + 1); return; } } @@ -98,8 +99,8 @@ static void AddExpectRace(ExpectRace *list, race->file = f; race->line = l; race->desc[0] = 0; - race->hitcount = 0; - race->addcount = 1; + atomic_store_relaxed(&race->hitcount, 0); + atomic_store_relaxed(&race->addcount, 1); if (desc) { int i = 0; for (; i < kMaxDescLen - 1 && desc[i]; i++) @@ -128,7 +129,7 @@ static bool CheckContains(ExpectRace *list, uptr addr, uptr size) { return false; DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n", race->desc, race->addr, (int)race->size, race->file, race->line); - race->hitcount++; + atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed); return true; } @@ -144,7 +145,7 @@ void InitializeDynamicAnnotations() { } bool IsExpectedReport(uptr addr, uptr size) { - Lock lock(&dyn_ann_ctx->mtx); + ReadLock lock(&dyn_ann_ctx->mtx); if (CheckContains(&dyn_ann_ctx->expect, addr, size)) return true; if (CheckContains(&dyn_ann_ctx->benign, addr, size)) @@ -153,20 +154,21 @@ bool IsExpectedReport(uptr addr, uptr size) { } static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched, - int *unique_count, int *hit_count, int ExpectRace::*counter) { + int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) { ExpectRace *list = &dyn_ann_ctx->benign; for (ExpectRace *race = list->next; race != list; race = race->next) { (*unique_count)++; - if (race->*counter == 0) + const uptr cnt = atomic_load_relaxed(&(race->*counter)); + if (cnt == 0) continue; - (*hit_count) += race->*counter; + *hit_count += cnt; uptr i = 0; for (; i < matched->Size(); i++) { ExpectRace *race0 = &(*matched)[i]; if (race->line == race0->line && internal_strcmp(race->file, race0->file) == 0 && internal_strcmp(race->desc, race0->desc) == 0) { - race0->*counter += race->*counter; + atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed); break; } } @@ -191,8 +193,8 @@ void PrintMatchedBenignRaces() { hit_count, (int)internal_getpid()); for (uptr i = 0; i < hit_matched.Size(); i++) { Printf("%d %s:%d %s\n", - hit_matched[i].hitcount, hit_matched[i].file, - hit_matched[i].line, hit_matched[i].desc); + atomic_load_relaxed(&hit_matched[i].hitcount), + hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc); } } if (hit_matched.Size()) { @@ -201,8 +203,8 @@ void PrintMatchedBenignRaces() { add_count, unique_count, (int)internal_getpid()); for (uptr i = 0; i < add_matched.Size(); i++) { Printf("%d %s:%d %s\n", - add_matched[i].addcount, add_matched[i].file, - add_matched[i].line, add_matched[i].desc); 
+ atomic_load_relaxed(&add_matched[i].addcount), + add_matched[i].file, add_matched[i].line, add_matched[i].desc); } } } @@ -301,7 +303,7 @@ void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) { Lock lock(&dyn_ann_ctx->mtx); while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) { ExpectRace *race = dyn_ann_ctx->expect.next; - if (race->hitcount == 0) { + if (atomic_load_relaxed(&race->hitcount) == 0) { ctx->nmissed_expected++; ReportMissedExpectedRace(race); } diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc index 15fc21c4627..0ded1aa1099 100644 --- a/libsanitizer/tsan/tsan_interface_atomic.cc +++ b/libsanitizer/tsan/tsan_interface_atomic.cc @@ -30,15 +30,15 @@ typedef unsigned char a8; typedef unsigned short a16; // NOLINT typedef unsigned int a32; typedef unsigned long long a64; // NOLINT -#if !defined(TSAN_GO) && (defined(__SIZEOF_INT128__) \ - || (__clang_major__ * 100 + __clang_minor__ >= 302)) +#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \ + || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64) __extension__ typedef __int128 a128; # define __TSAN_HAS_INT128 1 #else # define __TSAN_HAS_INT128 0 #endif -#ifndef TSAN_GO +#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128 // Protects emulation of 128-bit atomic operations. static StaticSpinMutex mutex128; #endif @@ -123,7 +123,8 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) { // Atomic ops are executed under tsan internal mutex, // here we assume that the atomic variables are not accessed // from non-instrumented code. -#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(TSAN_GO) +#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \ + && __TSAN_HAS_INT128 a128 func_xchg(volatile a128 *v, a128 op) { SpinMutexLock lock(&mutex128); a128 cmp = *v; @@ -196,22 +197,22 @@ static int SizeLog() { // this leads to false negatives only in very obscure cases. 
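
The ExpectRace bookkeeping above (hitcount/addcount) moves from plain ints to relaxed atomics because the counters are bumped from whichever thread happens to hit a benign race; only atomicity is needed, not ordering. The same change expressed with std::atomic, for comparison:

    #include <atomic>

    struct ExpectRaceCounters {
      std::atomic<unsigned long> hitcount{0};
      std::atomic<unsigned long> addcount{0};
    };

    static void OnHit(ExpectRaceCounters *r) {
      // Relaxed is enough: no other data is published through this counter.
      r->hitcount.fetch_add(1, std::memory_order_relaxed);
    }

    static unsigned long Snapshot(const ExpectRaceCounters *r) {
      return r->hitcount.load(std::memory_order_relaxed);
    }
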
} -#ifndef TSAN_GO +#ifndef SANITIZER_GO static atomic_uint8_t *to_atomic(const volatile a8 *a) { - return (atomic_uint8_t*)a; + return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a)); } static atomic_uint16_t *to_atomic(const volatile a16 *a) { - return (atomic_uint16_t*)a; + return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a)); } #endif static atomic_uint32_t *to_atomic(const volatile a32 *a) { - return (atomic_uint32_t*)a; + return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a)); } static atomic_uint64_t *to_atomic(const volatile a64 *a) { - return (atomic_uint64_t*)a; + return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a)); } static memory_order to_mo(morder mo) { @@ -232,7 +233,7 @@ static T NoTsanAtomicLoad(const volatile T *a, morder mo) { return atomic_load(to_atomic(a), to_mo(mo)); } -#if __TSAN_HAS_INT128 && !defined(TSAN_GO) +#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO) static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) { SpinMutexLock lock(&mutex128); return *a; @@ -262,7 +263,7 @@ static void NoTsanAtomicStore(volatile T *a, T v, morder mo) { atomic_store(to_atomic(a), v, to_mo(mo)); } -#if __TSAN_HAS_INT128 && !defined(TSAN_GO) +#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO) static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) { SpinMutexLock lock(&mutex128); *a = v; @@ -454,7 +455,7 @@ static T AtomicCAS(ThreadState *thr, uptr pc, return c; } -#ifndef TSAN_GO +#ifndef SANITIZER_GO static void NoTsanAtomicFence(morder mo) { __sync_synchronize(); } @@ -466,7 +467,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) { #endif // Interface functions follow. -#ifndef TSAN_GO +#ifndef SANITIZER_GO // C/C++ @@ -865,7 +866,7 @@ void __tsan_atomic_signal_fence(morder mo) { } } // extern "C" -#else // #ifndef TSAN_GO +#else // #ifndef SANITIZER_GO // Go @@ -948,4 +949,4 @@ void __tsan_go_atomic64_compare_exchange( *(bool*)(a+24) = (cur == cmp); } } // extern "C" -#endif // #ifndef TSAN_GO +#endif // #ifndef SANITIZER_GO diff --git a/libsanitizer/tsan/tsan_interface_inl.h b/libsanitizer/tsan/tsan_interface_inl.h index 4d6e10bcf7b..67a91aa341a 100644 --- a/libsanitizer/tsan/tsan_interface_inl.h +++ b/libsanitizer/tsan/tsan_interface_inl.h @@ -48,6 +48,38 @@ void __tsan_write8(void *addr) { MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8); } +void __tsan_read1_pc(void *addr, void *pc) { + MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1); +} + +void __tsan_read2_pc(void *addr, void *pc) { + MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2); +} + +void __tsan_read4_pc(void *addr, void *pc) { + MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4); +} + +void __tsan_read8_pc(void *addr, void *pc) { + MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8); +} + +void __tsan_write1_pc(void *addr, void *pc) { + MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1); +} + +void __tsan_write2_pc(void *addr, void *pc) { + MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2); +} + +void __tsan_write4_pc(void *addr, void *pc) { + MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4); +} + +void __tsan_write8_pc(void *addr, void *pc) { + MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8); +} + void __tsan_vptr_update(void **vptr_p, void *new_val) { CHECK_EQ(sizeof(vptr_p), 8); if (*vptr_p != new_val) { diff --git a/libsanitizer/tsan/tsan_interface_java.cc b/libsanitizer/tsan/tsan_interface_java.cc index 6e1ec6de359..934ccc3ef9c 100644 --- 
a/libsanitizer/tsan/tsan_interface_java.cc +++ b/libsanitizer/tsan/tsan_interface_java.cc @@ -217,3 +217,33 @@ int __tsan_java_mutex_unlock_rec(jptr addr) { return MutexUnlock(thr, pc, addr, true); } + +void __tsan_java_acquire(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_acquire); + DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + Acquire(thr, caller_pc, addr); +} + +void __tsan_java_release(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_release); + DPrintf("#%d: java_release(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + Release(thr, caller_pc, addr); +} + +void __tsan_java_release_store(jptr addr) { + SCOPED_JAVA_FUNC(__tsan_java_release); + DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr); + CHECK_NE(jctx, 0); + CHECK_GE(addr, jctx->heap_begin); + CHECK_LT(addr, jctx->heap_begin + jctx->heap_size); + + ReleaseStore(thr, caller_pc, addr); +} diff --git a/libsanitizer/tsan/tsan_interface_java.h b/libsanitizer/tsan/tsan_interface_java.h index 9af1f3fe21a..04e52031dd3 100644 --- a/libsanitizer/tsan/tsan_interface_java.h +++ b/libsanitizer/tsan/tsan_interface_java.h @@ -77,6 +77,14 @@ void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE; // the same recursion level. int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE; +// Raw acquire/release primitives. +// Can be used to establish happens-before edges on volatile/final fields, +// in atomic operations, etc. release_store is the same as release, but it +// breaks release sequence on addr (see C++ standard 1.10/7 for details). +void __tsan_java_acquire(jptr addr) INTERFACE_ATTRIBUTE; +void __tsan_java_release(jptr addr) INTERFACE_ATTRIBUTE; +void __tsan_java_release_store(jptr addr) INTERFACE_ATTRIBUTE; + #ifdef __cplusplus } // extern "C" #endif diff --git a/libsanitizer/tsan/tsan_md5.cc b/libsanitizer/tsan/tsan_md5.cc index 883239c2e71..f299dfc59b6 100644 --- a/libsanitizer/tsan/tsan_md5.cc +++ b/libsanitizer/tsan/tsan_md5.cc @@ -23,7 +23,7 @@ namespace __tsan { (a) += (b); #define SET(n) \ - (*(MD5_u32plus *)&ptr[(n) * 4]) + (*(const MD5_u32plus *)&ptr[(n) * 4]) #define GET(n) \ SET(n) @@ -37,13 +37,11 @@ typedef struct { MD5_u32plus block[16]; } MD5_CTX; -static void *body(MD5_CTX *ctx, void *data, ulong_t size) { - unsigned char *ptr; +static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) { + const unsigned char *ptr = (const unsigned char *)data; MD5_u32plus a, b, c, d; MD5_u32plus saved_a, saved_b, saved_c, saved_d; - ptr = (unsigned char*)data; - a = ctx->a; b = ctx->b; c = ctx->c; @@ -149,7 +147,7 @@ void MD5_Init(MD5_CTX *ctx) { ctx->hi = 0; } -void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) { +void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) { MD5_u32plus saved_lo; ulong_t used, free; @@ -169,7 +167,7 @@ void MD5_Update(MD5_CTX *ctx, void *data, ulong_t size) { } internal_memcpy(&ctx->buffer[used], data, free); - data = (unsigned char *)data + free; + data = (const unsigned char *)data + free; size -= free; body(ctx, ctx->buffer, 64); } @@ -236,7 +234,7 @@ MD5Hash md5_hash(const void *data, uptr size) { MD5Hash res; MD5_CTX ctx; MD5_Init(&ctx); - MD5_Update(&ctx, (void*)data, size); + MD5_Update(&ctx, data, size); MD5_Final((unsigned char*)&res.hash[0], &ctx); return res; } diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc 
index d89610a2cc3..c8a5a6a264a 100644 --- a/libsanitizer/tsan/tsan_mman.cc +++ b/libsanitizer/tsan/tsan_mman.cc @@ -34,6 +34,23 @@ struct MapUnmapCallback { // We are about to unmap a chunk of user memory. // Mark the corresponding shadow memory as not needed. DontNeedShadowFor(p, size); + // Mark the corresponding meta shadow memory as not needed. + // Note the block does not contain any meta info at this point + // (this happens after free). + const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize; + const uptr kPageSize = GetPageSizeCached() * kMetaRatio; + // Block came from LargeMmapAllocator, so must be large. + // We rely on this in the calculations below. + CHECK_GE(size, 2 * kPageSize); + uptr diff = RoundUp(p, kPageSize) - p; + if (diff != 0) { + p += diff; + size -= diff; + } + diff = p + size - RoundDown(p + size, kPageSize); + if (diff != 0) + size -= diff; + FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio); } }; @@ -43,7 +60,7 @@ Allocator *allocator() { } void InitializeAllocator() { - allocator()->Init(); + allocator()->Init(common_flags()->allocator_may_return_null); } void AllocatorThreadStart(ThreadState *thr) { @@ -61,22 +78,22 @@ void AllocatorPrintStats() { } static void SignalUnsafeCall(ThreadState *thr, uptr pc) { - if (atomic_load(&thr->in_signal_handler, memory_order_relaxed) == 0 || + if (atomic_load_relaxed(&thr->in_signal_handler) == 0 || !flags()->report_signal_unsafe) return; VarSizeStackTrace stack; ObtainCurrentStack(thr, pc, &stack); + if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack)) + return; ThreadRegistryLock l(ctx->thread_registry); ScopedReport rep(ReportTypeSignalUnsafe); - if (!IsFiredSuppression(ctx, rep, stack)) { - rep.AddStack(stack, true); - OutputReport(thr, rep); - } + rep.AddStack(stack, true); + OutputReport(thr, rep); } void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) { if ((sz >= (1ull << 40)) || (align >= (1ull << 40))) - return AllocatorReturnNull(); + return allocator()->ReturnNullOrDie(); void *p = allocator()->Allocate(&thr->alloc_cache, sz, align); if (p == 0) return 0; @@ -87,6 +104,15 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) { return p; } +void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) { + if (CallocShouldReturnNullDueToOverflow(size, n)) + return allocator()->ReturnNullOrDie(); + void *p = user_alloc(thr, pc, n * size); + if (p) + internal_memset(p, 0, n * size); + return p; +} + void user_free(ThreadState *thr, uptr pc, void *p, bool signal) { if (ctx && ctx->initialized) OnUserFree(thr, pc, (uptr)p, true); diff --git a/libsanitizer/tsan/tsan_mman.h b/libsanitizer/tsan/tsan_mman.h index fe020af9d2d..da7cf211940 100644 --- a/libsanitizer/tsan/tsan_mman.h +++ b/libsanitizer/tsan/tsan_mman.h @@ -25,6 +25,7 @@ void AllocatorPrintStats(); // For user allocations. void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align = kDefaultAlignment, bool signal = true); +void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n); // Does not accept NULL. void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true); void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz); diff --git a/libsanitizer/tsan/tsan_mutex.cc b/libsanitizer/tsan/tsan_mutex.cc index 2e49b9d2de9..3d3f6cc82b9 100644 --- a/libsanitizer/tsan/tsan_mutex.cc +++ b/libsanitizer/tsan/tsan_mutex.cc @@ -23,7 +23,7 @@ namespace __tsan { // then Report mutex can be locked while under Threads mutex. 
// The leaf mutexes can be locked under any other mutexes. // Recursive locking is not supported. -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO const MutexType MutexTypeLeaf = (MutexType)-1; static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { /*0 MutexTypeInvalid*/ {}, @@ -39,13 +39,15 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = { /*9 MutexTypeMBlock*/ {MutexTypeSyncVar}, /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar}, /*11 MutexTypeDDetector*/ {}, + /*12 MutexTypeFired*/ {MutexTypeLeaf}, + /*13 MutexTypeRacy*/ {MutexTypeLeaf}, }; static bool CanLockAdj[MutexTypeCount][MutexTypeCount]; #endif void InitializeMutex() { -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO // Build the "can lock" adjacency matrix. // If [i][j]==true, then one can lock mutex j while under mutex i. const int N = MutexTypeCount; @@ -126,7 +128,7 @@ InternalDeadlockDetector::InternalDeadlockDetector() { // Rely on zero initialization because some mutexes can be locked before ctor. } -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO void InternalDeadlockDetector::Lock(MutexType t) { // Printf("LOCK %d @%zu\n", t, seq_ + 1); CHECK_GT(t, MutexTypeInvalid); @@ -168,7 +170,7 @@ void InternalDeadlockDetector::CheckNoLocks() { #endif void CheckNoLocks(ThreadState *thr) { -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO thr->internal_deadlock_detector.CheckNoLocks(); #endif } @@ -206,7 +208,7 @@ class Backoff { Mutex::Mutex(MutexType type, StatType stat_type) { CHECK_GT(type, MutexTypeInvalid); CHECK_LT(type, MutexTypeCount); -#if TSAN_DEBUG +#if SANITIZER_DEBUG type_ = type; #endif #if TSAN_COLLECT_STATS @@ -220,7 +222,7 @@ Mutex::~Mutex() { } void Mutex::Lock() { -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO cur_thread()->internal_deadlock_detector.Lock(type_); #endif uptr cmp = kUnlocked; @@ -232,7 +234,7 @@ void Mutex::Lock() { cmp = kUnlocked; if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock, memory_order_acquire)) { -#if TSAN_COLLECT_STATS && !TSAN_GO +#if TSAN_COLLECT_STATS && !SANITIZER_GO StatInc(cur_thread(), stat_type_, backoff.Contention()); #endif return; @@ -245,13 +247,13 @@ void Mutex::Unlock() { uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release); (void)prev; DCHECK_NE(prev & kWriteLock, 0); -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO cur_thread()->internal_deadlock_detector.Unlock(type_); #endif } void Mutex::ReadLock() { -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO cur_thread()->internal_deadlock_detector.Lock(type_); #endif uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire); @@ -260,7 +262,7 @@ void Mutex::ReadLock() { for (Backoff backoff; backoff.Do();) { prev = atomic_load(&state_, memory_order_acquire); if ((prev & kWriteLock) == 0) { -#if TSAN_COLLECT_STATS && !TSAN_GO +#if TSAN_COLLECT_STATS && !SANITIZER_GO StatInc(cur_thread(), stat_type_, backoff.Contention()); #endif return; @@ -273,7 +275,7 @@ void Mutex::ReadUnlock() { (void)prev; DCHECK_EQ(prev & kWriteLock, 0); DCHECK_GT(prev & ~kWriteLock, 0); -#if TSAN_DEBUG && !TSAN_GO +#if SANITIZER_DEBUG && !SANITIZER_GO cur_thread()->internal_deadlock_detector.Unlock(type_); #endif } diff --git a/libsanitizer/tsan/tsan_mutex.h b/libsanitizer/tsan/tsan_mutex.h index c27bc2b4459..9960eee9bc8 100644 --- a/libsanitizer/tsan/tsan_mutex.h +++ b/libsanitizer/tsan/tsan_mutex.h @@ -30,6 +30,8 @@ enum MutexType { MutexTypeMBlock, MutexTypeJavaMBlock, 
MutexTypeDDetector, + MutexTypeFired, + MutexTypeRacy, // This must be the last. MutexTypeCount @@ -50,7 +52,7 @@ class Mutex { private: atomic_uintptr_t state_; -#if TSAN_DEBUG +#if SANITIZER_DEBUG MutexType type_; #endif #if TSAN_COLLECT_STATS diff --git a/libsanitizer/tsan/tsan_mutexset.h b/libsanitizer/tsan/tsan_mutexset.h index 7fdc194109f..d5406ae9f12 100644 --- a/libsanitizer/tsan/tsan_mutexset.h +++ b/libsanitizer/tsan/tsan_mutexset.h @@ -41,7 +41,7 @@ class MutexSet { } private: -#ifndef TSAN_GO +#ifndef SANITIZER_GO uptr size_; Desc descs_[kMaxSize]; #endif @@ -53,7 +53,7 @@ class MutexSet { // Go does not have mutexes, so do not spend memory and time. // (Go sync.Mutex is actually a semaphore -- can be unlocked // in different goroutine). -#ifdef TSAN_GO +#ifdef SANITIZER_GO MutexSet::MutexSet() {} void MutexSet::Add(u64 id, bool write, u64 epoch) {} void MutexSet::Del(u64 id, bool write) {} diff --git a/libsanitizer/tsan/tsan_new_delete.cc b/libsanitizer/tsan/tsan_new_delete.cc new file mode 100644 index 00000000000..ad4ed449751 --- /dev/null +++ b/libsanitizer/tsan/tsan_new_delete.cc @@ -0,0 +1,86 @@ +//===-- tsan_new_delete.cc ----------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file is a part of ThreadSanitizer (TSan), a race detector. +// +// Interceptors for operators new and delete. +//===----------------------------------------------------------------------===// +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "tsan_interceptors.h" + +using namespace __tsan; // NOLINT + +namespace std { +struct nothrow_t {}; +} // namespace std + +#define OPERATOR_NEW_BODY(mangled_name) \ + if (cur_thread()->in_symbolizer) \ + return __libc_malloc(size); \ + void *p = 0; \ + { \ + SCOPED_INTERCEPTOR_RAW(mangled_name, size); \ + p = user_alloc(thr, pc, size); \ + } \ + invoke_malloc_hook(p, size); \ + return p; + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new(__sanitizer::uptr size); +void *operator new(__sanitizer::uptr size) { + OPERATOR_NEW_BODY(_Znwm); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new[](__sanitizer::uptr size); +void *operator new[](__sanitizer::uptr size) { + OPERATOR_NEW_BODY(_Znam); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new(__sanitizer::uptr size, std::nothrow_t const&); +void *operator new(__sanitizer::uptr size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void *operator new[](__sanitizer::uptr size, std::nothrow_t const&); +void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t); +} + +#define OPERATOR_DELETE_BODY(mangled_name) \ + if (ptr == 0) return; \ + if (cur_thread()->in_symbolizer) \ + return __libc_free(ptr); \ + invoke_free_hook(ptr); \ + SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \ + user_free(thr, pc, ptr); + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT; +void operator delete(void *ptr) NOEXCEPT { + OPERATOR_DELETE_BODY(_ZdlPv); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT; +void operator delete[](void *ptr) NOEXCEPT { + OPERATOR_DELETE_BODY(_ZdaPv); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&); +void operator delete(void *ptr, std::nothrow_t const&) { + 
OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t); +} + +SANITIZER_INTERFACE_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&); +void operator delete[](void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t); +} diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h index 35837d2132a..07d11e572ff 100644 --- a/libsanitizer/tsan/tsan_platform.h +++ b/libsanitizer/tsan/tsan_platform.h @@ -22,10 +22,11 @@ namespace __tsan { -#if !defined(TSAN_GO) +#if !defined(SANITIZER_GO) +#if defined(__x86_64__) /* -C/C++ on linux and freebsd +C/C++ on linux/x86_64 and freebsd/x86_64 0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings 0100 0000 0000 - 0200 0000 0000: - 0200 0000 0000 - 1000 0000 0000: shadow @@ -38,7 +39,6 @@ C/C++ on linux and freebsd 7e00 0000 0000 - 7e80 0000 0000: - 7e80 0000 0000 - 8000 0000 0000: modules and main thread stack */ - const uptr kMetaShadowBeg = 0x300000000000ull; const uptr kMetaShadowEnd = 0x400000000000ull; const uptr kTraceMemBeg = 0x600000000000ull; @@ -53,6 +53,97 @@ const uptr kHiAppMemBeg = 0x7e8000000000ull; const uptr kHiAppMemEnd = 0x800000000000ull; const uptr kAppMemMsk = 0x7c0000000000ull; const uptr kAppMemXor = 0x020000000000ull; +const uptr kVdsoBeg = 0xf000000000000000ull; +#elif defined(__mips64) +/* +C/C++ on linux/mips64 +0100 0000 00 - 0200 0000 00: main binary +0200 0000 00 - 1400 0000 00: - +1400 0000 00 - 2400 0000 00: shadow +2400 0000 00 - 3000 0000 00: - +3000 0000 00 - 4000 0000 00: metainfo (memory blocks and sync objects) +4000 0000 00 - 6000 0000 00: - +6000 0000 00 - 6200 0000 00: traces +6200 0000 00 - fe00 0000 00: - +fe00 0000 00 - ff00 0000 00: heap +ff00 0000 00 - ff80 0000 00: - +ff80 0000 00 - ffff ffff ff: modules and main thread stack +*/ +const uptr kMetaShadowBeg = 0x3000000000ull; +const uptr kMetaShadowEnd = 0x4000000000ull; +const uptr kTraceMemBeg = 0x6000000000ull; +const uptr kTraceMemEnd = 0x6200000000ull; +const uptr kShadowBeg = 0x1400000000ull; +const uptr kShadowEnd = 0x2400000000ull; +const uptr kHeapMemBeg = 0xfe00000000ull; +const uptr kHeapMemEnd = 0xff00000000ull; +const uptr kLoAppMemBeg = 0x0100000000ull; +const uptr kLoAppMemEnd = 0x0200000000ull; +const uptr kHiAppMemBeg = 0xff80000000ull; +const uptr kHiAppMemEnd = 0xffffffffffull; +const uptr kAppMemMsk = 0xfc00000000ull; +const uptr kAppMemXor = 0x0400000000ull; +const uptr kVdsoBeg = 0xfffff00000ull; +#elif defined(__aarch64__) +# if SANITIZER_AARCH64_VMA == 39 +/* +C/C++ on linux/aarch64 (39-bit VMA) +0000 4000 00 - 0200 0000 00: main binary +2000 0000 00 - 4000 0000 00: shadow memory +4000 0000 00 - 5000 0000 00: metainfo +5000 0000 00 - 6000 0000 00: - +6000 0000 00 - 6200 0000 00: traces +6200 0000 00 - 7d00 0000 00: - +7d00 0000 00 - 7e00 0000 00: heap +7e00 0000 00 - 7fff ffff ff: modules and main thread stack +*/ +const uptr kLoAppMemBeg = 0x0000400000ull; +const uptr kLoAppMemEnd = 0x0200000000ull; +const uptr kShadowBeg = 0x2000000000ull; +const uptr kShadowEnd = 0x4000000000ull; +const uptr kMetaShadowBeg = 0x4000000000ull; +const uptr kMetaShadowEnd = 0x5000000000ull; +const uptr kTraceMemBeg = 0x6000000000ull; +const uptr kTraceMemEnd = 0x6200000000ull; +const uptr kHeapMemBeg = 0x7d00000000ull; +const uptr kHeapMemEnd = 0x7e00000000ull; +const uptr kHiAppMemBeg = 0x7e00000000ull; +const uptr kHiAppMemEnd = 0x7fffffffffull; +const uptr kAppMemMsk = 0x7800000000ull; +const uptr kAppMemXor = 0x0800000000ull; +const uptr kVdsoBeg = 0x7f00000000ull; +# elif 
SANITIZER_AARCH64_VMA == 42 +/* +C/C++ on linux/aarch64 (42-bit VMA) +00000 4000 00 - 01000 0000 00: main binary +01000 0000 00 - 10000 0000 00: - +10000 0000 00 - 20000 0000 00: shadow memory +20000 0000 00 - 26000 0000 00: - +26000 0000 00 - 28000 0000 00: metainfo +28000 0000 00 - 36200 0000 00: - +36200 0000 00 - 36240 0000 00: traces +36240 0000 00 - 3e000 0000 00: - +3e000 0000 00 - 3f000 0000 00: heap +3c000 0000 00 - 3ff00 0000 00: - +3ff00 0000 00 - 3ffff f000 00: modules and main thread stack +*/ +const uptr kLoAppMemBeg = 0x00000400000ull; +const uptr kLoAppMemEnd = 0x01000000000ull; +const uptr kShadowBeg = 0x10000000000ull; +const uptr kShadowEnd = 0x20000000000ull; +const uptr kMetaShadowBeg = 0x26000000000ull; +const uptr kMetaShadowEnd = 0x28000000000ull; +const uptr kTraceMemBeg = 0x36200000000ull; +const uptr kTraceMemEnd = 0x36400000000ull; +const uptr kHeapMemBeg = 0x3e000000000ull; +const uptr kHeapMemEnd = 0x3f000000000ull; +const uptr kHiAppMemBeg = 0x3ff00000000ull; +const uptr kHiAppMemEnd = 0x3fffff00000ull; +const uptr kAppMemMsk = 0x3c000000000ull; +const uptr kAppMemXor = 0x04000000000ull; +const uptr kVdsoBeg = 0x37f00000000ull; +# endif +#endif ALWAYS_INLINE bool IsAppMem(uptr mem) { @@ -100,7 +191,7 @@ static USED uptr UserRegions[] = { kHeapMemBeg, kHeapMemEnd, }; -#elif defined(TSAN_GO) && !SANITIZER_WINDOWS +#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS /* Go on linux, darwin and freebsd 0000 0000 1000 - 0000 1000 0000: executable @@ -162,15 +253,15 @@ static USED uptr UserRegions[] = { kAppMemBeg, kAppMemEnd, }; -#elif defined(TSAN_GO) && SANITIZER_WINDOWS +#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS /* Go on windows 0000 0000 1000 - 0000 1000 0000: executable 0000 1000 0000 - 00f8 0000 0000: - 00c0 0000 0000 - 00e0 0000 0000: heap 00e0 0000 0000 - 0100 0000 0000: - -0100 0000 0000 - 0380 0000 0000: shadow -0380 0000 0000 - 0560 0000 0000: - +0100 0000 0000 - 0500 0000 0000: shadow +0500 0000 0000 - 0560 0000 0000: - 0560 0000 0000 - 0760 0000 0000: traces 0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects) 07d0 0000 0000 - 8000 0000 0000: - @@ -181,7 +272,7 @@ const uptr kMetaShadowEnd = 0x07d000000000ull; const uptr kTraceMemBeg = 0x056000000000ull; const uptr kTraceMemEnd = 0x076000000000ull; const uptr kShadowBeg = 0x010000000000ull; -const uptr kShadowEnd = 0x038000000000ull; +const uptr kShadowEnd = 0x050000000000ull; const uptr kAppMemBeg = 0x000000001000ull; const uptr kAppMemEnd = 0x00e000000000ull; @@ -203,21 +294,21 @@ bool IsMetaMem(uptr mem) { ALWAYS_INLINE uptr MemToShadow(uptr x) { DCHECK(IsAppMem(x)); - return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg; + return ((x & ~(kShadowCell - 1)) * kShadowCnt) + kShadowBeg; } ALWAYS_INLINE u32 *MemToMeta(uptr x) { DCHECK(IsAppMem(x)); return (u32*)(((x & ~(kMetaShadowCell - 1)) / \ - kMetaShadowCell * kMetaShadowSize) | kMetaShadowEnd); + kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg); } ALWAYS_INLINE uptr ShadowToMem(uptr s) { CHECK(IsShadowMem(s)); // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection. 
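// Editorial note, not part of the patch: switching '|' to '+' in MemToShadow
// above matters once the scaled app address carries bits that overlap
// kShadowBeg. With kShadowCell = 8 and kShadowCnt = 4 (the usual defaults),
// an app address a maps to (a & ~7) * 4 + kShadowBeg. For the Go/Windows
// layout above, app memory tops out at 0x00e000000000, so the scaled offset
// reaches 0x038000000000; with '|', an offset that already has the
// 0x010000000000 bit set collides with the one that does not, whereas plain
// addition keeps the mapping injective (hence kShadowEnd growing to
// 0x050000000000 >= 0x038000000000 + 0x010000000000). ShadowToMem just below
// is the inverse, (s - kShadowBeg) / kShadowCnt, up to the 8-byte cell
// granularity.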
- return (x & ~kShadowBeg) / kShadowCnt; + return (s - kShadowBeg) / kShadowCnt; } static USED uptr UserRegions[] = { @@ -249,10 +340,6 @@ uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) { void InitializePlatform(); void FlushShadowMemory(); void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive); -uptr GetRSS(); - -void *internal_start_thread(void(*func)(void*), void *arg); -void internal_join_thread(void *th); // Says whether the addr relates to a global var. // Guesses with high probability, may yield both false positives and negatives. diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc index 32591316ea2..f409a8b221c 100644 --- a/libsanitizer/tsan/tsan_platform_linux.cc +++ b/libsanitizer/tsan/tsan_platform_linux.cc @@ -16,6 +16,7 @@ #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_posix.h" #include "sanitizer_common/sanitizer_procmaps.h" #include "sanitizer_common/sanitizer_stoptheworld.h" #include "sanitizer_common/sanitizer_stackdepot.h" @@ -64,8 +65,6 @@ namespace __tsan { static uptr g_data_start; static uptr g_data_end; -const uptr kPageSize = 4096; - enum { MemTotal = 0, MemShadow = 1, @@ -85,7 +84,7 @@ void FillProfileCallback(uptr p, uptr rss, bool file, mem[MemShadow] += rss; else if (p >= kMetaShadowBeg && p < kMetaShadowEnd) mem[MemMeta] += rss; -#ifndef TSAN_GO +#ifndef SANITIZER_GO else if (p >= kHeapMemBeg && p < kHeapMemEnd) mem[MemHeap] += rss; else if (p >= kLoAppMemBeg && p < kLoAppMemEnd) @@ -116,33 +115,6 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { nlive, nthread); } -uptr GetRSS() { - uptr fd = OpenFile("/proc/self/statm", false); - if ((sptr)fd < 0) - return 0; - char buf[64]; - uptr len = internal_read(fd, buf, sizeof(buf) - 1); - internal_close(fd); - if ((sptr)len <= 0) - return 0; - buf[len] = 0; - // The format of the file is: - // 1084 89 69 11 0 79 0 - // We need the second number which is RSS in 4K units. - char *pos = buf; - // Skip the first number. - while (*pos >= '0' && *pos <= '9') - pos++; - // Skip whitespaces. - while (!(*pos >= '0' && *pos <= '9') && *pos != 0) - pos++; - // Read the number. - uptr rss = 0; - while (*pos >= '0' && *pos <= '9') - rss = rss * 10 + *pos++ - '0'; - return rss * 4096; -} - #if SANITIZER_LINUX void FlushShadowMemoryCallback( const SuspendedThreadsList &suspended_threads_list, @@ -157,12 +129,12 @@ void FlushShadowMemory() { #endif } -#ifndef TSAN_GO +#ifndef SANITIZER_GO static void ProtectRange(uptr beg, uptr end) { CHECK_LE(beg, end); if (beg == end) return; - if (beg != (uptr)Mprotect(beg, end - beg)) { + if (beg != (uptr)MmapNoAccess(beg, end - beg)) { Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end); Printf("FATAL: Make sure you are not using unlimited stack\n"); Die(); @@ -198,7 +170,7 @@ static void MapRodata() { *p = kShadowRodata; internal_write(fd, marker.data(), marker.size()); // Map the file into memory. - uptr page = internal_mmap(0, kPageSize, PROT_READ | PROT_WRITE, + uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, fd, 0); if (internal_iserror(page)) { internal_close(fd); @@ -228,8 +200,8 @@ static void MapRodata() { void InitializeShadowMemory() { // Map memory shadow. 
- uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg, - kShadowEnd - kShadowBeg); + uptr shadow = + (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow"); if (shadow != kShadowBeg) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: Make sure to compile with -fPIE and " @@ -240,23 +212,41 @@ void InitializeShadowMemory() { // Frequently a thread uses only a small part of stack and similarly // a program uses a small part of large mmap. On some programs // we see 20% memory usage reduction without huge pages for this range. -#ifdef MADV_NOHUGEPAGE - madvise((void*)MemToShadow(0x7f0000000000ULL), - 0x10000000000ULL * kShadowMultiplier, MADV_NOHUGEPAGE); + // FIXME: don't use constants here. +#if defined(__x86_64__) + const uptr kMadviseRangeBeg = 0x7f0000000000ull; + const uptr kMadviseRangeSize = 0x010000000000ull; +#elif defined(__mips64) + const uptr kMadviseRangeBeg = 0xff00000000ull; + const uptr kMadviseRangeSize = 0x0100000000ull; +#elif defined(__aarch64__) + const uptr kMadviseRangeBeg = 0x7e00000000ull; + const uptr kMadviseRangeSize = 0x0100000000ull; #endif + NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg), + kMadviseRangeSize * kShadowMultiplier); + // Meta shadow is compressing and we don't flush it, + // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory. + // On one program it reduces memory consumption from 5GB to 2.5GB. + NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg); + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg); DPrintf("memory shadow: %zx-%zx (%zuGB)\n", kShadowBeg, kShadowEnd, (kShadowEnd - kShadowBeg) >> 30); // Map meta shadow. uptr meta_size = kMetaShadowEnd - kMetaShadowBeg; - uptr meta = (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size); + uptr meta = + (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow"); if (meta != kMetaShadowBeg) { Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n"); Printf("FATAL: Make sure to compile with -fPIE and " "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg); Die(); } + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(meta, meta_size); DPrintf("meta shadow: %zx-%zx (%zuGB)\n", meta, meta + meta_size, meta_size >> 30); @@ -311,9 +301,9 @@ static void CheckAndProtect() { if (IsAppMem(p)) continue; if (p >= kHeapMemEnd && - p < kHeapMemEnd + PrimaryAllocator::AdditionalSize()) + p < HeapEnd()) continue; - if (p >= 0xf000000000000000ull) // vdso + if (p >= kVdsoBeg) // vdso break; Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end); Die(); @@ -322,10 +312,13 @@ static void CheckAndProtect() { ProtectRange(kLoAppMemEnd, kShadowBeg); ProtectRange(kShadowEnd, kMetaShadowBeg); ProtectRange(kMetaShadowEnd, kTraceMemBeg); + // Memory for traces is mapped lazily in MapThreadTrace. + // Protect the whole range for now, so that user does not map something here. 
+ ProtectRange(kTraceMemBeg, kTraceMemEnd); ProtectRange(kTraceMemEnd, kHeapMemBeg); - ProtectRange(kHeapMemEnd + PrimaryAllocator::AdditionalSize(), kHiAppMemBeg); + ProtectRange(HeapEnd(), kHiAppMemBeg); } -#endif // #ifndef TSAN_GO +#endif // #ifndef SANITIZER_GO void InitializePlatform() { DisableCoreDumperIfNecessary(); @@ -359,7 +352,7 @@ void InitializePlatform() { ReExec(); } -#ifndef TSAN_GO +#ifndef SANITIZER_GO CheckAndProtect(); InitTlsSize(); InitDataSeg(); @@ -370,7 +363,7 @@ bool IsGlobalVar(uptr addr) { return g_data_start && addr >= g_data_start && addr < g_data_end; } -#ifndef TSAN_GO +#ifndef SANITIZER_GO // Extract file descriptors passed to glibc internal __res_iclose function. // This is required to properly "close" the fds, because we do not see internal // closes within glibc. The code is a pure hack. @@ -408,6 +401,8 @@ int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) { return res; } +// Note: this function runs with async signals enabled, +// so it must not touch any tsan state. int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg) { diff --git a/libsanitizer/tsan/tsan_platform_mac.cc b/libsanitizer/tsan/tsan_platform_mac.cc index 2091318a674..a2ae26f5341 100644 --- a/libsanitizer/tsan/tsan_platform_mac.cc +++ b/libsanitizer/tsan/tsan_platform_mac.cc @@ -48,11 +48,7 @@ void FlushShadowMemory() { void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { } -uptr GetRSS() { - return 0; -} - -#ifndef TSAN_GO +#ifndef SANITIZER_GO void InitializeShadowMemory() { uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg); @@ -62,6 +58,8 @@ void InitializeShadowMemory() { "to link with -pie.\n"); Die(); } + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg); DPrintf("kShadow %zx-%zx (%zuGB)\n", kShadowBeg, kShadowEnd, (kShadowEnd - kShadowBeg) >> 30); @@ -75,7 +73,9 @@ void InitializePlatform() { DisableCoreDumperIfNecessary(); } -#ifndef TSAN_GO +#ifndef SANITIZER_GO +// Note: this function runs with async signals enabled, +// so it must not touch any tsan state. 
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m, void *abstime), void *c, void *m, void *abstime, void(*cleanup)(void *arg), void *arg) { diff --git a/libsanitizer/tsan/tsan_platform_windows.cc b/libsanitizer/tsan/tsan_platform_windows.cc index 3f81c54e3eb..ccea22ed66e 100644 --- a/libsanitizer/tsan/tsan_platform_windows.cc +++ b/libsanitizer/tsan/tsan_platform_windows.cc @@ -29,10 +29,6 @@ void FlushShadowMemory() { void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) { } -uptr GetRSS() { - return 0; -} - void InitializePlatform() { } diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc index 6feab81383f..a84738dff6e 100644 --- a/libsanitizer/tsan/tsan_report.cc +++ b/libsanitizer/tsan/tsan_report.cc @@ -17,13 +17,11 @@ namespace __tsan { -ReportStack::ReportStack() : next(nullptr), info(), suppressable(false) {} +ReportStack::ReportStack() : frames(nullptr), suppressable(false) {} -ReportStack *ReportStack::New(uptr addr) { +ReportStack *ReportStack::New() { void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack)); - ReportStack *res = new(mem) ReportStack(); - res->info.address = addr; - return res; + return new(mem) ReportStack(); } ReportLocation::ReportLocation(ReportLocationType type) @@ -71,7 +69,7 @@ ReportDesc::~ReportDesc() { // FIXME(dvyukov): it must be leaking a lot of memory. } -#ifndef TSAN_GO +#ifndef SANITIZER_GO const int kThreadBufSize = 32; const char *thread_name(char *buf, int tid) { @@ -112,13 +110,15 @@ static const char *ReportTypeString(ReportType typ) { } void PrintStack(const ReportStack *ent) { - if (ent == 0) { + if (ent == 0 || ent->frames == 0) { Printf(" [failed to restore the stack]\n\n"); return; } - for (int i = 0; ent && ent->info.address; ent = ent->next, i++) { + SymbolizedStack *frame = ent->frames; + for (int i = 0; frame && frame->info.address; frame = frame->next, i++) { InternalScopedString res(2 * GetPageSizeCached()); - RenderFrame(&res, common_flags()->stack_trace_format, i, ent->info, + RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info, + common_flags()->symbolize_vs_style, common_flags()->strip_path_prefix, "__interceptor_"); Printf("%s\n", res.data()); } @@ -250,10 +250,20 @@ static ReportStack *ChooseSummaryStack(const ReportDesc *rep) { return 0; } -ReportStack *SkipTsanInternalFrames(ReportStack *ent) { - while (FrameIsInternal(ent) && ent->next) - ent = ent->next; - return ent; +static bool FrameIsInternal(const SymbolizedStack *frame) { + if (frame == 0) + return false; + const char *file = frame->info.file; + return file != 0 && + (internal_strstr(file, "tsan_interceptors.cc") || + internal_strstr(file, "sanitizer_common_interceptors.inc") || + internal_strstr(file, "tsan_interface_")); +} + +static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) { + while (FrameIsInternal(frames) && frames->next) + frames = frames->next; + return frames; } void PrintReport(const ReportDesc *rep) { @@ -323,27 +333,29 @@ void PrintReport(const ReportDesc *rep) { if (rep->typ == ReportTypeThreadLeak && rep->count > 1) Printf(" And %d more similar thread leaks.\n\n", rep->count - 1); - if (ReportStack *ent = SkipTsanInternalFrames(ChooseSummaryStack(rep))) { - const AddressInfo &info = ent->info; - ReportErrorSummary(rep_typ_str, info.file, info.line, info.function); + if (ReportStack *stack = ChooseSummaryStack(rep)) { + if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames)) + ReportErrorSummary(rep_typ_str, frame->info); } 
Printf("==================\n"); } -#else // #ifndef TSAN_GO +#else // #ifndef SANITIZER_GO const int kMainThreadId = 1; void PrintStack(const ReportStack *ent) { - if (ent == 0) { + if (ent == 0 || ent->frames == 0) { Printf(" [failed to restore the stack]\n"); return; } - for (int i = 0; ent; ent = ent->next, i++) { - const AddressInfo &info = ent->info; - Printf(" %s()\n %s:%d +0x%zx\n", info.function, info.file, info.line, - (void *)info.module_offset); + SymbolizedStack *frame = ent->frames; + for (int i = 0; frame; frame = frame->next, i++) { + const AddressInfo &info = frame->info; + Printf(" %s()\n %s:%d +0x%zx\n", info.function, + StripPathPrefix(info.file, common_flags()->strip_path_prefix), + info.line, (void *)info.module_offset); } } diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h index 3d99c7a7c24..68924edb547 100644 --- a/libsanitizer/tsan/tsan_report.h +++ b/libsanitizer/tsan/tsan_report.h @@ -34,10 +34,9 @@ enum ReportType { }; struct ReportStack { - ReportStack *next; - AddressInfo info; + SymbolizedStack *frames; bool suppressable; - static ReportStack *New(uptr addr); + static ReportStack *New(); private: ReportStack(); diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc index bf971b62da4..e7bdfb51884 100644 --- a/libsanitizer/tsan/tsan_rtl.cc +++ b/libsanitizer/tsan/tsan_rtl.cc @@ -22,6 +22,7 @@ #include "tsan_mman.h" #include "tsan_suppressions.h" #include "tsan_symbolize.h" +#include "ubsan/ubsan_init.h" #ifdef __SSE3__ // <emmintrin.h> transitively includes <stdlib.h>, @@ -41,7 +42,7 @@ extern "C" void __tsan_resume() { namespace __tsan { -#ifndef TSAN_GO +#ifndef SANITIZER_GO THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64); #endif static char ctx_placeholder[sizeof(Context)] ALIGNED(64); @@ -64,14 +65,26 @@ static char thread_registry_placeholder[sizeof(ThreadRegistry)]; static ThreadContextBase *CreateThreadContext(u32 tid) { // Map thread trace when context is created. - MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event)); - MapThreadTrace(GetThreadTraceHeader(tid), sizeof(Trace)); - new(ThreadTrace(tid)) Trace(); + char name[50]; + internal_snprintf(name, sizeof(name), "trace %u", tid); + MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name); + const uptr hdr = GetThreadTraceHeader(tid); + internal_snprintf(name, sizeof(name), "trace header %u", tid); + MapThreadTrace(hdr, sizeof(Trace), name); + new((void*)hdr) Trace(); + // We are going to use only a small part of the trace with the default + // value of history_size. However, the constructor writes to the whole trace. + // Unmap the unused part. 
+ uptr hdr_end = hdr + sizeof(Trace); + hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts()); + hdr_end = RoundUp(hdr_end, GetPageSizeCached()); + if (hdr_end < hdr + sizeof(Trace)) + UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end); void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext)); return new(mem) ThreadContext(tid); } -#ifndef TSAN_GO +#ifndef SANITIZER_GO static const u32 kThreadQuarantineSize = 16; #else static const u32 kThreadQuarantineSize = 64; @@ -84,8 +97,10 @@ Context::Context() , nmissed_expected() , thread_registry(new(thread_registry_placeholder) ThreadRegistry( CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse)) + , racy_mtx(MutexTypeRacy, StatMtxRacy) , racy_stacks(MBlockRacyStacks) , racy_addresses(MBlockRacyAddresses) + , fired_suppressions_mtx(MutexTypeFired, StatMtxFired) , fired_suppressions(8) { } @@ -100,7 +115,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, // , ignore_reads_and_writes() // , ignore_interceptors() , clock(tid, reuse_count) -#ifndef TSAN_GO +#ifndef SANITIZER_GO , jmp_bufs(MBlockJmpBuf) #endif , tid(tid) @@ -109,29 +124,28 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch, , stk_size(stk_size) , tls_addr(tls_addr) , tls_size(tls_size) -#ifndef TSAN_GO +#ifndef SANITIZER_GO , last_sleep_clock(tid) #endif { } +#ifndef SANITIZER_GO static void MemoryProfiler(Context *ctx, fd_t fd, int i) { uptr n_threads; uptr n_running_threads; ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads); InternalScopedBuffer<char> buf(4096); WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads); - internal_write(fd, buf.data(), internal_strlen(buf.data())); + WriteToFile(fd, buf.data(), internal_strlen(buf.data())); } static void BackgroundThread(void *arg) { -#ifndef TSAN_GO // This is a non-initialized non-user thread, nothing to see here. // We don't use ScopedIgnoreInterceptors, because we want ignores to be // enabled even when the thread function exits (e.g. during pthread thread // shutdown code). cur_thread()->ignore_interceptors++; -#endif const u64 kMs2Ns = 1000 * 1000; fd_t mprof_fd = kInvalidFd; @@ -141,15 +155,14 @@ static void BackgroundThread(void *arg) { } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) { mprof_fd = 2; } else { - InternalScopedBuffer<char> filename(4096); - internal_snprintf(filename.data(), filename.size(), "%s.%d", - flags()->profile_memory, (int)internal_getpid()); - uptr openrv = OpenFile(filename.data(), true); - if (internal_iserror(openrv)) { + InternalScopedString filename(kMaxPathLength); + filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid()); + fd_t fd = OpenFile(filename.data(), WrOnly); + if (fd == kInvalidFd) { Printf("ThreadSanitizer: failed to open memory profile file '%s'\n", &filename[0]); } else { - mprof_fd = openrv; + mprof_fd = fd; } } } @@ -190,7 +203,6 @@ static void BackgroundThread(void *arg) { if (mprof_fd != kInvalidFd) MemoryProfiler(ctx, mprof_fd, i); -#ifndef TSAN_GO // Flush symbolizer cache if requested. 
if (flags()->flush_symbolizer_ms > 0) { u64 last = atomic_load(&ctx->last_symbolize_time_ns, @@ -202,7 +214,6 @@ static void BackgroundThread(void *arg) { atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed); } } -#endif } } @@ -210,13 +221,14 @@ static void StartBackgroundThread() { ctx->background_thread = internal_start_thread(&BackgroundThread, 0); } -#ifndef TSAN_GO +#ifndef __mips__ static void StopBackgroundThread() { atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed); internal_join_thread(ctx->background_thread); ctx->background_thread = 0; } #endif +#endif void DontNeedShadowFor(uptr addr, uptr size) { uptr shadow_beg = MemToShadow(addr); @@ -228,7 +240,7 @@ void MapShadow(uptr addr, uptr size) { // Global data is not 64K aligned, but there are no adjacent mappings, // so we can get away with unaligned mapping. // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment - MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier); + MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow"); // Meta shadow is 2:1, so tread carefully. static bool data_mapped = false; @@ -240,7 +252,7 @@ void MapShadow(uptr addr, uptr size) { if (!data_mapped) { // First call maps data+bss. data_mapped = true; - MmapFixedNoReserve(meta_begin, meta_end - meta_begin); + MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); } else { // Mapping continous heap. // Windows wants 64K alignment. @@ -250,19 +262,19 @@ void MapShadow(uptr addr, uptr size) { return; if (meta_begin < mapped_meta_end) meta_begin = mapped_meta_end; - MmapFixedNoReserve(meta_begin, meta_end - meta_begin); + MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"); mapped_meta_end = meta_end; } VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n", addr, addr+size, meta_begin, meta_end); } -void MapThreadTrace(uptr addr, uptr size) { +void MapThreadTrace(uptr addr, uptr size, const char *name) { DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size); CHECK_GE(addr, kTraceMemBeg); CHECK_LE(addr + size, kTraceMemEnd); CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment - uptr addr1 = (uptr)MmapFixedNoReserve(addr, size); + uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name); if (addr1 != addr) { Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p->%p)\n", addr, size, addr1); @@ -281,11 +293,11 @@ static void CheckShadowMapping() { if (p < beg || p >= end) continue; const uptr s = MemToShadow(p); - VPrintf(3, " checking pointer %p -> %p\n", p, s); + const uptr m = (uptr)MemToMeta(p); + VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m); CHECK(IsAppMem(p)); CHECK(IsShadowMem(s)); CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s)); - const uptr m = (uptr)MemToMeta(p); CHECK(IsMetaMem(m)); } } @@ -306,8 +318,10 @@ void Initialize(ThreadState *thr) { ctx = new(ctx_placeholder) Context; const char *options = GetEnv(kTsanOptionsEnv); + CacheBinaryName(); InitializeFlags(&ctx->flags, options); -#ifndef TSAN_GO + CheckVMASize(); +#ifndef SANITIZER_GO InitializeAllocator(); #endif InitializeInterceptors(); @@ -315,20 +329,23 @@ void Initialize(ThreadState *thr) { InitializePlatform(); InitializeMutex(); InitializeDynamicAnnotations(); -#ifndef TSAN_GO +#ifndef SANITIZER_GO InitializeShadowMemory(); #endif // Setup correct file descriptor for error reports. 
__sanitizer_set_report_path(common_flags()->log_path); InitializeSuppressions(); -#ifndef TSAN_GO +#ifndef SANITIZER_GO InitializeLibIgnore(); Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); -#endif + // On MIPS, TSan initialization is run before + // __pthread_initialize_minimal_internal() is finished, so we can not spawn + // new threads. +#ifndef __mips__ StartBackgroundThread(); -#ifndef TSAN_GO SetSandboxingCallback(StopBackgroundThread); #endif +#endif if (common_flags()->detect_deadlocks) ctx->dd = DDetector::Create(flags()); @@ -339,6 +356,9 @@ void Initialize(ThreadState *thr) { int tid = ThreadCreate(thr, 0, 0, true); CHECK_EQ(tid, 0); ThreadStart(thr, tid, internal_getpid()); +#if TSAN_CONTAINS_UBSAN + __ubsan::InitAsPlugin(); +#endif ctx->initialized = true; if (flags()->stop_on_start) { @@ -363,16 +383,15 @@ int Finalize(ThreadState *thr) { CommonSanitizerReportMutex.Unlock(); ctx->report_mtx.Unlock(); -#ifndef TSAN_GO - if (common_flags()->verbosity) - AllocatorPrintStats(); +#ifndef SANITIZER_GO + if (Verbosity()) AllocatorPrintStats(); #endif ThreadFinalize(thr); if (ctx->nreported) { failed = true; -#ifndef TSAN_GO +#ifndef SANITIZER_GO Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported); #else Printf("Found %d data race(s)\n", ctx->nreported); @@ -387,19 +406,22 @@ int Finalize(ThreadState *thr) { if (common_flags()->print_suppressions) PrintMatchedSuppressions(); -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (flags()->print_benign) PrintMatchedBenignRaces(); #endif failed = OnFinalize(failed); +#if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); StatOutput(ctx->stat); - return failed ? flags()->exitcode : 0; +#endif + + return failed ? common_flags()->exitcode : 0; } -#ifndef TSAN_GO +#ifndef SANITIZER_GO void ForkBefore(ThreadState *thr, uptr pc) { ctx->thread_registry->Lock(); ctx->report_mtx.Lock(); @@ -419,7 +441,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) { VPrintf(1, "ThreadSanitizer: forked new process with pid %d," " parent had %d threads\n", (int)internal_getpid(), (int)nthread); if (nthread == 1) { - internal_start_thread(&BackgroundThread, 0); + StartBackgroundThread(); } else { // We've just forked a multi-threaded process. We cannot reasonably function // after that (some mutexes may be locked before fork). So just enable @@ -432,7 +454,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) { } #endif -#ifdef TSAN_GO +#ifdef SANITIZER_GO NOINLINE void GrowShadowStack(ThreadState *thr) { const int sz = thr->shadow_stack_end - thr->shadow_stack; @@ -448,10 +470,10 @@ void GrowShadowStack(ThreadState *thr) { #endif u32 CurrentStackId(ThreadState *thr, uptr pc) { - if (thr->shadow_stack_pos == 0) // May happen during bootstrap. + if (!thr->is_inited) // May happen during bootstrap. 
return 0; if (pc != 0) { -#ifndef TSAN_GO +#ifndef SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #else if (thr->shadow_stack_pos == thr->shadow_stack_end) @@ -497,7 +519,7 @@ uptr TraceParts() { return TraceSize() / kTracePartSize; } -#ifndef TSAN_GO +#ifndef SANITIZER_GO extern "C" void __tsan_trace_switch() { TraceSwitch(cur_thread()); } @@ -530,7 +552,7 @@ void HandleRace(ThreadState *thr, u64 *shadow_mem, thr->racy_state[0] = cur.raw(); thr->racy_state[1] = old.raw(); thr->racy_shadow_addr = shadow_mem; -#ifndef TSAN_GO +#ifndef SANITIZER_GO HACKY_CALL(__tsan_report_race); #else ReportRace(thr); @@ -564,43 +586,26 @@ void MemoryAccessImpl1(ThreadState *thr, uptr addr, // it's just not worth it (performance- and complexity-wise). Shadow old(0); - if (kShadowCnt == 1) { - int idx = 0; -#include "tsan_update_shadow_word_inl.h" - } else if (kShadowCnt == 2) { - int idx = 0; -#include "tsan_update_shadow_word_inl.h" - idx = 1; -#include "tsan_update_shadow_word_inl.h" - } else if (kShadowCnt == 4) { - int idx = 0; -#include "tsan_update_shadow_word_inl.h" - idx = 1; -#include "tsan_update_shadow_word_inl.h" - idx = 2; -#include "tsan_update_shadow_word_inl.h" - idx = 3; -#include "tsan_update_shadow_word_inl.h" - } else if (kShadowCnt == 8) { - int idx = 0; -#include "tsan_update_shadow_word_inl.h" - idx = 1; -#include "tsan_update_shadow_word_inl.h" - idx = 2; -#include "tsan_update_shadow_word_inl.h" - idx = 3; + + // It release mode we manually unroll the loop, + // because empirically gcc generates better code this way. + // However, we can't afford unrolling in debug mode, because the function + // consumes almost 4K of stack. Gtest gives only 4K of stack to death test + // threads, which is not enough for the unrolled loop. +#if SANITIZER_DEBUG + for (int idx = 0; idx < 4; idx++) { #include "tsan_update_shadow_word_inl.h" - idx = 4; + } +#else + int idx = 0; #include "tsan_update_shadow_word_inl.h" - idx = 5; + idx = 1; #include "tsan_update_shadow_word_inl.h" - idx = 6; + idx = 2; #include "tsan_update_shadow_word_inl.h" - idx = 7; + idx = 3; #include "tsan_update_shadow_word_inl.h" - } else { - CHECK(false); - } +#endif // we did not find any races and had already stored // the current access info, so we are done @@ -651,7 +656,7 @@ bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) { return false; } -#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4 +#if defined(__SSE3__) #define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64)) @@ -711,11 +716,12 @@ bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) { ALWAYS_INLINE bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) { -#if defined(__SSE3__) && TSAN_SHADOW_COUNT == 4 +#if defined(__SSE3__) bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write); // NOTE: this check can fail if the shadow is concurrently mutated - // by other threads. - DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write)); + // by other threads. But it still can be useful if you modify + // ContainsSameAccessFast and want to ensure that it's not completely broken. 
+ // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write)); return res; #else return ContainsSameAccessSlow(s, a, sync_epoch, is_write); @@ -732,7 +738,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem, (uptr)shadow_mem[0], (uptr)shadow_mem[1], (uptr)shadow_mem[2], (uptr)shadow_mem[3]); -#if TSAN_DEBUG +#if SANITIZER_DEBUG if (!IsAppMem(addr)) { Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); @@ -842,7 +848,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size, } } else { // The region is big, reset only beginning and end. - const uptr kPageSize = 4096; + const uptr kPageSize = GetPageSizeCached(); u64 *begin = (u64*)MemToShadow(addr); u64 *end = begin + size / kShadowCell * kShadowCnt; u64 *p = begin; @@ -916,7 +922,7 @@ void FuncEntry(ThreadState *thr, uptr pc) { // Shadow stack maintenance can be replaced with // stack unwinding during trace switch (which presumably must be faster). DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack); -#ifndef TSAN_GO +#ifndef SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #else if (thr->shadow_stack_pos == thr->shadow_stack_end) @@ -936,7 +942,7 @@ void FuncExit(ThreadState *thr) { } DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack); -#ifndef TSAN_GO +#ifndef SANITIZER_GO DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end); #endif thr->shadow_stack_pos--; @@ -947,7 +953,7 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) { thr->ignore_reads_and_writes++; CHECK_GT(thr->ignore_reads_and_writes, 0); thr->fast_state.SetIgnoreBit(); -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (!ctx->after_multithreaded_fork) thr->mop_ignore_set.Add(CurrentStackId(thr, pc)); #endif @@ -959,7 +965,7 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) { CHECK_GE(thr->ignore_reads_and_writes, 0); if (thr->ignore_reads_and_writes == 0) { thr->fast_state.ClearIgnoreBit(); -#ifndef TSAN_GO +#ifndef SANITIZER_GO thr->mop_ignore_set.Reset(); #endif } @@ -969,7 +975,7 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid); thr->ignore_sync++; CHECK_GT(thr->ignore_sync, 0); -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (!ctx->after_multithreaded_fork) thr->sync_ignore_set.Add(CurrentStackId(thr, pc)); #endif @@ -979,7 +985,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) { DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid); thr->ignore_sync--; CHECK_GE(thr->ignore_sync, 0); -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (thr->ignore_sync == 0) thr->sync_ignore_set.Reset(); #endif @@ -989,7 +995,7 @@ bool MD5Hash::operator==(const MD5Hash &other) const { return hash[0] == other.hash[0] && hash[1] == other.hash[1]; } -#if TSAN_DEBUG +#if SANITIZER_DEBUG void build_consistency_debug() {} #else void build_consistency_release() {} @@ -1001,19 +1007,9 @@ void build_consistency_stats() {} void build_consistency_nostats() {} #endif -#if TSAN_SHADOW_COUNT == 1 -void build_consistency_shadow1() {} -#elif TSAN_SHADOW_COUNT == 2 -void build_consistency_shadow2() {} -#elif TSAN_SHADOW_COUNT == 4 -void build_consistency_shadow4() {} -#else -void build_consistency_shadow8() {} -#endif - } // namespace __tsan -#ifndef TSAN_GO +#ifndef SANITIZER_GO // Must be included in this file to make sure everything is inlined. 
#include "tsan_interface_inl.h" #endif diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h index a5b88e0c358..34b009b15d7 100644 --- a/libsanitizer/tsan/tsan_rtl.h +++ b/libsanitizer/tsan/tsan_rtl.h @@ -50,10 +50,23 @@ namespace __tsan { -#ifndef TSAN_GO +#ifndef SANITIZER_GO struct MapUnmapCallback; +#if defined(__mips64) || defined(__aarch64__) +static const uptr kAllocatorSpace = 0; +static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE; +static const uptr kAllocatorRegionSizeLog = 20; +static const uptr kAllocatorNumRegions = + kAllocatorSize >> kAllocatorRegionSizeLog; +typedef TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12, + MapUnmapCallback> ByteMap; +typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0, + CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap, + MapUnmapCallback> PrimaryAllocator; +#else typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0, DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator; +#endif typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache; typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator; typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, @@ -298,7 +311,7 @@ class Shadow : public FastState { } }; -struct SignalContext; +struct ThreadSignalContext; struct JmpBuf { uptr sp; @@ -330,7 +343,7 @@ struct ThreadState { int ignore_reads_and_writes; int ignore_sync; // Go does not support ignores. -#ifndef TSAN_GO +#ifndef SANITIZER_GO IgnoreSet mop_ignore_set; IgnoreSet sync_ignore_set; #endif @@ -343,17 +356,20 @@ struct ThreadState { u64 racy_state[2]; MutexSet mset; ThreadClock clock; -#ifndef TSAN_GO +#ifndef SANITIZER_GO AllocatorCache alloc_cache; InternalAllocatorCache internal_alloc_cache; Vector<JmpBuf> jmp_bufs; int ignore_interceptors; #endif +#if TSAN_COLLECT_STATS u64 stat[StatCnt]; +#endif const int tid; const int unique_id; bool in_symbolizer; bool in_ignored_lib; + bool is_inited; bool is_dead; bool is_freeing; bool is_vptr_access; @@ -363,18 +379,20 @@ struct ThreadState { const uptr tls_size; ThreadContext *tctx; +#if SANITIZER_DEBUG && !SANITIZER_GO InternalDeadlockDetector internal_deadlock_detector; +#endif DDPhysicalThread *dd_pt; DDLogicalThread *dd_lt; atomic_uintptr_t in_signal_handler; - SignalContext *signal_ctx; + ThreadSignalContext *signal_ctx; DenseSlabAllocCache block_cache; DenseSlabAllocCache sync_cache; DenseSlabAllocCache clock_cache; -#ifndef TSAN_GO +#ifndef SANITIZER_GO u32 last_sleep_stack_id; ThreadClock last_sleep_clock; #endif @@ -389,7 +407,7 @@ struct ThreadState { uptr tls_addr, uptr tls_size); }; -#ifndef TSAN_GO +#ifndef SANITIZER_GO __attribute__((tls_model("initial-exec"))) extern THREADLOCAL char cur_thread_placeholder[]; INLINE ThreadState *cur_thread() { @@ -411,13 +429,13 @@ class ThreadContext : public ThreadContextBase { u64 epoch1; // Override superclass callbacks. 
- void OnDead(); - void OnJoined(void *arg); - void OnFinished(); - void OnStarted(void *arg); - void OnCreated(void *arg); - void OnReset(); - void OnDetached(void *arg); + void OnDead() override; + void OnJoined(void *arg) override; + void OnFinished() override; + void OnStarted(void *arg) override; + void OnCreated(void *arg) override; + void OnReset() override; + void OnDetached(void *arg) override; }; struct RacyStacks { @@ -438,7 +456,7 @@ struct RacyAddress { struct FiredSuppression { ReportType type; - uptr pc; + uptr pc_or_addr; Suppression *supp; }; @@ -460,9 +478,11 @@ struct Context { ThreadRegistry *thread_registry; + Mutex racy_mtx; Vector<RacyStacks> racy_stacks; Vector<RacyAddress> racy_addresses; // Number of fired suppressions may be large enough. + Mutex fired_suppressions_mtx; InternalMmapVector<FiredSuppression> fired_suppressions; DDetector *dd; @@ -479,13 +499,13 @@ extern Context *ctx; // The one and the only global runtime context. struct ScopedIgnoreInterceptors { ScopedIgnoreInterceptors() { -#ifndef TSAN_GO +#ifndef SANITIZER_GO cur_thread()->ignore_interceptors++; #endif } ~ScopedIgnoreInterceptors() { -#ifndef TSAN_GO +#ifndef SANITIZER_GO cur_thread()->ignore_interceptors--; #endif } @@ -537,19 +557,24 @@ void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack) { } +#if TSAN_COLLECT_STATS void StatAggregate(u64 *dst, u64 *src); void StatOutput(u64 *stat); +#endif + void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) { - if (kCollectStats) - thr->stat[typ] += n; +#if TSAN_COLLECT_STATS + thr->stat[typ] += n; +#endif } void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) { - if (kCollectStats) - thr->stat[typ] = n; +#if TSAN_COLLECT_STATS + thr->stat[typ] = n; +#endif } void MapShadow(uptr addr, uptr size); -void MapThreadTrace(uptr addr, uptr size); +void MapThreadTrace(uptr addr, uptr size, const char *name); void DontNeedShadowFor(uptr addr, uptr size); void InitializeShadowMemory(); void InitializeInterceptors(); @@ -562,12 +587,9 @@ void ForkChildAfter(ThreadState *thr, uptr pc); void ReportRace(ThreadState *thr); bool OutputReport(ThreadState *thr, const ScopedReport &srep); -bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, - StackTrace trace); +bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace); bool IsExpectedReport(uptr addr, uptr size); void PrintMatchedBenignRaces(); -bool FrameIsInternal(const ReportStack *frame); -ReportStack *SkipTsanInternalFrames(ReportStack *ent); #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1 # define DPrintf Printf @@ -664,6 +686,12 @@ void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr); void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD void Acquire(ThreadState *thr, uptr pc, uptr addr); +// AcquireGlobal synchronizes the current thread with all other threads. +// In terms of happens-before relation, it draws a HB edge from all threads +// (where they happen to execute right now) to the current thread. We use it to +// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal +// right before executing finalizers. This provides a coarse, but simple +// approximation of the actual required synchronization. 
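// Editorial sketch, not part of the patch; caller-side illustration only,
// with hypothetical names (queue, Finalizer, fn, obj): the Go finalizer
// goroutine is expected to do roughly
//   AcquireGlobal(thr, pc);           // HB edge from every thread's current epoch
//   for (Finalizer *f = queue; f; f = f->next)
//     f->fn(f->obj);                  // finalizer now observes all prior writes
// i.e. anything a thread wrote before this point happens-before the finalizer
// body, at the cost of over-synchronizing.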
void AcquireGlobal(ThreadState *thr, uptr pc); void Release(ThreadState *thr, uptr pc, uptr addr); void ReleaseStore(ThreadState *thr, uptr pc, uptr addr); @@ -679,7 +707,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c); // The trick is that the call preserves all registers and the compiler // does not treat it as a call. // If it does not work for you, use normal call. -#if TSAN_DEBUG == 0 && defined(__x86_64__) +#if !SANITIZER_DEBUG && defined(__x86_64__) // The caller may not create the stack frame for itself at all, // so we create a reserve stack frame for it (1024b must be enough). #define HACKY_CALL(f) \ @@ -711,7 +739,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs, StatInc(thr, StatEvents); u64 pos = fs.GetTracePos(); if (UNLIKELY((pos % kTracePartSize) == 0)) { -#ifndef TSAN_GO +#ifndef SANITIZER_GO HACKY_CALL(__tsan_trace_switch); #else TraceSwitch(thr); @@ -723,6 +751,12 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs, *evp = ev; } +#ifndef SANITIZER_GO +uptr ALWAYS_INLINE HeapEnd() { + return kHeapMemEnd + PrimaryAllocator::AdditionalSize(); +} +#endif + } // namespace __tsan #endif // TSAN_RTL_H diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc index d731b4bfb59..deb7722f521 100644 --- a/libsanitizer/tsan/tsan_rtl_mutex.cc +++ b/libsanitizer/tsan/tsan_rtl_mutex.cc @@ -34,12 +34,8 @@ struct Callback : DDCallback { DDCallback::lt = thr->dd_lt; } - virtual u32 Unwind() { - return CurrentStackId(thr, pc); - } - virtual int UniqueTid() { - return thr->unique_id; - } + u32 Unwind() override { return CurrentStackId(thr, pc); } + int UniqueTid() override { return thr->unique_id; } }; void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) { @@ -86,7 +82,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr, void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) { DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr); StatInc(thr, StatMutexDestroy); -#ifndef TSAN_GO +#ifndef SANITIZER_GO // Global mutexes not marked as LINKER_INITIALIZED // cause tons of not interesting reports, so just ignore it. if (IsGlobalVar(addr)) @@ -403,7 +399,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) { s->mtx.Unlock(); } -#ifndef TSAN_GO +#ifndef SANITIZER_GO static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) { ThreadState *thr = reinterpret_cast<ThreadState*>(arg); ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base); @@ -474,7 +470,7 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) { for (int i = 0; i < r->n; i++) { for (int j = 0; j < (flags()->second_deadlock_stack ? 
2 : 1); j++) { u32 stk = r->loop[i].stk[j]; - if (stk) { + if (stk && stk != 0xffffffff) { rep.AddStack(StackDepotGet(stk), true); } else { // Sometimes we fail to extract the stack trace (FIXME: investigate), diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc index f86cfd4681d..d0d1fbaf45a 100644 --- a/libsanitizer/tsan/tsan_rtl_report.cc +++ b/libsanitizer/tsan/tsan_rtl_report.cc @@ -54,40 +54,43 @@ bool WEAK OnReport(const ReportDesc *rep, bool suppressed) { } #endif -static void StackStripMain(ReportStack *stack) { - ReportStack *last_frame = 0; - ReportStack *last_frame2 = 0; - for (ReportStack *ent = stack; ent; ent = ent->next) { +static void StackStripMain(SymbolizedStack *frames) { + SymbolizedStack *last_frame = nullptr; + SymbolizedStack *last_frame2 = nullptr; + for (SymbolizedStack *cur = frames; cur; cur = cur->next) { last_frame2 = last_frame; - last_frame = ent; + last_frame = cur; } if (last_frame2 == 0) return; +#ifndef SANITIZER_GO const char *last = last_frame->info.function; -#ifndef TSAN_GO const char *last2 = last_frame2->info.function; // Strip frame above 'main' if (last2 && 0 == internal_strcmp(last2, "main")) { - last_frame2->next = 0; + last_frame->ClearAll(); + last_frame2->next = nullptr; // Strip our internal thread start routine. } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) { - last_frame2->next = 0; + last_frame->ClearAll(); + last_frame2->next = nullptr; // Strip global ctors init. } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) { - last_frame2->next = 0; + last_frame->ClearAll(); + last_frame2->next = nullptr; // If both are 0, then we probably just failed to symbolize. } else if (last || last2) { // Ensure that we recovered stack completely. Trimmed stack // can actually happen if we do not instrument some code, // so it's only a debug print. However we must try hard to not miss it // due to our fault. - DPrintf("Bottom stack frame of stack %zx is missed\n", stack->pc); + DPrintf("Bottom stack frame is missed\n"); } #else // The last frame always point into runtime (gosched0, goexit0, runtime.main). - last_frame2->next = 0; - (void)last; + last_frame->ClearAll(); + last_frame2->next = nullptr; #endif } @@ -103,31 +106,29 @@ ReportStack *SymbolizeStackId(u32 stack_id) { static ReportStack *SymbolizeStack(StackTrace trace) { if (trace.size == 0) return 0; - ReportStack *stack = 0; + SymbolizedStack *top = nullptr; for (uptr si = 0; si < trace.size; si++) { const uptr pc = trace.trace[si]; -#ifndef TSAN_GO - // We obtain the return address, that is, address of the next instruction, - // so offset it by 1 byte. - const uptr pc1 = StackTrace::GetPreviousInstructionPc(pc); -#else - // FIXME(dvyukov): Go sometimes uses address of a function as top pc. uptr pc1 = pc; - if (si != trace.size - 1) - pc1 -= 1; -#endif - ReportStack *ent = SymbolizeCode(pc1); + // We obtain the return address, but we're interested in the previous + // instruction. 
+ if ((pc & kExternalPCBit) == 0) + pc1 = StackTrace::GetPreviousInstructionPc(pc); + SymbolizedStack *ent = SymbolizeCode(pc1); CHECK_NE(ent, 0); - ReportStack *last = ent; + SymbolizedStack *last = ent; while (last->next) { last->info.address = pc; // restore original pc for report last = last->next; } last->info.address = pc; // restore original pc for report - last->next = stack; - stack = ent; + last->next = top; + top = ent; } - StackStripMain(stack); + StackStripMain(top); + + ReportStack *stack = ReportStack::New(); + stack->frames = top; return stack; } @@ -196,7 +197,7 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) { rt->stack->suppressable = suppressable; } -#ifndef TSAN_GO +#ifndef SANITIZER_GO static ThreadContext *FindThreadByUidLocked(int unique_id) { ctx->thread_registry->CheckLocked(); for (unsigned i = 0; i < kMaxTid; i++) { @@ -241,7 +242,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) { #endif void ScopedReport::AddThread(int unique_tid, bool suppressable) { -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid)) AddThread(tctx, suppressable); #endif @@ -297,7 +298,7 @@ void ScopedReport::AddDeadMutex(u64 id) { void ScopedReport::AddLocation(uptr addr, uptr size) { if (addr == 0) return; -#ifndef TSAN_GO +#ifndef SANITIZER_GO int fd = -1; int creat_tid = -1; u32 creat_stack = 0; @@ -347,7 +348,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) { #endif } -#ifndef TSAN_GO +#ifndef SANITIZER_GO void ScopedReport::AddSleep(u32 stack_id) { rep_->sleep = SymbolizeStackId(stack_id); } @@ -366,30 +367,23 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, // This function restores stack trace and mutex set for the thread/epoch. // It does so by getting stack trace and mutex set at the beginning of // trace part, and then replaying the trace till the given epoch. 
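// Editorial sketch, not part of the patch: the replay below can be read as
// (simplified pseudo-C++, names approximate)
//   uptr depth = hdr->stack0.size;                 // seed from the part header
//   if (mset) *mset = hdr->mset0;
//   for (u64 i = ebegin; i <= eend; i++) {
//     Event ev = trace_events[i % TraceSize()];
//     if (type(ev) == EventTypeMop)            stack[depth] = pc(ev);   // current pc
//     else if (type(ev) == EventTypeFuncEnter) stack[depth++] = pc(ev);
//     else if (type(ev) == EventTypeFuncExit)  { if (depth) depth--; }
//     // lock/unlock events update *mset analogously
//   }
// so only the trace part that contains 'epoch' has to be read and walked.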
- ctx->thread_registry->CheckLocked(); - ThreadContext *tctx = static_cast<ThreadContext*>( - ctx->thread_registry->GetThreadLocked(tid)); - if (tctx == 0) - return; - if (tctx->status != ThreadStatusRunning - && tctx->status != ThreadStatusFinished - && tctx->status != ThreadStatusDead) - return; - Trace* trace = ThreadTrace(tctx->tid); - Lock l(&trace->mtx); + Trace* trace = ThreadTrace(tid); + ReadLock l(&trace->mtx); const int partidx = (epoch / kTracePartSize) % TraceParts(); TraceHeader* hdr = &trace->headers[partidx]; - if (epoch < hdr->epoch0) + if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize) return; + CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0); const u64 epoch0 = RoundDown(epoch, TraceSize()); const u64 eend = epoch % TraceSize(); const u64 ebegin = RoundDown(eend, kTracePartSize); DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n", tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx); - InternalScopedBuffer<uptr> stack(kShadowStackSize); + Vector<uptr> stack(MBlockReportStack); + stack.Resize(hdr->stack0.size + 64); for (uptr i = 0; i < hdr->stack0.size; i++) { stack[i] = hdr->stack0.trace[i]; - DPrintf2(" #%02lu: pc=%zx\n", i, stack[i]); + DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]); } if (mset) *mset = hdr->mset0; @@ -403,6 +397,8 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, if (typ == EventTypeMop) { stack[pos] = pc; } else if (typ == EventTypeFuncEnter) { + if (stack.Size() < pos + 2) + stack.Resize(pos + 2); stack[pos++] = pc; } else if (typ == EventTypeFuncExit) { if (pos > 0) @@ -425,50 +421,58 @@ void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk, if (pos == 0 && stack[0] == 0) return; pos++; - stk->Init(stack.data(), pos); + stk->Init(&stack[0], pos); } static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { bool equal_stack = false; RacyStacks hash; - if (flags()->suppress_equal_stacks) { - hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); - hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); - for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { - if (hash == ctx->racy_stacks[i]) { - DPrintf("ThreadSanitizer: suppressing report as doubled (stack)\n"); - equal_stack = true; - break; - } - } - } bool equal_address = false; RacyAddress ra0 = {addr_min, addr_max}; - if (flags()->suppress_equal_addresses) { - for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { - RacyAddress ra2 = ctx->racy_addresses[i]; - uptr maxbeg = max(ra0.addr_min, ra2.addr_min); - uptr minend = min(ra0.addr_max, ra2.addr_max); - if (maxbeg < minend) { - DPrintf("ThreadSanitizer: suppressing report as doubled (addr)\n"); - equal_address = true; - break; + { + ReadLock lock(&ctx->racy_mtx); + if (flags()->suppress_equal_stacks) { + hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); + hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr)); + for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) { + if (hash == ctx->racy_stacks[i]) { + VPrintf(2, + "ThreadSanitizer: suppressing report as doubled (stack)\n"); + equal_stack = true; + break; + } + } + } + if (flags()->suppress_equal_addresses) { + for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) { + RacyAddress ra2 = ctx->racy_addresses[i]; + uptr maxbeg = max(ra0.addr_min, ra2.addr_min); + uptr minend = min(ra0.addr_max, ra2.addr_max); + if (maxbeg < minend) { + VPrintf(2, "ThreadSanitizer: suppressing report as doubled 
(addr)\n"); + equal_address = true; + break; + } } } } - if (equal_stack || equal_address) { - if (!equal_stack) - ctx->racy_stacks.PushBack(hash); - if (!equal_address) - ctx->racy_addresses.PushBack(ra0); - return true; + if (!equal_stack && !equal_address) + return false; + if (!equal_stack) { + Lock lock(&ctx->racy_mtx); + ctx->racy_stacks.PushBack(hash); } - return false; + if (!equal_address) { + Lock lock(&ctx->racy_mtx); + ctx->racy_addresses.PushBack(ra0); + } + return true; } static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], uptr addr_min, uptr addr_max) { + Lock lock(&ctx->racy_mtx); if (flags()->suppress_equal_stacks) { RacyStacks hash; hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr)); @@ -482,26 +486,29 @@ static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2], } bool OutputReport(ThreadState *thr, const ScopedReport &srep) { - atomic_store(&ctx->last_symbolize_time_ns, NanoTime(), memory_order_relaxed); + if (!flags()->report_bugs) + return false; + atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime()); const ReportDesc *rep = srep.GetReport(); Suppression *supp = 0; - uptr suppress_pc = 0; - for (uptr i = 0; suppress_pc == 0 && i < rep->mops.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); - for (uptr i = 0; suppress_pc == 0 && i < rep->stacks.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->stacks[i], &supp); - for (uptr i = 0; suppress_pc == 0 && i < rep->threads.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); - for (uptr i = 0; suppress_pc == 0 && i < rep->locs.Size(); i++) - suppress_pc = IsSuppressed(rep->typ, rep->locs[i], &supp); - if (suppress_pc != 0) { - FiredSuppression s = {srep.GetReport()->typ, suppress_pc, supp}; + uptr pc_or_addr = 0; + for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp); + for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp); + for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp); + for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++) + pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp); + if (pc_or_addr != 0) { + Lock lock(&ctx->fired_suppressions_mtx); + FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp}; ctx->fired_suppressions.push_back(s); } { bool old_is_freeing = thr->is_freeing; thr->is_freeing = false; - bool suppressed = OnReport(rep, suppress_pc != 0); + bool suppressed = OnReport(rep, pc_or_addr != 0); thr->is_freeing = old_is_freeing; if (suppressed) return false; @@ -509,20 +516,20 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) { PrintReport(rep); ctx->nreported++; if (flags()->halt_on_error) - internal__exit(flags()->exitcode); + Die(); return true; } -bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, - StackTrace trace) { +bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) { + ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { - if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + if (ctx->fired_suppressions[k].type != type) continue; for (uptr j = 0; j < trace.size; j++) { FiredSuppression *s = &ctx->fired_suppressions[k]; - if (trace.trace[j] == s->pc) { + if (trace.trace[j] == s->pc_or_addr) { if (s->supp) 
- s->supp->hit_count++; + atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } @@ -530,32 +537,21 @@ bool IsFiredSuppression(Context *ctx, const ScopedReport &srep, return false; } -static bool IsFiredSuppression(Context *ctx, - const ScopedReport &srep, - uptr addr) { +static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) { + ReadLock lock(&ctx->fired_suppressions_mtx); for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) { - if (ctx->fired_suppressions[k].type != srep.GetReport()->typ) + if (ctx->fired_suppressions[k].type != type) continue; FiredSuppression *s = &ctx->fired_suppressions[k]; - if (addr == s->pc) { + if (addr == s->pc_or_addr) { if (s->supp) - s->supp->hit_count++; + atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed); return true; } } return false; } -bool FrameIsInternal(const ReportStack *frame) { - if (frame == 0) - return false; - const char *file = frame->info.file; - return file != 0 && - (internal_strstr(file, "tsan_interceptors.cc") || - internal_strstr(file, "sanitizer_common_interceptors.inc") || - internal_strstr(file, "tsan_interface_")); -} - static bool RaceBetweenAtomicAndFree(ThreadState *thr) { Shadow s0(thr->racy_state[0]); Shadow s1(thr->racy_state[1]); @@ -602,8 +598,6 @@ void ReportRace(ThreadState *thr) { return; } - ThreadRegistryLock l0(ctx->thread_registry); - ReportType typ = ReportTypeRace; if (thr->is_vptr_access && freed) typ = ReportTypeVptrUseAfterFree; @@ -611,29 +605,35 @@ void ReportRace(ThreadState *thr) { typ = ReportTypeVptrRace; else if (freed) typ = ReportTypeUseAfterFree; - ScopedReport rep(typ); - if (IsFiredSuppression(ctx, rep, addr)) + + if (IsFiredSuppression(ctx, typ, addr)) return; + const uptr kMop = 2; VarSizeStackTrace traces[kMop]; const uptr toppc = TraceTopPC(thr); ObtainCurrentStack(thr, toppc, &traces[0]); - if (IsFiredSuppression(ctx, rep, traces[0])) + if (IsFiredSuppression(ctx, typ, traces[0])) return; - InternalScopedBuffer<MutexSet> mset2(1); - new(mset2.data()) MutexSet(); + + // MutexSet is too large to live on stack. + Vector<u64> mset_buffer(MBlockScopedBuf); + mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1); + MutexSet *mset2 = new(&mset_buffer[0]) MutexSet(); + Shadow s2(thr->racy_state[1]); - RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2.data()); - if (IsFiredSuppression(ctx, rep, traces[1])) + RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2); + if (IsFiredSuppression(ctx, typ, traces[1])) return; if (HandleRacyStacks(thr, traces, addr_min, addr_max)) return; + ThreadRegistryLock l0(ctx->thread_registry); + ScopedReport rep(typ); for (uptr i = 0; i < kMop; i++) { Shadow s(thr->racy_state[i]); - rep.AddMemoryAccess(addr, s, traces[i], - i == 0 ? &thr->mset : mset2.data()); + rep.AddMemoryAccess(addr, s, traces[i], i == 0 ? 
&thr->mset : mset2); } for (uptr i = 0; i < kMop; i++) { @@ -647,7 +647,7 @@ void ReportRace(ThreadState *thr) { rep.AddLocation(addr_min, addr_max - addr_min); -#ifndef TSAN_GO +#ifndef SANITIZER_GO { // NOLINT Shadow s(thr->racy_state[1]); if (s.epoch() <= thr->last_sleep_clock.get(s.tid())) @@ -668,7 +668,7 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) { } void PrintCurrentStackSlow(uptr pc) { -#ifndef TSAN_GO +#ifndef SANITIZER_GO BufferedStackTrace *ptrace = new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace))) BufferedStackTrace(); diff --git a/libsanitizer/tsan/tsan_rtl_thread.cc b/libsanitizer/tsan/tsan_rtl_thread.cc index d75445aa8bb..9d7e2d3028e 100644 --- a/libsanitizer/tsan/tsan_rtl_thread.cc +++ b/libsanitizer/tsan/tsan_rtl_thread.cc @@ -28,7 +28,7 @@ ThreadContext::ThreadContext(int tid) , epoch1() { } -#ifndef TSAN_GO +#ifndef SANITIZER_GO ThreadContext::~ThreadContext() { } #endif @@ -90,7 +90,7 @@ void ThreadContext::OnStarted(void *arg) { epoch1 = (u64)-1; new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count, args->stk_addr, args->stk_size, args->tls_addr, args->tls_size); -#ifndef TSAN_GO +#ifndef SANITIZER_GO thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0]; thr->shadow_stack_pos = thr->shadow_stack; thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize; @@ -102,7 +102,7 @@ void ThreadContext::OnStarted(void *arg) { thr->shadow_stack_pos = thr->shadow_stack; thr->shadow_stack_end = thr->shadow_stack + kInitStackSize; #endif -#ifndef TSAN_GO +#ifndef SANITIZER_GO AllocatorThreadStart(thr); #endif if (common_flags()->detect_deadlocks) { @@ -118,6 +118,7 @@ void ThreadContext::OnStarted(void *arg) { AcquireImpl(thr, 0, &sync); StatInc(thr, StatSyncAcquire); sync.Reset(&thr->clock_cache); + thr->is_inited = true; DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " "tls_addr=%zx tls_size=%zx\n", tid, (uptr)epoch0, args->stk_addr, args->stk_size, @@ -139,15 +140,17 @@ void ThreadContext::OnFinished() { } ctx->clock_alloc.FlushCache(&thr->clock_cache); ctx->metamap.OnThreadIdle(thr); -#ifndef TSAN_GO +#ifndef SANITIZER_GO AllocatorThreadFinish(thr); #endif thr->~ThreadState(); +#if TSAN_COLLECT_STATS StatAggregate(ctx->stat, thr->stat); +#endif thr = 0; } -#ifndef TSAN_GO +#ifndef SANITIZER_GO struct ThreadLeak { ThreadContext *tctx; int count; @@ -169,7 +172,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) { } #endif -#ifndef TSAN_GO +#ifndef SANITIZER_GO static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) { if (tctx->tid == 0) { Printf("ThreadSanitizer: main thread finished with ignores enabled\n"); @@ -201,7 +204,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {} void ThreadFinalize(ThreadState *thr) { ThreadCheckIgnore(thr); -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (!flags()->report_thread_leaks) return; ThreadRegistryLock l(ctx->thread_registry); @@ -237,6 +240,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) { uptr stk_size = 0; uptr tls_addr = 0; uptr tls_size = 0; +#ifndef SANITIZER_GO GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size); if (tid) { @@ -257,6 +261,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) { thr_end, tls_addr + tls_size - thr_end); } } +#endif ThreadRegistry *tr = ctx->thread_registry; OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size }; @@ -266,7 +271,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) { thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid); 
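As an illustration of the trick used in the ReportRace hunk above — an object too large for the thread stack is constructed with placement new inside a dynamically grown word buffer — here is a minimal self-contained sketch; the type and names are hypothetical stand-ins, not the patch code.

#include <cstdint>
#include <new>
#include <vector>

// Illustrative sketch only; BigMutexSetLike is a hypothetical stand-in for a
// type that is too large to be a local variable.
struct BigMutexSetLike {
  uint64_t slots[4096];   // ~32 KB: fine on the heap, risky on a small stack
};

int main() {
  // Reserve word-sized storage on the heap, then construct in place.
  std::vector<uint64_t> buffer(sizeof(BigMutexSetLike) / sizeof(uint64_t) + 1);
  BigMutexSetLike *mset = new (buffer.data()) BigMutexSetLike();
  mset->slots[0] = 42;
  // BigMutexSetLike is trivially destructible; a non-trivial type would need
  // an explicit mset->~BigMutexSetLike() before the buffer goes away.
  return mset->slots[0] == 42 ? 0 : 1;
}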
tr->Unlock(); -#ifndef TSAN_GO +#ifndef SANITIZER_GO if (ctx->after_multithreaded_fork) { thr->ignore_interceptors++; ThreadIgnoreBegin(thr, 0); @@ -328,7 +333,7 @@ void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, thr->tid, (void*)pc, (void*)addr, (int)size, is_write); -#if TSAN_DEBUG +#if SANITIZER_DEBUG if (!IsAppMem(addr)) { Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc index cdb48358551..5eccca69312 100644 --- a/libsanitizer/tsan/tsan_stat.cc +++ b/libsanitizer/tsan/tsan_stat.cc @@ -13,17 +13,14 @@ namespace __tsan { +#if TSAN_COLLECT_STATS + void StatAggregate(u64 *dst, u64 *src) { - if (!kCollectStats) - return; for (int i = 0; i < StatCnt; i++) dst[i] += src[i]; } void StatOutput(u64 *stat) { - if (!kCollectStats) - return; - stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero]; static const char *name[StatCnt] = {}; @@ -165,8 +162,9 @@ void StatOutput(u64 *stat) { name[StatMtxAtExit] = " Atexit "; name[StatMtxAnnotations] = " Annotations "; name[StatMtxMBlock] = " MBlock "; - name[StatMtxJavaMBlock] = " JavaMBlock "; name[StatMtxDeadlockDetector] = " DeadlockDetector "; + name[StatMtxFired] = " FiredSuppressions "; + name[StatMtxRacy] = " RacyStacks "; name[StatMtxFD] = " FD "; Printf("Statistics:\n"); @@ -174,4 +172,6 @@ void StatOutput(u64 *stat) { Printf("%s: %16zu\n", name[i], (uptr)stat[i]); } +#endif + } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h index 132656f035f..002570f42f1 100644 --- a/libsanitizer/tsan/tsan_stat.h +++ b/libsanitizer/tsan/tsan_stat.h @@ -167,8 +167,9 @@ enum StatType { StatMtxAnnotations, StatMtxAtExit, StatMtxMBlock, - StatMtxJavaMBlock, StatMtxDeadlockDetector, + StatMtxFired, + StatMtxRacy, StatMtxFD, // This must be the last. diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc index 76460d90d92..5c8d03dddd2 100644 --- a/libsanitizer/tsan/tsan_suppressions.cc +++ b/libsanitizer/tsan/tsan_suppressions.cc @@ -19,6 +19,7 @@ #include "tsan_mman.h" #include "tsan_platform.h" +#ifndef SANITIZER_GO // Suppressions for true/false positives in standard libraries. static const char *const std_suppressions = // Libstdc++ 4.4 has data races in std::string. @@ -31,7 +32,6 @@ static const char *const std_suppressions = "race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n"; // Can be overriden in frontend. 
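The suppressions code around this point keeps __tsan_default_suppressions() as a WEAK hook that a frontend can override (the comment above notes this; the runtime's empty default follows just below). A hedged usage sketch: the hook name and the race:/called_from_lib: suppression kinds come from this patch, while the file and library patterns are made up.

// Hypothetical user translation unit linked into a TSan-instrumented program.
// Defining the strong symbol overrides the weak default in the runtime, so
// these suppressions are baked in without setting TSAN_OPTIONS=suppressions=.
extern "C" const char *__tsan_default_suppressions() {
  return "race:third_party/legacy_ring_buffer.cc\n"   // example pattern
         "called_from_lib:libplugin_host.so\n";        // example pattern
}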
-#ifndef TSAN_GO extern "C" const char *WEAK __tsan_default_suppressions() { return 0; } @@ -39,84 +39,105 @@ extern "C" const char *WEAK __tsan_default_suppressions() { namespace __tsan { -static bool suppressions_inited = false; +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char *kSuppressionTypes[] = { + kSuppressionRace, kSuppressionRaceTop, kSuppressionMutex, + kSuppressionThread, kSuppressionSignal, kSuppressionLib, + kSuppressionDeadlock}; void InitializeSuppressions() { - CHECK(!suppressions_inited); - SuppressionContext::InitIfNecessary(); -#ifndef TSAN_GO - SuppressionContext::Get()->Parse(__tsan_default_suppressions()); - SuppressionContext::Get()->Parse(std_suppressions); + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) // NOLINT + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); +#ifndef SANITIZER_GO + suppression_ctx->Parse(__tsan_default_suppressions()); + suppression_ctx->Parse(std_suppressions); #endif - suppressions_inited = true; } -SuppressionType conv(ReportType typ) { +SuppressionContext *Suppressions() { + CHECK(suppression_ctx); + return suppression_ctx; +} + +static const char *conv(ReportType typ) { if (typ == ReportTypeRace) - return SuppressionRace; + return kSuppressionRace; else if (typ == ReportTypeVptrRace) - return SuppressionRace; + return kSuppressionRace; else if (typ == ReportTypeUseAfterFree) - return SuppressionRace; + return kSuppressionRace; else if (typ == ReportTypeVptrUseAfterFree) - return SuppressionRace; + return kSuppressionRace; else if (typ == ReportTypeThreadLeak) - return SuppressionThread; + return kSuppressionThread; else if (typ == ReportTypeMutexDestroyLocked) - return SuppressionMutex; + return kSuppressionMutex; else if (typ == ReportTypeMutexDoubleLock) - return SuppressionMutex; + return kSuppressionMutex; else if (typ == ReportTypeMutexBadUnlock) - return SuppressionMutex; + return kSuppressionMutex; else if (typ == ReportTypeMutexBadReadLock) - return SuppressionMutex; + return kSuppressionMutex; else if (typ == ReportTypeMutexBadReadUnlock) - return SuppressionMutex; + return kSuppressionMutex; else if (typ == ReportTypeSignalUnsafe) - return SuppressionSignal; + return kSuppressionSignal; else if (typ == ReportTypeErrnoInSignal) - return SuppressionNone; + return kSuppressionNone; else if (typ == ReportTypeDeadlock) - return SuppressionDeadlock; + return kSuppressionDeadlock; Printf("ThreadSanitizer: unknown report type %d\n", typ), Die(); } +static uptr IsSuppressed(const char *stype, const AddressInfo &info, + Suppression **sp) { + if (suppression_ctx->Match(info.function, stype, sp) || + suppression_ctx->Match(info.file, stype, sp) || + suppression_ctx->Match(info.module, stype, sp)) { + VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ); + atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed); + return info.address; + } + return 0; +} + uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) { - if (!SuppressionContext::Get()->SuppressionCount() || stack == 0 || + CHECK(suppression_ctx); + if (!suppression_ctx->SuppressionCount() || stack == 0 || !stack->suppressable) return 0; - SuppressionType stype = conv(typ); - if (stype == SuppressionNone) + const char *stype = conv(typ); + if (0 == internal_strcmp(stype, kSuppressionNone)) return 0; - Suppression *s; - 
for (const ReportStack *frame = stack; frame; frame = frame->next) { - const AddressInfo &info = frame->info; - if (SuppressionContext::Get()->Match(info.function, stype, &s) || - SuppressionContext::Get()->Match(info.file, stype, &s) || - SuppressionContext::Get()->Match(info.module, stype, &s)) { - DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); - s->hit_count++; - *sp = s; - return info.address; - } + for (const SymbolizedStack *frame = stack->frames; frame; + frame = frame->next) { + uptr pc = IsSuppressed(stype, frame->info, sp); + if (pc != 0) + return pc; } + if (0 == internal_strcmp(stype, kSuppressionRace) && stack->frames != nullptr) + return IsSuppressed(kSuppressionRaceTop, stack->frames->info, sp); return 0; } uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { - if (!SuppressionContext::Get()->SuppressionCount() || loc == 0 || + CHECK(suppression_ctx); + if (!suppression_ctx->SuppressionCount() || loc == 0 || loc->type != ReportLocationGlobal || !loc->suppressable) return 0; - SuppressionType stype = conv(typ); - if (stype == SuppressionNone) + const char *stype = conv(typ); + if (0 == internal_strcmp(stype, kSuppressionNone)) return 0; Suppression *s; const DataInfo &global = loc->global; - if (SuppressionContext::Get()->Match(global.name, stype, &s) || - SuppressionContext::Get()->Match(global.module, stype, &s)) { - DPrintf("ThreadSanitizer: matched suppression '%s'\n", s->templ); - s->hit_count++; + if (suppression_ctx->Match(global.name, stype, &s) || + suppression_ctx->Match(global.module, stype, &s)) { + VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ); + atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed); *sp = s; return global.start; } @@ -125,17 +146,18 @@ uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) { void PrintMatchedSuppressions() { InternalMmapVector<Suppression *> matched(1); - SuppressionContext::Get()->GetMatched(&matched); + CHECK(suppression_ctx); + suppression_ctx->GetMatched(&matched); if (!matched.size()) return; int hit_count = 0; for (uptr i = 0; i < matched.size(); i++) - hit_count += matched[i]->hit_count; + hit_count += atomic_load_relaxed(&matched[i]->hit_count); Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count, (int)internal_getpid()); for (uptr i = 0; i < matched.size(); i++) { - Printf("%d %s:%s\n", matched[i]->hit_count, - SuppressionTypeString(matched[i]->type), matched[i]->templ); + Printf("%d %s:%s\n", matched[i]->hit_count, matched[i]->type, + matched[i]->templ); } } } // namespace __tsan diff --git a/libsanitizer/tsan/tsan_suppressions.h b/libsanitizer/tsan/tsan_suppressions.h index e38d81ece85..58951975075 100644 --- a/libsanitizer/tsan/tsan_suppressions.h +++ b/libsanitizer/tsan/tsan_suppressions.h @@ -16,7 +16,17 @@ namespace __tsan { +const char kSuppressionNone[] = "none"; +const char kSuppressionRace[] = "race"; +const char kSuppressionRaceTop[] = "race_top"; +const char kSuppressionMutex[] = "mutex"; +const char kSuppressionThread[] = "thread"; +const char kSuppressionSignal[] = "signal"; +const char kSuppressionLib[] = "called_from_lib"; +const char kSuppressionDeadlock[] = "deadlock"; + void InitializeSuppressions(); +SuppressionContext *Suppressions(); void PrintMatchedSuppressions(); uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp); uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp); diff --git a/libsanitizer/tsan/tsan_symbolize.cc 
b/libsanitizer/tsan/tsan_symbolize.cc index 795f838991b..0e54562e385 100644 --- a/libsanitizer/tsan/tsan_symbolize.cc +++ b/libsanitizer/tsan/tsan_symbolize.cc @@ -34,26 +34,16 @@ void ExitSymbolizer() { thr->ignore_interceptors--; } -// Denotes fake PC values that come from JIT/JAVA/etc. -// For such PC values __tsan_symbolize_external() will be called. -const uptr kExternalPCBit = 1ULL << 60; - // May be overriden by JIT/JAVA/etc, // whatever produces PCs marked with kExternalPCBit. -extern "C" bool __tsan_symbolize_external(uptr pc, - char *func_buf, uptr func_siz, - char *file_buf, uptr file_siz, - int *line, int *col) - SANITIZER_WEAK_ATTRIBUTE; - -bool __tsan_symbolize_external(uptr pc, - char *func_buf, uptr func_siz, - char *file_buf, uptr file_siz, - int *line, int *col) { +extern "C" bool WEAK __tsan_symbolize_external(uptr pc, + char *func_buf, uptr func_siz, + char *file_buf, uptr file_siz, + int *line, int *col) { return false; } -ReportStack *SymbolizeCode(uptr addr) { +SymbolizedStack *SymbolizeCode(uptr addr) { // Check if PC comes from non-native land. if (addr & kExternalPCBit) { // Declare static to not consume too much stack space. @@ -61,36 +51,17 @@ ReportStack *SymbolizeCode(uptr addr) { static char func_buf[1024]; static char file_buf[1024]; int line, col; - ReportStack *ent = ReportStack::New(addr); - if (!__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), - file_buf, sizeof(file_buf), &line, &col)) - return ent; - ent->info.function = internal_strdup(func_buf); - ent->info.file = internal_strdup(file_buf); - ent->info.line = line; - ent->info.column = col; - return ent; - } - static const uptr kMaxAddrFrames = 16; - InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames); - for (uptr i = 0; i < kMaxAddrFrames; i++) - new(&addr_frames[i]) AddressInfo(); - uptr addr_frames_num = Symbolizer::GetOrInit()->SymbolizePC( - addr, addr_frames.data(), kMaxAddrFrames); - if (addr_frames_num == 0) - return ReportStack::New(addr); - ReportStack *top = 0; - ReportStack *bottom = 0; - for (uptr i = 0; i < addr_frames_num; i++) { - ReportStack *cur_entry = ReportStack::New(addr); - cur_entry->info = addr_frames[i]; - if (i == 0) - top = cur_entry; - else - bottom->next = cur_entry; - bottom = cur_entry; + SymbolizedStack *frame = SymbolizedStack::New(addr); + if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf, + sizeof(file_buf), &line, &col)) { + frame->info.function = internal_strdup(func_buf); + frame->info.file = internal_strdup(file_buf); + frame->info.line = line; + frame->info.column = col; + } + return frame; } - return top; + return Symbolizer::GetOrInit()->SymbolizePC(addr); } ReportLocation *SymbolizeData(uptr addr) { diff --git a/libsanitizer/tsan/tsan_symbolize.h b/libsanitizer/tsan/tsan_symbolize.h index 0d9077ed379..a859f6318b0 100644 --- a/libsanitizer/tsan/tsan_symbolize.h +++ b/libsanitizer/tsan/tsan_symbolize.h @@ -18,7 +18,7 @@ namespace __tsan { void EnterSymbolizer(); void ExitSymbolizer(); -ReportStack *SymbolizeCode(uptr addr); +SymbolizedStack *SymbolizeCode(uptr addr); ReportLocation *SymbolizeData(uptr addr); void SymbolizeFlush(); diff --git a/libsanitizer/tsan/tsan_sync.cc b/libsanitizer/tsan/tsan_sync.cc index 2209199ac48..91ad8c8b228 100644 --- a/libsanitizer/tsan/tsan_sync.cc +++ b/libsanitizer/tsan/tsan_sync.cc @@ -78,17 +78,21 @@ uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) { return sz; } -void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) { +bool MetaMap::FreeRange(ThreadState 
*thr, uptr pc, uptr p, uptr sz) { + bool has_something = false; u32 *meta = MemToMeta(p); u32 *end = MemToMeta(p + sz); if (end == meta) end++; for (; meta < end; meta++) { u32 idx = *meta; + if (idx == 0) { + // Note: don't write to meta in this case -- the block can be huge. + continue; + } *meta = 0; - for (;;) { - if (idx == 0) - break; + has_something = true; + while (idx != 0) { if (idx & kFlagBlock) { block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask); break; @@ -104,6 +108,64 @@ void MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) { } } } + return has_something; +} + +// ResetRange removes all meta objects from the range. +// It is called for large mmap-ed regions. The function is best-effort wrt +// freeing of meta objects, because we don't want to page in the whole range +// which can be huge. The function probes pages one-by-one until it finds a page +// without meta objects, at this point it stops freeing meta objects. Because +// thread stacks grow top-down, we do the same starting from end as well. +void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) { + const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize; + const uptr kPageSize = GetPageSizeCached() * kMetaRatio; + if (sz <= 4 * kPageSize) { + // If the range is small, just do the normal free procedure. + FreeRange(thr, pc, p, sz); + return; + } + // First, round both ends of the range to page size. + uptr diff = RoundUp(p, kPageSize) - p; + if (diff != 0) { + FreeRange(thr, pc, p, diff); + p += diff; + sz -= diff; + } + diff = p + sz - RoundDown(p + sz, kPageSize); + if (diff != 0) { + FreeRange(thr, pc, p + sz - diff, diff); + sz -= diff; + } + // Now we must have a non-empty page-aligned range. + CHECK_GT(sz, 0); + CHECK_EQ(p, RoundUp(p, kPageSize)); + CHECK_EQ(sz, RoundUp(sz, kPageSize)); + const uptr p0 = p; + const uptr sz0 = sz; + // Probe start of the range. + while (sz > 0) { + bool has_something = FreeRange(thr, pc, p, kPageSize); + p += kPageSize; + sz -= kPageSize; + if (!has_something) + break; + } + // Probe end of the range. + while (sz > 0) { + bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize); + sz -= kPageSize; + if (!has_something) + break; + } + // Finally, page out the whole range (including the parts that we've just + // freed). Note: we can't simply madvise, because we need to leave a zeroed + // range (otherwise __tsan_java_move can crash if it encounters a left-over + // meta objects in java heap). 
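For readers skimming the ResetRange() comment above, here is a compact, self-contained sketch of the same best-effort strategy: trim the unaligned head and tail eagerly, then probe whole pages from both ends and stop at the first page that holds no metadata. RoundUpTo/RoundDownTo and the free_range callback are illustrative stand-ins, not the patch's helpers.

#include <cstdint>
#include <functional>

// Illustrative sketch only, not the patch code.
static uintptr_t RoundDownTo(uintptr_t x, uintptr_t a) { return x & ~(a - 1); }
static uintptr_t RoundUpTo(uintptr_t x, uintptr_t a) { return RoundDownTo(x + a - 1, a); }

// free_range(begin, size) frees metadata in [begin, begin + size) and returns
// true if it found anything.
void ResetRangeSketch(uintptr_t p, uintptr_t sz, uintptr_t page,
                      const std::function<bool(uintptr_t, uintptr_t)> &free_range) {
  if (sz < 2 * page) {                // small range: just free it directly
    free_range(p, sz);
    return;
  }
  uintptr_t head = RoundUpTo(p, page) - p;               // unaligned prefix
  if (head) { free_range(p, head); p += head; sz -= head; }
  uintptr_t tail = (p + sz) - RoundDownTo(p + sz, page); // unaligned suffix
  if (tail) { free_range(p + sz - tail, tail); sz -= tail; }
  // Probe forward from the start until a page turns out to be empty...
  while (sz > 0 && free_range(p, page)) { p += page; sz -= page; }
  // ...and backward from the end, since thread stacks grow downward and
  // cluster their metadata near the top of the mapping.
  while (sz > 0 && free_range(p + sz - page, page)) { sz -= page; }
}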
+ uptr metap = (uptr)MemToMeta(p0); + uptr metasz = sz0 / kMetaRatio; + UnmapOrDie((void*)metap, metasz); + MmapFixedNoReserve(metap, metasz); } MBlock* MetaMap::GetBlock(uptr p) { diff --git a/libsanitizer/tsan/tsan_sync.h b/libsanitizer/tsan/tsan_sync.h index 6ed3715ee0a..50bc872275d 100644 --- a/libsanitizer/tsan/tsan_sync.h +++ b/libsanitizer/tsan/tsan_sync.h @@ -71,7 +71,8 @@ class MetaMap { void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz); uptr FreeBlock(ThreadState *thr, uptr pc, uptr p); - void FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz); + bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz); + void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz); MBlock* GetBlock(uptr p); SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc, @@ -83,9 +84,9 @@ class MetaMap { void OnThreadIdle(ThreadState *thr); private: - static const u32 kFlagMask = 3 << 30; - static const u32 kFlagBlock = 1 << 30; - static const u32 kFlagSync = 2 << 30; + static const u32 kFlagMask = 3u << 30; + static const u32 kFlagBlock = 1u << 30; + static const u32 kFlagSync = 2u << 30; typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc; typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc; BlockAlloc block_alloc_; diff --git a/libsanitizer/tsan/tsan_trace.h b/libsanitizer/tsan/tsan_trace.h index 8eceb634a53..a27efa9fd7a 100644 --- a/libsanitizer/tsan/tsan_trace.h +++ b/libsanitizer/tsan/tsan_trace.h @@ -18,9 +18,9 @@ namespace __tsan { -const int kTracePartSizeBits = 14; +const int kTracePartSizeBits = 13; const int kTracePartSize = 1 << kTracePartSizeBits; -const int kTraceParts = 4 * 1024 * 1024 / kTracePartSize; +const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize; const int kTraceSize = kTracePartSize * kTraceParts; // Must fit into 3 bits. @@ -40,7 +40,7 @@ enum EventType { typedef u64 Event; struct TraceHeader { -#ifndef TSAN_GO +#ifndef SANITIZER_GO BufferedStackTrace stack0; // Start stack for the trace. #else VarSizeStackTrace stack0; @@ -52,13 +52,15 @@ struct TraceHeader { }; struct Trace { - TraceHeader headers[kTraceParts]; Mutex mtx; -#ifndef TSAN_GO +#ifndef SANITIZER_GO // Must be last to catch overflow as paging fault. // Go shadow stack is dynamically allocated. uptr shadow_stack[kShadowStackSize]; #endif + // Must be the last field, because we unmap the unused part in + // CreateThreadContext. + TraceHeader headers[kTraceParts]; Trace() : mtx(MutexTypeTrace, StatMtxTrace) { diff --git a/libsanitizer/tsan/tsan_update_shadow_word_inl.h b/libsanitizer/tsan/tsan_update_shadow_word_inl.h index 91def7bb1e8..2ea74283818 100644 --- a/libsanitizer/tsan/tsan_update_shadow_word_inl.h +++ b/libsanitizer/tsan/tsan_update_shadow_word_inl.h @@ -36,7 +36,8 @@ do { } StatInc(thr, StatShadowAnotherThread); if (HappensBefore(old, thr)) { - StoreIfNotYetStored(sp, &store_word); + if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) + StoreIfNotYetStored(sp, &store_word); break; } if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)) diff --git a/libsanitizer/ubsan/Makefile.am b/libsanitizer/ubsan/Makefile.am index 12d1ffac8f5..29b4115389f 100644 --- a/libsanitizer/ubsan/Makefile.am +++ b/libsanitizer/ubsan/Makefile.am @@ -3,7 +3,7 @@ AM_CPPFLAGS = -I $(top_srcdir) -I $(top_srcdir)/include # May be used by toolexeclibdir. 
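The tsan_sync.h hunk above also switches the meta-map flag constants to unsigned literals; with plain int literals, 2 << 30 and 3 << 30 no longer fit in a 32-bit signed int, so the old constants relied on the sign bit, whereas unsigned literals keep both the values and the bit tests well-defined. A small self-contained sketch of the same tag-in-the-top-two-bits scheme, with hypothetical helper names:

#include <cstdint>

// Illustrative sketch only. The top two bits of a 32-bit slot say what the
// remaining 30 bits index.
const uint32_t kFlagMask  = 3u << 30;
const uint32_t kFlagBlock = 1u << 30;
const uint32_t kFlagSync  = 2u << 30;

inline uint32_t MakeBlockRef(uint32_t idx) { return kFlagBlock | (idx & ~kFlagMask); }
inline uint32_t MakeSyncRef(uint32_t idx)  { return kFlagSync  | (idx & ~kFlagMask); }
inline bool IsBlockRef(uint32_t v) { return (v & kFlagMask) == kFlagBlock; }
inline bool IsSyncRef(uint32_t v)  { return (v & kFlagMask) == kFlagSync; }
inline uint32_t RefIndex(uint32_t v) { return v & ~kFlagMask; }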
gcc_version := $(shell cat $(top_srcdir)/../gcc/BASE-VER) -DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1 AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic -Wno-long-long -fPIC -fno-builtin -fno-exceptions -fno-rtti -fomit-frame-pointer -funwind-tables -fvisibility=hidden -Wno-variadic-macros AM_CXXFLAGS += $(LIBSTDCXX_RAW_CXX_CXXFLAGS) AM_CXXFLAGS += -std=gnu++11 @@ -11,15 +11,21 @@ ACLOCAL_AMFLAGS = -I m4 toolexeclib_LTLIBRARIES = libubsan.la -ubsan_files = \ +ubsan_plugin_files = \ ubsan_diag.cc \ ubsan_flags.cc \ ubsan_handlers.cc \ ubsan_handlers_cxx.cc \ ubsan_init.cc \ ubsan_type_hash.cc \ + ubsan_type_hash_itanium.cc \ + ubsan_type_hash_win.cc \ ubsan_value.cc +ubsan_files = \ + $(ubsan_plugin_files) \ + ubsan_init_standalone.cc + libubsan_la_SOURCES = $(ubsan_files) libubsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la if !USING_MAC_INTERPOSE @@ -29,10 +35,13 @@ if LIBBACKTRACE_SUPPORTED libubsan_la_LIBADD += $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la endif libubsan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS) +if USE_CXX_ABI_FLAG +libubsan_la_LIBADD += -lc++abi +endif libubsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libubsan) # Use special rules for files that require RTTI support. -ubsan_handlers_cxx.% ubsan_type_hash.% : AM_CXXFLAGS += -frtti +ubsan_handlers_cxx.% ubsan_type_hash.% ubsan_type_hash_itanium.% : AM_CXXFLAGS += -frtti # Work around what appears to be a GNU make bug handling MAKEFLAGS # values defined in terms of make variables, as is the case for CC and diff --git a/libsanitizer/ubsan/Makefile.in b/libsanitizer/ubsan/Makefile.in index 64aad19a59f..3b5cfbf3778 100644 --- a/libsanitizer/ubsan/Makefile.in +++ b/libsanitizer/ubsan/Makefile.in @@ -54,6 +54,7 @@ host_triplet = @host@ target_triplet = @target@ @USING_MAC_INTERPOSE_FALSE@am__append_1 = $(top_builddir)/interception/libinterception.la @LIBBACKTRACE_SUPPORTED_TRUE@am__append_2 = $(top_builddir)/libbacktrace/libsanitizer_libbacktrace.la +@USE_CXX_ABI_FLAG_TRUE@am__append_3 = -lc++abi subdir = ubsan DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 @@ -106,11 +107,14 @@ LTLIBRARIES = $(toolexeclib_LTLIBRARIES) am__DEPENDENCIES_1 = libubsan_la_DEPENDENCIES = \ $(top_builddir)/sanitizer_common/libsanitizer_common.la \ - $(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1) + $(am__append_1) $(am__append_2) $(am__DEPENDENCIES_1) \ + $(am__DEPENDENCIES_1) am__objects_1 = ubsan_diag.lo ubsan_flags.lo ubsan_handlers.lo \ ubsan_handlers_cxx.lo ubsan_init.lo ubsan_type_hash.lo \ + ubsan_type_hash_itanium.lo ubsan_type_hash_win.lo \ ubsan_value.lo -am_libubsan_la_OBJECTS = $(am__objects_1) +am__objects_2 = $(am__objects_1) ubsan_init_standalone.lo +am_libubsan_la_OBJECTS = $(am__objects_2) libubsan_la_OBJECTS = $(am_libubsan_la_OBJECTS) libubsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \ $(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \ @@ -160,7 +164,7 @@ CXXCPP = @CXXCPP@ CXXDEPMODE = @CXXDEPMODE@ CXXFLAGS = @CXXFLAGS@ CYGPATH_W = @CYGPATH_W@ -DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC +DEFS = -D_GNU_SOURCE -D_DEBUG -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -DPIC -DCAN_SANITIZE_UB=1 DEPDIR = 
@DEPDIR@ DSYMUTIL = @DSYMUTIL@ DUMPBIN = @DUMPBIN@ @@ -290,19 +294,26 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \ -std=gnu++11 ACLOCAL_AMFLAGS = -I m4 toolexeclib_LTLIBRARIES = libubsan.la -ubsan_files = \ +ubsan_plugin_files = \ ubsan_diag.cc \ ubsan_flags.cc \ ubsan_handlers.cc \ ubsan_handlers_cxx.cc \ ubsan_init.cc \ ubsan_type_hash.cc \ + ubsan_type_hash_itanium.cc \ + ubsan_type_hash_win.cc \ ubsan_value.cc +ubsan_files = \ + $(ubsan_plugin_files) \ + ubsan_init_standalone.cc + libubsan_la_SOURCES = $(ubsan_files) libubsan_la_LIBADD = \ $(top_builddir)/sanitizer_common/libsanitizer_common.la \ - $(am__append_1) $(am__append_2) $(LIBSTDCXX_RAW_CXX_LDFLAGS) + $(am__append_1) $(am__append_2) $(LIBSTDCXX_RAW_CXX_LDFLAGS) \ + $(am__append_3) libubsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libubsan) # Work around what appears to be a GNU make bug handling MAKEFLAGS @@ -424,7 +435,10 @@ distclean-compile: @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_handlers_cxx.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_init_standalone.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_itanium.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_type_hash_win.Plo@am__quote@ @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ubsan_value.Plo@am__quote@ .cc.o: @@ -631,7 +645,7 @@ uninstall-am: uninstall-toolexeclibLTLIBRARIES # Use special rules for files that require RTTI support. -ubsan_handlers_cxx.% ubsan_type_hash.% : AM_CXXFLAGS += -frtti +ubsan_handlers_cxx.% ubsan_type_hash.% ubsan_type_hash_itanium.% : AM_CXXFLAGS += -frtti # Tell versions [3.59,3.63) of GNU make to not export all variables. # Otherwise a system limit (for SysV at least) may be exceeded. diff --git a/libsanitizer/ubsan/ubsan_checks.inc b/libsanitizer/ubsan/ubsan_checks.inc new file mode 100644 index 00000000000..935d82c5079 --- /dev/null +++ b/libsanitizer/ubsan/ubsan_checks.inc @@ -0,0 +1,51 @@ +//===-- ubsan_checks.inc ----------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// List of checks handled by UBSan runtime. +// +//===----------------------------------------------------------------------===// +#ifndef UBSAN_CHECK +# error "Define UBSAN_CHECK prior to including this file!" +#endif + +// UBSAN_CHECK(Name, SummaryKind, FlagName) +// SummaryKind and FlagName should be string literals. 
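The ubsan_checks.inc file introduced here is an X-macro table: each consumer defines UBSAN_CHECK, includes the file, and undefines the macro again (ErrorType and ConvertTypeToString() later in this patch are both generated that way). A minimal self-contained sketch of the pattern follows, with a two-entry list inlined as a macro table instead of an include so it compiles on its own.

// Illustrative sketch only; the two entries mirror ubsan_checks.inc.
#define CHECK_LIST(X)                      \
  X(GenericUB, "undefined-behavior")       \
  X(NullPointerUse, "null-pointer-use")

enum class ErrorType {
#define AS_ENUM(Name, SummaryKind) Name,
  CHECK_LIST(AS_ENUM)
#undef AS_ENUM
};

inline const char *SummaryKind(ErrorType T) {
  switch (T) {
#define AS_CASE(Name, SummaryKind) case ErrorType::Name: return SummaryKind;
    CHECK_LIST(AS_CASE)
#undef AS_CASE
  }
  return "undefined-behavior";   // unreachable for valid enumerators
}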
+ +UBSAN_CHECK(GenericUB, "undefined-behavior", "-fsanitize=undefined") +UBSAN_CHECK(NullPointerUse, "null-pointer-use", "-fsanitize=null") +UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use", + "-fsanitize=alignment") +UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size", + "-fsanitize=object-size") +UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow", + "-fsanitize=signed-integer-overflow") +UBSAN_CHECK(UnsignedIntegerOverflow, "unsigned-integer-overflow", + "-fsanitize=unsigned-integer-overflow") +UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero", + "-fsanitize=integer-divide-by-zero") +UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", + "-fsanitize=float-divide-by-zero") +UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "-fsanitize=shift-base") +UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", + "-fsanitize=shift-exponent") +UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "-fsanitize=bounds") +UBSAN_CHECK(UnreachableCall, "unreachable-call", "-fsanitize=unreachable") +UBSAN_CHECK(MissingReturn, "missing-return", "-fsanitize=return") +UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", + "-fsanitize=vla-bound") +UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow", + "-fsanitize=float-cast-overflow") +UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "-fsanitize=bool") +UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "-fsanitize=enum") +UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch", + "-fsanitize=function") +UBSAN_CHECK(InvalidNullReturn, "invalid-null-return", + "-fsanitize=returns-nonnull-attribute") +UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument", + "-fsanitize=nonnull-attribute") +UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "-fsanitize=vptr") +UBSAN_CHECK(CFIBadType, "cfi-bad-type", "-fsanitize=cfi") diff --git a/libsanitizer/ubsan/ubsan_diag.cc b/libsanitizer/ubsan/ubsan_diag.cc index 028ac1a9b97..1197f837f75 100644 --- a/libsanitizer/ubsan/ubsan_diag.cc +++ b/libsanitizer/ubsan/ubsan_diag.cc @@ -9,19 +9,23 @@ // //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB #include "ubsan_diag.h" #include "ubsan_init.h" #include "ubsan_flags.h" +#include "sanitizer_common/sanitizer_placement_new.h" #include "sanitizer_common/sanitizer_report_decorator.h" #include "sanitizer_common/sanitizer_stacktrace.h" #include "sanitizer_common/sanitizer_stacktrace_printer.h" +#include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_symbolizer.h" #include <stdio.h> using namespace __ubsan; static void MaybePrintStackTrace(uptr pc, uptr bp) { - // We assume that flags are already parsed: InitIfNecessary + // We assume that flags are already parsed, as UBSan runtime // will definitely be called when we print the first diagnostics message. if (!flags()->print_stacktrace) return; @@ -37,20 +41,41 @@ static void MaybePrintStackTrace(uptr pc, uptr bp) { stack.Print(); } -static void MaybeReportErrorSummary(Location Loc) { +static const char *ConvertTypeToString(ErrorType Type) { + switch (Type) { +#define UBSAN_CHECK(Name, SummaryKind, FlagName) \ + case ErrorType::Name: \ + return SummaryKind; +#include "ubsan_checks.inc" +#undef UBSAN_CHECK + } + UNREACHABLE("unknown ErrorType!"); +} + +static void MaybeReportErrorSummary(Location Loc, ErrorType Type) { if (!common_flags()->print_summary) return; - // Don't try to unwind the stack trace in UBSan summaries: just use the - // provided location. 
+ if (!flags()->report_error_type) + Type = ErrorType::GenericUB; + const char *ErrorKind = ConvertTypeToString(Type); if (Loc.isSourceLocation()) { SourceLocation SLoc = Loc.getSourceLocation(); if (!SLoc.isInvalid()) { - ReportErrorSummary("undefined-behavior", SLoc.getFilename(), - SLoc.getLine(), ""); + AddressInfo AI; + AI.file = internal_strdup(SLoc.getFilename()); + AI.line = SLoc.getLine(); + AI.column = SLoc.getColumn(); + AI.function = internal_strdup(""); // Avoid printing ?? as function name. + ReportErrorSummary(ErrorKind, AI); + AI.Clear(); return; } + } else if (Loc.isSymbolizedStack()) { + const AddressInfo &AI = Loc.getSymbolizedStack()->info; + ReportErrorSummary(ErrorKind, AI); + return; } - ReportErrorSummary("undefined-behavior"); + ReportErrorSummary(ErrorKind); } namespace { @@ -64,31 +89,9 @@ class Decorator : public SanitizerCommonDecorator { }; } -Location __ubsan::getCallerLocation(uptr CallerLoc) { - if (!CallerLoc) - return Location(); - - uptr Loc = StackTrace::GetPreviousInstructionPc(CallerLoc); - return getFunctionLocation(Loc, 0); -} - -Location __ubsan::getFunctionLocation(uptr Loc, const char **FName) { - if (!Loc) - return Location(); - InitIfNecessary(); - - AddressInfo Info; - if (!Symbolizer::GetOrInit()->SymbolizePC(Loc, &Info, 1) || !Info.module || - !*Info.module) - return Location(Loc); - - if (FName && Info.function) - *FName = Info.function; - - if (!Info.file) - return ModuleLocation(Info.module, Info.module_offset); - - return SourceLocation(Info.file, Info.line, Info.column); +SymbolizedStack *__ubsan::getSymbolizedLocation(uptr PC) { + InitAsStandaloneIfNecessary(); + return Symbolizer::GetOrInit()->SymbolizePC(PC); } Diag &Diag::operator<<(const TypeDescriptor &V) { @@ -129,18 +132,27 @@ static void renderLocation(Location Loc) { LocBuffer.append("<unknown>"); else RenderSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(), - SLoc.getColumn(), common_flags()->strip_path_prefix); - break; - } - case Location::LK_Module: { - ModuleLocation MLoc = Loc.getModuleLocation(); - RenderModuleLocation(&LocBuffer, MLoc.getModuleName(), MLoc.getOffset(), - common_flags()->strip_path_prefix); + SLoc.getColumn(), common_flags()->symbolize_vs_style, + common_flags()->strip_path_prefix); break; } case Location::LK_Memory: LocBuffer.append("%p", Loc.getMemoryLocation()); break; + case Location::LK_Symbolized: { + const AddressInfo &Info = Loc.getSymbolizedStack()->info; + if (Info.file) { + RenderSourceLocation(&LocBuffer, Info.file, Info.line, Info.column, + common_flags()->symbolize_vs_style, + common_flags()->strip_path_prefix); + } else if (Info.module) { + RenderModuleLocation(&LocBuffer, Info.module, Info.module_offset, + common_flags()->strip_path_prefix); + } else { + LocBuffer.append("%p", Info.address); + } + break; + } case Location::LK_Null: LocBuffer.append("<unknown>"); break; @@ -164,8 +176,12 @@ static void renderText(const char *Message, const Diag::Arg *Args) { case Diag::AK_String: Printf("%s", A.String); break; - case Diag::AK_Mangled: { - Printf("'%s'", Symbolizer::GetOrInit()->Demangle(A.String)); + case Diag::AK_TypeName: { + if (SANITIZER_WINDOWS) + // The Windows implementation demangles names early. + Printf("'%s'", A.String); + else + Printf("'%s'", Symbolizer::GetOrInit()->Demangle(A.String)); break; } case Diag::AK_SInt: @@ -185,7 +201,11 @@ static void renderText(const char *Message, const Diag::Arg *Args) { // FIXME: Support floating-point formatting in sanitizer_common's // printf, and stop using snprintf here. 
char Buffer[32]; +#if SANITIZER_WINDOWS + sprintf_s(Buffer, sizeof(Buffer), "%Lg", (long double)A.Float); +#else snprintf(Buffer, sizeof(Buffer), "%Lg", (long double)A.Float); +#endif Printf("%s", Buffer); break; } @@ -332,25 +352,38 @@ Diag::~Diag() { NumRanges, Args); } -ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc) - : Opts(Opts), SummaryLoc(SummaryLoc) { - InitIfNecessary(); +ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc, + ErrorType Type) + : Opts(Opts), SummaryLoc(SummaryLoc), Type(Type) { + InitAsStandaloneIfNecessary(); CommonSanitizerReportMutex.Lock(); } ScopedReport::~ScopedReport() { MaybePrintStackTrace(Opts.pc, Opts.bp); - MaybeReportErrorSummary(SummaryLoc); + MaybeReportErrorSummary(SummaryLoc, Type); CommonSanitizerReportMutex.Unlock(); if (Opts.DieAfterReport || flags()->halt_on_error) Die(); } -bool __ubsan::MatchSuppression(const char *Str, SuppressionType Type) { +ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)]; +static SuppressionContext *suppression_ctx = nullptr; +static const char kVptrCheck[] = "vptr_check"; +static const char *kSuppressionTypes[] = { kVptrCheck }; + +void __ubsan::InitializeSuppressions() { + CHECK_EQ(nullptr, suppression_ctx); + suppression_ctx = new (suppression_placeholder) // NOLINT + SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes)); + suppression_ctx->ParseFromFile(flags()->suppressions); +} + +bool __ubsan::IsVptrCheckSuppressed(const char *TypeName) { + InitAsStandaloneIfNecessary(); + CHECK(suppression_ctx); Suppression *s; - // If .preinit_array is not used, it is possible that the UBSan runtime is not - // initialized. - if (!SANITIZER_CAN_USE_PREINIT_ARRAY) - InitIfNecessary(); - return SuppressionContext::Get()->Match(Str, Type, &s); + return suppression_ctx->Match(TypeName, kVptrCheck, &s); } + +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_diag.h b/libsanitizer/ubsan/ubsan_diag.h index fbb3f1ff9d4..7103d522953 100644 --- a/libsanitizer/ubsan/ubsan_diag.h +++ b/libsanitizer/ubsan/ubsan_diag.h @@ -13,79 +13,84 @@ #include "ubsan_value.h" #include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer_common/sanitizer_suppressions.h" +#include "sanitizer_common/sanitizer_symbolizer.h" namespace __ubsan { -/// \brief A location within a loaded module in the program. These are used when -/// the location can't be resolved to a SourceLocation. -class ModuleLocation { - const char *ModuleName; - uptr Offset; +class SymbolizedStackHolder { + SymbolizedStack *Stack; + + void clear() { + if (Stack) + Stack->ClearAll(); + } public: - ModuleLocation() : ModuleName(0), Offset(0) {} - ModuleLocation(const char *ModuleName, uptr Offset) - : ModuleName(ModuleName), Offset(Offset) {} - const char *getModuleName() const { return ModuleName; } - uptr getOffset() const { return Offset; } + explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr) + : Stack(Stack) {} + ~SymbolizedStackHolder() { clear(); } + void reset(SymbolizedStack *S) { + if (Stack != S) + clear(); + Stack = S; + } + const SymbolizedStack *get() const { return Stack; } }; +SymbolizedStack *getSymbolizedLocation(uptr PC); + +inline SymbolizedStack *getCallerLocation(uptr CallerPC) { + CHECK(CallerPC); + uptr PC = StackTrace::GetPreviousInstructionPc(CallerPC); + return getSymbolizedLocation(PC); +} + /// A location of some data within the program's address space. typedef uptr MemoryLocation; /// \brief Location at which a diagnostic can be emitted. 
Either a -/// SourceLocation, a ModuleLocation, or a MemoryLocation. +/// SourceLocation, a MemoryLocation, or a SymbolizedStack. class Location { public: - enum LocationKind { LK_Null, LK_Source, LK_Module, LK_Memory }; + enum LocationKind { LK_Null, LK_Source, LK_Memory, LK_Symbolized }; private: LocationKind Kind; // FIXME: In C++11, wrap these in an anonymous union. SourceLocation SourceLoc; - ModuleLocation ModuleLoc; MemoryLocation MemoryLoc; + const SymbolizedStack *SymbolizedLoc; // Not owned. public: Location() : Kind(LK_Null) {} Location(SourceLocation Loc) : Kind(LK_Source), SourceLoc(Loc) {} - Location(ModuleLocation Loc) : - Kind(LK_Module), ModuleLoc(Loc) {} Location(MemoryLocation Loc) : Kind(LK_Memory), MemoryLoc(Loc) {} + // SymbolizedStackHolder must outlive Location object. + Location(const SymbolizedStackHolder &Stack) : + Kind(LK_Symbolized), SymbolizedLoc(Stack.get()) {} LocationKind getKind() const { return Kind; } bool isSourceLocation() const { return Kind == LK_Source; } - bool isModuleLocation() const { return Kind == LK_Module; } bool isMemoryLocation() const { return Kind == LK_Memory; } + bool isSymbolizedStack() const { return Kind == LK_Symbolized; } SourceLocation getSourceLocation() const { CHECK(isSourceLocation()); return SourceLoc; } - ModuleLocation getModuleLocation() const { - CHECK(isModuleLocation()); - return ModuleLoc; - } MemoryLocation getMemoryLocation() const { CHECK(isMemoryLocation()); return MemoryLoc; } + const SymbolizedStack *getSymbolizedStack() const { + CHECK(isSymbolizedStack()); + return SymbolizedLoc; + } }; -/// Try to obtain a location for the caller. This might fail, and produce either -/// an invalid location or a module location for the caller. -Location getCallerLocation(uptr CallerLoc = GET_CALLER_PC()); - -/// Try to obtain a location for the given function pointer. This might fail, -/// and produce either an invalid location or a module location for the caller. -/// If FName is non-null and the name of the function is known, set *FName to -/// the function name, otherwise *FName is unchanged. -Location getFunctionLocation(uptr Loc, const char **FName); - /// A diagnostic severity level. enum DiagLevel { DL_Error, ///< An error. @@ -106,11 +111,11 @@ public: const char *getText() const { return Text; } }; -/// \brief A mangled C++ name. Really just a strong typedef for 'const char*'. -class MangledName { +/// \brief A C++ type name. Really just a strong typedef for 'const char*'. +class TypeName { const char *Name; public: - MangledName(const char *Name) : Name(Name) {} + TypeName(const char *Name) : Name(Name) {} const char *getName() const { return Name; } }; @@ -134,7 +139,7 @@ public: /// Kinds of arguments, corresponding to members of \c Arg's union. enum ArgKind { AK_String, ///< A string argument, displayed as-is. - AK_Mangled,///< A C++ mangled name, demangled before display. + AK_TypeName,///< A C++ type name, possibly demangled before display. AK_UInt, ///< An unsigned integer argument. AK_SInt, ///< A signed integer argument. AK_Float, ///< A floating-point argument. 
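The Arg kinds above rename AK_Mangled to AK_TypeName, and outside Windows the name is demangled only at display time through the symbolizer. As a hedged, self-contained aside on what that demangling amounts to, here is a sketch using the Itanium ABI helper rather than the patch's Symbolizer::GetOrInit()->Demangle(); the namespace and type are made up for the example.

#include <cstdio>
#include <cstdlib>
#include <cxxabi.h>
#include <typeinfo>

namespace demo { struct Widget {}; }   // hypothetical type for illustration

int main() {
  const char *mangled = typeid(demo::Widget).name();   // e.g. "N4demo6WidgetE"
  int status = 0;
  char *demangled = abi::__cxa_demangle(mangled, nullptr, nullptr, &status);
  // Print the readable name when demangling succeeds, else fall back to the
  // mangled spelling, mirroring how a type name is quoted in diagnostics.
  std::printf("'%s'\n", (status == 0 && demangled) ? demangled : mangled);
  std::free(demangled);
  return 0;
}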
@@ -145,7 +150,7 @@ public: struct Arg { Arg() {} Arg(const char *String) : Kind(AK_String), String(String) {} - Arg(MangledName MN) : Kind(AK_Mangled), String(MN.getName()) {} + Arg(TypeName TN) : Kind(AK_TypeName), String(TN.getName()) {} Arg(UIntMax UInt) : Kind(AK_UInt), UInt(UInt) {} Arg(SIntMax SInt) : Kind(AK_SInt), SInt(SInt) {} Arg(FloatMax Float) : Kind(AK_Float), Float(Float) {} @@ -195,7 +200,7 @@ public: ~Diag(); Diag &operator<<(const char *Str) { return AddArg(Str); } - Diag &operator<<(MangledName MN) { return AddArg(MN); } + Diag &operator<<(TypeName TN) { return AddArg(TN); } Diag &operator<<(unsigned long long V) { return AddArg(UIntMax(V)); } Diag &operator<<(const void *V) { return AddArg(V); } Diag &operator<<(const TypeDescriptor &V); @@ -212,6 +217,12 @@ struct ReportOptions { uptr bp; }; +enum class ErrorType { +#define UBSAN_CHECK(Name, SummaryKind, FlagName) Name, +#include "ubsan_checks.inc" +#undef UBSAN_CHECK +}; + #define GET_REPORT_OPTIONS(die_after_report) \ GET_CALLER_PC_BP; \ ReportOptions Opts = {die_after_report, pc, bp} @@ -222,13 +233,17 @@ struct ReportOptions { class ScopedReport { ReportOptions Opts; Location SummaryLoc; + ErrorType Type; public: - ScopedReport(ReportOptions Opts, Location SummaryLoc); + ScopedReport(ReportOptions Opts, Location SummaryLoc, + ErrorType Type = ErrorType::GenericUB); + void setErrorType(ErrorType T) { Type = T; } ~ScopedReport(); }; -bool MatchSuppression(const char *Str, SuppressionType Type); +void InitializeSuppressions(); +bool IsVptrCheckSuppressed(const char *TypeName); } // namespace __ubsan diff --git a/libsanitizer/ubsan/ubsan_flags.cc b/libsanitizer/ubsan/ubsan_flags.cc index 9645a857260..4e7db7a6a57 100644 --- a/libsanitizer/ubsan/ubsan_flags.cc +++ b/libsanitizer/ubsan/ubsan_flags.cc @@ -9,53 +9,78 @@ // //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB #include "ubsan_flags.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" namespace __ubsan { -static const char *MaybeCallUbsanDefaultOptions() { +const char *MaybeCallUbsanDefaultOptions() { return (&__ubsan_default_options) ? __ubsan_default_options() : ""; } -void InitializeCommonFlags() { - CommonFlags *cf = common_flags(); - SetCommonFlagsDefaults(cf); - cf->print_summary = false; - // Override from user-specified string. - ParseCommonFlagsFromString(cf, MaybeCallUbsanDefaultOptions()); - // Override from environment variable. 
- ParseCommonFlagsFromString(cf, GetEnv("UBSAN_OPTIONS")); -} - Flags ubsan_flags; -static void ParseFlagsFromString(Flags *f, const char *str) { - if (!str) - return; - ParseFlag(str, &f->halt_on_error, "halt_on_error", - "Crash the program after printing the first error report"); - ParseFlag(str, &f->print_stacktrace, "print_stacktrace", - "Include full stacktrace into an error report"); +void Flags::SetDefaults() { +#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "ubsan_flags.inc" +#undef UBSAN_FLAG +} + +void RegisterUbsanFlags(FlagParser *parser, Flags *f) { +#define UBSAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "ubsan_flags.inc" +#undef UBSAN_FLAG } void InitializeFlags() { + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.print_summary = false; + OverrideCommonFlags(cf); + } + Flags *f = flags(); - // Default values. - f->halt_on_error = false; - f->print_stacktrace = false; + f->SetDefaults(); + + FlagParser parser; + RegisterCommonFlags(&parser); + RegisterUbsanFlags(&parser, f); + // Override from user-specified string. - ParseFlagsFromString(f, MaybeCallUbsanDefaultOptions()); + parser.ParseString(MaybeCallUbsanDefaultOptions()); // Override from environment variable. - ParseFlagsFromString(f, GetEnv("UBSAN_OPTIONS")); + parser.ParseString(GetEnv("UBSAN_OPTIONS")); + SetVerbosity(common_flags()->verbosity); + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) parser.PrintFlagDescriptions(); } } // namespace __ubsan -#if !SANITIZER_SUPPORTS_WEAK_HOOKS extern "C" { + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *__ubsan_default_options() { return ""; } -} // extern "C" #endif + +#if SANITIZER_WINDOWS +const char *__ubsan_default_default_options() { return ""; } +# ifdef _WIN64 +# pragma comment(linker, "/alternatename:__ubsan_default_options=__ubsan_default_default_options") +# else +# pragma comment(linker, "/alternatename:___ubsan_default_options=___ubsan_default_default_options") +# endif +#endif + +} // extern "C" + +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_flags.h b/libsanitizer/ubsan/ubsan_flags.h index 00be9b004f4..2604b6b00cc 100644 --- a/libsanitizer/ubsan/ubsan_flags.h +++ b/libsanitizer/ubsan/ubsan_flags.h @@ -13,18 +13,27 @@ #include "sanitizer_common/sanitizer_internal_defs.h" +namespace __sanitizer { +class FlagParser; +} + namespace __ubsan { struct Flags { - bool halt_on_error; - bool print_stacktrace; +#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "ubsan_flags.inc" +#undef UBSAN_FLAG + + void SetDefaults(); }; extern Flags ubsan_flags; inline Flags *flags() { return &ubsan_flags; } -void InitializeCommonFlags(); void InitializeFlags(); +void RegisterUbsanFlags(FlagParser *parser, Flags *f); + +const char *MaybeCallUbsanDefaultOptions(); } // namespace __ubsan diff --git a/libsanitizer/ubsan/ubsan_flags.inc b/libsanitizer/ubsan/ubsan_flags.inc new file mode 100644 index 00000000000..170777adf26 --- /dev/null +++ b/libsanitizer/ubsan/ubsan_flags.inc @@ -0,0 +1,24 @@ +//===-- ubsan_flags.inc -----------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// UBSan runtime flags. 
+// +//===----------------------------------------------------------------------===// +#ifndef UBSAN_FLAG +# error "Define UBSAN_FLAG prior to including this file!" +#endif + +// UBSAN_FLAG(Type, Name, DefaultValue, Description) +// See COMMON_FLAG in sanitizer_flags.inc for more details. + +UBSAN_FLAG(bool, halt_on_error, false, + "Crash the program after printing the first error report") +UBSAN_FLAG(bool, print_stacktrace, false, + "Include full stacktrace into an error report") +UBSAN_FLAG(const char *, suppressions, "", "Suppressions file name.") +UBSAN_FLAG(bool, report_error_type, false, + "Print specific error type instead of 'undefined-behavior' in summary.") diff --git a/libsanitizer/ubsan/ubsan_handlers.cc b/libsanitizer/ubsan/ubsan_handlers.cc index 770d3c28a4a..8530fcffc88 100644 --- a/libsanitizer/ubsan/ubsan_handlers.cc +++ b/libsanitizer/ubsan/ubsan_handlers.cc @@ -9,6 +9,8 @@ // //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB #include "ubsan_handlers.h" #include "ubsan_diag.h" @@ -35,29 +37,36 @@ const char *TypeCheckKinds[] = { } static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, - Location FallbackLoc, ReportOptions Opts) { + ReportOptions Opts) { Location Loc = Data->Loc.acquire(); // Use the SourceLocation from Data to track deduplication, even if 'invalid' if (ignoreReport(Loc.getSourceLocation(), Opts)) return; - if (Data->Loc.isInvalid()) + SymbolizedStackHolder FallbackLoc; + if (Data->Loc.isInvalid()) { + FallbackLoc.reset(getCallerLocation(Opts.pc)); Loc = FallbackLoc; + } ScopedReport R(Opts, Loc); - if (!Pointer) + if (!Pointer) { + R.setErrorType(ErrorType::NullPointerUse); Diag(Loc, DL_Error, "%0 null pointer of type %1") << TypeCheckKinds[Data->TypeCheckKind] << Data->Type; - else if (Data->Alignment && (Pointer & (Data->Alignment - 1))) + } else if (Data->Alignment && (Pointer & (Data->Alignment - 1))) { + R.setErrorType(ErrorType::MisalignedPointerUse); Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, " "which requires %2 byte alignment") << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Alignment << Data->Type; - else + } else { + R.setErrorType(ErrorType::InsufficientObjectSize); Diag(Loc, DL_Error, "%0 address %1 with insufficient space " "for an object of type %2") << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type; + } if (Pointer) Diag(Pointer, DL_Note, "pointer points here"); } @@ -65,12 +74,12 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer, void __ubsan::__ubsan_handle_type_mismatch(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(false); - handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts); + handleTypeMismatchImpl(Data, Pointer, Opts); } void __ubsan::__ubsan_handle_type_mismatch_abort(TypeMismatchData *Data, ValueHandle Pointer) { GET_REPORT_OPTIONS(true); - handleTypeMismatchImpl(Data, Pointer, getCallerLocation(), Opts); + handleTypeMismatchImpl(Data, Pointer, Opts); Die(); } @@ -83,11 +92,13 @@ static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS, if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + bool IsSigned = Data->Type.isSignedIntegerTy(); + ScopedReport R(Opts, Loc, IsSigned ? ErrorType::SignedIntegerOverflow + : ErrorType::UnsignedIntegerOverflow); Diag(Loc, DL_Error, "%0 integer overflow: " "%1 %2 %3 cannot be represented in type %4") - << (Data->Type.isSignedIntegerTy() ? 
"signed" : "unsigned") + << (IsSigned ? "signed" : "unsigned") << Value(Data->Type, LHS) << Operator << RHS << Data->Type; } @@ -112,17 +123,18 @@ static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal, if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + bool IsSigned = Data->Type.isSignedIntegerTy(); + ScopedReport R(Opts, Loc, IsSigned ? ErrorType::SignedIntegerOverflow + : ErrorType::UnsignedIntegerOverflow); - if (Data->Type.isSignedIntegerTy()) + if (IsSigned) Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1; " "cast to an unsigned type to negate this value to itself") - << Value(Data->Type, OldVal) << Data->Type; + << Value(Data->Type, OldVal) << Data->Type; else - Diag(Loc, DL_Error, - "negation of %0 cannot be represented in type %1") - << Value(Data->Type, OldVal) << Data->Type; + Diag(Loc, DL_Error, "negation of %0 cannot be represented in type %1") + << Value(Data->Type, OldVal) << Data->Type; } void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data, @@ -147,12 +159,16 @@ static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS, Value LHSVal(Data->Type, LHS); Value RHSVal(Data->Type, RHS); - if (RHSVal.isMinusOne()) + if (RHSVal.isMinusOne()) { + R.setErrorType(ErrorType::SignedIntegerOverflow); Diag(Loc, DL_Error, "division of %0 by -1 cannot be represented in type %1") << LHSVal << Data->Type; - else + } else { + R.setErrorType(Data->Type.isIntegerTy() ? ErrorType::IntegerDivideByZero + : ErrorType::FloatDivideByZero); Diag(Loc, DL_Error, "division by zero"); + } } void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data, @@ -179,18 +195,23 @@ static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data, Value LHSVal(Data->LHSType, LHS); Value RHSVal(Data->RHSType, RHS); - if (RHSVal.isNegative()) + if (RHSVal.isNegative()) { + R.setErrorType(ErrorType::InvalidShiftExponent); Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal; - else if (RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth()) - Diag(Loc, DL_Error, - "shift exponent %0 is too large for %1-bit type %2") - << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; - else if (LHSVal.isNegative()) + } else if (RHSVal.getPositiveIntValue() >= + Data->LHSType.getIntegerBitWidth()) { + R.setErrorType(ErrorType::InvalidShiftExponent); + Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type %2") + << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType; + } else if (LHSVal.isNegative()) { + R.setErrorType(ErrorType::InvalidShiftBase); Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal; - else + } else { + R.setErrorType(ErrorType::InvalidShiftBase); Diag(Loc, DL_Error, "left shift of %0 by %1 places cannot be represented in type %2") - << LHSVal << RHSVal << Data->LHSType; + << LHSVal << RHSVal << Data->LHSType; + } } void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data, @@ -214,7 +235,7 @@ static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index, if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + ScopedReport R(Opts, Loc, ErrorType::OutOfBoundsIndex); Value IndexVal(Data->IndexType, Index); Diag(Loc, DL_Error, "index %0 out of bounds for type %1") @@ -235,7 +256,7 @@ void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data, static void handleBuiltinUnreachableImpl(UnreachableData *Data, ReportOptions Opts) { - ScopedReport R(Opts, Data->Loc); + ScopedReport R(Opts, Data->Loc, 
ErrorType::UnreachableCall); Diag(Data->Loc, DL_Error, "execution reached a __builtin_unreachable() call"); } @@ -246,7 +267,7 @@ void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) { } static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) { - ScopedReport R(Opts, Data->Loc); + ScopedReport R(Opts, Data->Loc, ErrorType::MissingReturn); Diag(Data->Loc, DL_Error, "execution reached the end of a value-returning function " "without returning a value"); @@ -264,7 +285,7 @@ static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound, if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + ScopedReport R(Opts, Loc, ErrorType::NonPositiveVLAIndex); Diag(Loc, DL_Error, "variable length array bound evaluates to " "non-positive value %0") @@ -283,25 +304,59 @@ void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data, Die(); } -static void handleFloatCastOverflow(FloatCastOverflowData *Data, - ValueHandle From, ReportOptions Opts) { - // TODO: Add deduplication once a SourceLocation is generated for this check. - Location Loc = getCallerLocation(); - ScopedReport R(Opts, Loc); +static bool looksLikeFloatCastOverflowDataV1(void *Data) { + // First field is either a pointer to filename or a pointer to a + // TypeDescriptor. + u8 *FilenameOrTypeDescriptor; + internal_memcpy(&FilenameOrTypeDescriptor, Data, + sizeof(FilenameOrTypeDescriptor)); + + // Heuristic: For float_cast_overflow, the TypeKind will be either TK_Integer + // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If both types are known, + // adding both bytes will be 0 or 1 (for BE or LE). If it were a filename, + // adding two printable characters will not yield such a value. Otherwise, + // if one of them is 0xff, this is most likely TK_Unknown type descriptor. 
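// (Illustrative arithmetic, not part of the upstream patch: a TypeDescriptor
// begins with a 16-bit TypeKind, so its first two bytes are {0x00,0x00},
// {0x00,0x01}/{0x01,0x00} or {0xff,0xff}; printable ASCII bytes are >= 0x20,
// so the first two characters of a path such as "a.c" sum to
// 0x61 + 0x2e = 0x8f, which is neither 0 nor 1 and contains no 0xff byte.)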
+ u16 MaybeFromTypeKind = + FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1]; + return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff || + FilenameOrTypeDescriptor[1] == 0xff; +} + +static void handleFloatCastOverflow(void *DataPtr, ValueHandle From, + ReportOptions Opts) { + SymbolizedStackHolder CallerLoc; + Location Loc; + const TypeDescriptor *FromType, *ToType; + + if (looksLikeFloatCastOverflowDataV1(DataPtr)) { + auto Data = reinterpret_cast<FloatCastOverflowData *>(DataPtr); + CallerLoc.reset(getCallerLocation(Opts.pc)); + Loc = CallerLoc; + FromType = &Data->FromType; + ToType = &Data->ToType; + } else { + auto Data = reinterpret_cast<FloatCastOverflowDataV2 *>(DataPtr); + SourceLocation SLoc = Data->Loc.acquire(); + if (ignoreReport(SLoc, Opts)) + return; + Loc = SLoc; + FromType = &Data->FromType; + ToType = &Data->ToType; + } + + ScopedReport R(Opts, Loc, ErrorType::FloatCastOverflow); Diag(Loc, DL_Error, "value %0 is outside the range of representable values of type %2") - << Value(Data->FromType, From) << Data->FromType << Data->ToType; + << Value(*FromType, From) << *FromType << *ToType; } -void __ubsan::__ubsan_handle_float_cast_overflow(FloatCastOverflowData *Data, - ValueHandle From) { +void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) { GET_REPORT_OPTIONS(false); handleFloatCastOverflow(Data, From, Opts); } -void -__ubsan::__ubsan_handle_float_cast_overflow_abort(FloatCastOverflowData *Data, - ValueHandle From) { +void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data, + ValueHandle From) { GET_REPORT_OPTIONS(true); handleFloatCastOverflow(Data, From, Opts); Die(); @@ -313,7 +368,11 @@ static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val, if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + // This check could be more precise if we used different handlers for + // -fsanitize=bool and -fsanitize=enum. + bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")); + ScopedReport R(Opts, Loc, IsBool ? 
ErrorType::InvalidBoolLoad + : ErrorType::InvalidEnumLoad); Diag(Loc, DL_Error, "load of value %0, which is not a valid value for type %1") @@ -335,16 +394,21 @@ void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data, static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data, ValueHandle Function, ReportOptions Opts) { - const char *FName = "(unknown)"; + SourceLocation CallLoc = Data->Loc.acquire(); + if (ignoreReport(CallLoc, Opts)) + return; - Location Loc = getFunctionLocation(Function, &FName); + ScopedReport R(Opts, CallLoc, ErrorType::FunctionTypeMismatch); - ScopedReport R(Opts, Loc); + SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); + const char *FName = FLoc.get()->info.function; + if (!FName) + FName = "(unknown)"; - Diag(Data->Loc, DL_Error, + Diag(CallLoc, DL_Error, "call to function %0 through pointer to incorrect function type %1") - << FName << Data->Type; - Diag(Loc, DL_Note, "%0 defined here") << FName; + << FName << Data->Type; + Diag(FLoc, DL_Note, "%0 defined here") << FName; } void @@ -366,7 +430,7 @@ static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) { if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + ScopedReport R(Opts, Loc, ErrorType::InvalidNullReturn); Diag(Loc, DL_Error, "null pointer returned from function declared to never " "return null"); @@ -390,7 +454,7 @@ static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) { if (ignoreReport(Loc, Opts)) return; - ScopedReport R(Opts, Loc); + ScopedReport R(Opts, Loc, ErrorType::InvalidNullArgument); Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to " "never be null") << Data->ArgIndex; @@ -408,3 +472,37 @@ void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) { handleNonNullArg(Data, Opts); Die(); } + +static void handleCFIBadIcall(CFIBadIcallData *Data, ValueHandle Function, + ReportOptions Opts) { + SourceLocation Loc = Data->Loc.acquire(); + if (ignoreReport(Loc, Opts)) + return; + + ScopedReport R(Opts, Loc); + + Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during " + "indirect function call") + << Data->Type; + + SymbolizedStackHolder FLoc(getSymbolizedLocation(Function)); + const char *FName = FLoc.get()->info.function; + if (!FName) + FName = "(unknown)"; + Diag(FLoc, DL_Note, "%0 defined here") << FName; +} + +void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *Data, + ValueHandle Function) { + GET_REPORT_OPTIONS(false); + handleCFIBadIcall(Data, Function, Opts); +} + +void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *Data, + ValueHandle Function) { + GET_REPORT_OPTIONS(true); + handleCFIBadIcall(Data, Function, Opts); + Die(); +} + +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h index 92365d81893..668535868e9 100644 --- a/libsanitizer/ubsan/ubsan_handlers.h +++ b/libsanitizer/ubsan/ubsan_handlers.h @@ -95,14 +95,22 @@ struct VLABoundData { /// \brief Handle a VLA with a non-positive bound. RECOVERABLE(vla_bound_not_positive, VLABoundData *Data, ValueHandle Bound) +// Keeping this around for binary compatibility with (sanitized) programs +// compiled with older compilers. struct FloatCastOverflowData { - // FIXME: SourceLocation Loc; const TypeDescriptor &FromType; const TypeDescriptor &ToType; }; -/// \brief Handle overflow in a conversion to or from a floating-point type. 
-RECOVERABLE(float_cast_overflow, FloatCastOverflowData *Data, ValueHandle From) +struct FloatCastOverflowDataV2 { + SourceLocation Loc; + const TypeDescriptor &FromType; + const TypeDescriptor &ToType; +}; + +/// Handle overflow in a conversion to or from a floating-point type. +/// void *Data is one of FloatCastOverflowData* or FloatCastOverflowDataV2* +RECOVERABLE(float_cast_overflow, void *Data, ValueHandle From) struct InvalidValueData { SourceLocation Loc; @@ -138,6 +146,14 @@ struct NonNullArgData { /// \brief Handle passing null pointer to function with nonnull attribute. RECOVERABLE(nonnull_arg, NonNullArgData *Data) +struct CFIBadIcallData { + SourceLocation Loc; + const TypeDescriptor &Type; +}; + +/// \brief Handle control flow integrity failure for indirect function calls. +RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function) + } #endif // UBSAN_HANDLERS_H diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.cc b/libsanitizer/ubsan/ubsan_handlers_cxx.cc index 86f3e57a8bc..b50b4d4636d 100644 --- a/libsanitizer/ubsan/ubsan_handlers_cxx.cc +++ b/libsanitizer/ubsan/ubsan_handlers_cxx.cc @@ -11,6 +11,8 @@ // //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB #include "ubsan_handlers_cxx.h" #include "ubsan_diag.h" #include "ubsan_type_hash.h" @@ -33,16 +35,15 @@ static void HandleDynamicTypeCacheMiss( return; // Check if error report should be suppressed. - DynamicTypeInfo DTI = getDynamicTypeInfo((void*)Pointer); - if (DTI.isValid() && - MatchSuppression(DTI.getMostDerivedTypeName(), SuppressionVptrCheck)) + DynamicTypeInfo DTI = getDynamicTypeInfoFromObject((void*)Pointer); + if (DTI.isValid() && IsVptrCheckSuppressed(DTI.getMostDerivedTypeName())) return; SourceLocation Loc = Data->Loc.acquire(); if (Loc.isDisabled()) return; - ScopedReport R(Opts, Loc); + ScopedReport R(Opts, Loc, ErrorType::DynamicTypeMismatch); Diag(Loc, DL_Error, "%0 address %1 which does not point to an object of type %2") @@ -51,19 +52,19 @@ static void HandleDynamicTypeCacheMiss( // If possible, say what type it actually points to. if (!DTI.isValid()) Diag(Pointer, DL_Note, "object has invalid vptr") - << MangledName(DTI.getMostDerivedTypeName()) + << TypeName(DTI.getMostDerivedTypeName()) << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr"); else if (!DTI.getOffset()) Diag(Pointer, DL_Note, "object is of type %0") - << MangledName(DTI.getMostDerivedTypeName()) + << TypeName(DTI.getMostDerivedTypeName()) << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0"); else // FIXME: Find the type at the specified offset, and include that // in the note. 
Diag(Pointer - DTI.getOffset(), DL_Note, "object is base class subobject at offset %0 within object of type %1") - << DTI.getOffset() << MangledName(DTI.getMostDerivedTypeName()) - << MangledName(DTI.getSubobjectTypeName()) + << DTI.getOffset() << TypeName(DTI.getMostDerivedTypeName()) + << TypeName(DTI.getSubobjectTypeName()) << Range(Pointer, Pointer + sizeof(uptr), "vptr for %2 base class of %1"); } @@ -78,3 +79,42 @@ void __ubsan::__ubsan_handle_dynamic_type_cache_miss_abort( GET_REPORT_OPTIONS(true); HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts); } + +static void HandleCFIBadType(CFIBadTypeData *Data, ValueHandle Vtable, + ReportOptions Opts) { + SourceLocation Loc = Data->Loc.acquire(); + ScopedReport R(Opts, Loc, ErrorType::CFIBadType); + DynamicTypeInfo DTI = getDynamicTypeInfoFromVtable((void*)Vtable); + + static const char *TypeCheckKinds[] = { + "virtual call", + "non-virtual call", + "base-to-derived cast", + "cast to unrelated type", + }; + + Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during " + "%1 (vtable address %2)") + << Data->Type << TypeCheckKinds[Data->TypeCheckKind] << (void *)Vtable; + + // If possible, say what type it actually points to. + if (!DTI.isValid()) + Diag(Vtable, DL_Note, "invalid vtable"); + else + Diag(Vtable, DL_Note, "vtable is of type %0") + << TypeName(DTI.getMostDerivedTypeName()); +} + +void __ubsan::__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data, + ValueHandle Vtable) { + GET_REPORT_OPTIONS(false); + HandleCFIBadType(Data, Vtable, Opts); +} + +void __ubsan::__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data, + ValueHandle Vtable) { + GET_REPORT_OPTIONS(true); + HandleCFIBadType(Data, Vtable, Opts); +} + +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.h b/libsanitizer/ubsan/ubsan_handlers_cxx.h index 3419744e390..b1486864298 100644 --- a/libsanitizer/ubsan/ubsan_handlers_cxx.h +++ b/libsanitizer/ubsan/ubsan_handlers_cxx.h @@ -23,6 +23,12 @@ struct DynamicTypeCacheMissData { unsigned char TypeCheckKind; }; +struct CFIBadTypeData { + SourceLocation Loc; + const TypeDescriptor &Type; + unsigned char TypeCheckKind; +}; + /// \brief Handle a runtime type check failure, caused by an incorrect vptr. /// When this handler is called, all we know is that the type was not in the /// cache; this does not necessarily imply the existence of a bug. @@ -33,6 +39,13 @@ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_dynamic_type_cache_miss_abort( DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash); +/// \brief Handle a control flow integrity check failure by printing a +/// diagnostic. 
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void +__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data, ValueHandle Vtable); +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void +__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data, ValueHandle Vtable); + } #endif // UBSAN_HANDLERS_H diff --git a/libsanitizer/ubsan/ubsan_init.cc b/libsanitizer/ubsan/ubsan_init.cc index f28fc811f50..5da4122c07a 100644 --- a/libsanitizer/ubsan/ubsan_init.cc +++ b/libsanitizer/ubsan/ubsan_init.cc @@ -9,51 +9,68 @@ // //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB +#include "ubsan_diag.h" #include "ubsan_init.h" #include "ubsan_flags.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" #include "sanitizer_common/sanitizer_mutex.h" -#include "sanitizer_common/sanitizer_suppressions.h" #include "sanitizer_common/sanitizer_symbolizer.h" using namespace __ubsan; -static bool ubsan_inited; +static enum { + UBSAN_MODE_UNKNOWN = 0, + UBSAN_MODE_STANDALONE, + UBSAN_MODE_PLUGIN +} ubsan_mode; +static StaticSpinMutex ubsan_init_mu; -void __ubsan::InitIfNecessary() { -#if !SANITIZER_CAN_USE_PREINIT_ARRAY - // No need to lock mutex if we're initializing from preinit array. - static StaticSpinMutex init_mu; - SpinMutexLock l(&init_mu); -#endif - if (LIKELY(ubsan_inited)) - return; - if (0 == internal_strcmp(SanitizerToolName, "SanitizerTool")) { - // WARNING: If this condition holds, then either UBSan runs in a standalone - // mode, or initializer for another sanitizer hasn't run yet. In a latter - // case, another sanitizer will overwrite "SanitizerToolName" and reparse - // common flags. It means, that we are not allowed to *use* common flags - // in this function. - SanitizerToolName = "UndefinedBehaviorSanitizer"; - InitializeCommonFlags(); - } - // Initialize UBSan-specific flags. +static void CommonInit() { + InitializeSuppressions(); +} + +static void CommonStandaloneInit() { + SanitizerToolName = "UndefinedBehaviorSanitizer"; InitializeFlags(); - SuppressionContext::InitIfNecessary(); - ubsan_inited = true; + CacheBinaryName(); + __sanitizer_set_report_path(common_flags()->log_path); + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + CommonInit(); + ubsan_mode = UBSAN_MODE_STANDALONE; +} + +void __ubsan::InitAsStandalone() { + if (SANITIZER_CAN_USE_PREINIT_ARRAY) { + CHECK_EQ(UBSAN_MODE_UNKNOWN, ubsan_mode); + CommonStandaloneInit(); + return; + } + SpinMutexLock l(&ubsan_init_mu); + CHECK_NE(UBSAN_MODE_PLUGIN, ubsan_mode); + if (ubsan_mode == UBSAN_MODE_UNKNOWN) + CommonStandaloneInit(); } -#if SANITIZER_CAN_USE_PREINIT_ARRAY -__attribute__((section(".preinit_array"), used)) -void (*__local_ubsan_preinit)(void) = __ubsan::InitIfNecessary; -#else -// Use a dynamic initializer. 
-class UbsanInitializer { - public: - UbsanInitializer() { - InitIfNecessary(); +void __ubsan::InitAsStandaloneIfNecessary() { + if (SANITIZER_CAN_USE_PREINIT_ARRAY) { + CHECK_NE(UBSAN_MODE_UNKNOWN, ubsan_mode); + return; } -}; -static UbsanInitializer ubsan_initializer; -#endif // SANITIZER_CAN_USE_PREINIT_ARRAY + SpinMutexLock l(&ubsan_init_mu); + if (ubsan_mode == UBSAN_MODE_UNKNOWN) + CommonStandaloneInit(); +} + +void __ubsan::InitAsPlugin() { +#if !SANITIZER_CAN_USE_PREINIT_ARRAY + SpinMutexLock l(&ubsan_init_mu); +#endif + CHECK_EQ(UBSAN_MODE_UNKNOWN, ubsan_mode); + CommonInit(); + ubsan_mode = UBSAN_MODE_PLUGIN; +} + +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_init.h b/libsanitizer/ubsan/ubsan_init.h index b76bbfe7bb7..6a8366f82bf 100644 --- a/libsanitizer/ubsan/ubsan_init.h +++ b/libsanitizer/ubsan/ubsan_init.h @@ -13,9 +13,16 @@ namespace __ubsan { -// NOTE: This function might take a lock (if .preinit_array initialization is -// not used). It's generally a bad idea to call it on a fast path. -void InitIfNecessary(); +// Initialize UBSan as a standalone tool. Typically should be called early +// during initialization. +void InitAsStandalone(); + +// Initialize UBSan as a standalone tool, if it hasn't been initialized before. +void InitAsStandaloneIfNecessary(); + +// Initializes UBSan as a plugin tool. This function should be called once +// from "parent tool" (e.g. ASan) initialization. +void InitAsPlugin(); } // namespace __ubsan diff --git a/libsanitizer/ubsan/ubsan_init_standalone.cc b/libsanitizer/ubsan/ubsan_init_standalone.cc new file mode 100644 index 00000000000..1630fd724fb --- /dev/null +++ b/libsanitizer/ubsan/ubsan_init_standalone.cc @@ -0,0 +1,32 @@ +//===-- ubsan_init_standalone.cc ------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Initialization of standalone UBSan runtime. +// +//===----------------------------------------------------------------------===// + +#include "ubsan_platform.h" +#if !CAN_SANITIZE_UB +# error "UBSan is not supported on this platform!" +#endif + +#include "sanitizer_common/sanitizer_internal_defs.h" +#include "ubsan_init.h" + +#if SANITIZER_CAN_USE_PREINIT_ARRAY +__attribute__((section(".preinit_array"), used)) +void (*__local_ubsan_preinit)(void) = __ubsan::InitAsStandalone; +#else +// Use a dynamic initializer. +class UbsanStandaloneInitializer { + public: + UbsanStandaloneInitializer() { + __ubsan::InitAsStandalone(); + } +}; +static UbsanStandaloneInitializer ubsan_standalone_initializer; +#endif // SANITIZER_CAN_USE_PREINIT_ARRAY diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h new file mode 100644 index 00000000000..999bf815493 --- /dev/null +++ b/libsanitizer/ubsan/ubsan_platform.h @@ -0,0 +1,27 @@ +//===-- ubsan_platform.h ----------------------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Defines the platforms which UBSan is supported at. +// +//===----------------------------------------------------------------------===// +#ifndef UBSAN_PLATFORM_H +#define UBSAN_PLATFORM_H + +#ifndef CAN_SANITIZE_UB +// Other platforms should be easy to add, and probably work as-is. 
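// (Note, not part of the upstream patch: because of the #ifndef guard above,
// a build system can also force the value from the compiler command line,
// e.g. with -DCAN_SANITIZE_UB=0, without editing this header.)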
+#if (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) && \ + (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \ + defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__)) +# define CAN_SANITIZE_UB 1 +#elif defined(_WIN32) +# define CAN_SANITIZE_UB 1 +#else +# define CAN_SANITIZE_UB 0 +#endif +#endif //CAN_SANITIZE_UB + +#endif diff --git a/libsanitizer/ubsan/ubsan_type_hash.cc b/libsanitizer/ubsan/ubsan_type_hash.cc index 5eab1f561f2..65160aa4aba 100644 --- a/libsanitizer/ubsan/ubsan_type_hash.cc +++ b/libsanitizer/ubsan/ubsan_type_hash.cc @@ -9,240 +9,24 @@ // relationships. This file is only linked into C++ compilations, and is // permitted to use language features which require a C++ ABI library. // +// Most of the implementation lives in an ABI-specific source file +// (ubsan_type_hash_{itanium,win}.cc). +// //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB #include "ubsan_type_hash.h" #include "sanitizer_common/sanitizer_common.h" -// The following are intended to be binary compatible with the definitions -// given in the Itanium ABI. We make no attempt to be ODR-compatible with -// those definitions, since existing ABI implementations aren't. - -namespace std { - class type_info { - public: - virtual ~type_info(); - - const char *__type_name; - }; -} - -namespace __cxxabiv1 { - -/// Type info for classes with no bases, and base class for type info for -/// classes with bases. -class __class_type_info : public std::type_info { - virtual ~__class_type_info(); -}; - -/// Type info for classes with simple single public inheritance. -class __si_class_type_info : public __class_type_info { -public: - virtual ~__si_class_type_info(); - - const __class_type_info *__base_type; -}; - -class __base_class_type_info { -public: - const __class_type_info *__base_type; - long __offset_flags; - - enum __offset_flags_masks { - __virtual_mask = 0x1, - __public_mask = 0x2, - __offset_shift = 8 - }; -}; - -/// Type info for classes with multiple, virtual, or non-public inheritance. -class __vmi_class_type_info : public __class_type_info { -public: - virtual ~__vmi_class_type_info(); - - unsigned int flags; - unsigned int base_count; - __base_class_type_info base_info[1]; -}; - -} - -namespace abi = __cxxabiv1; - -// We implement a simple two-level cache for type-checking results. For each -// (vptr,type) pair, a hash is computed. This hash is assumed to be globally -// unique; if it collides, we will get false negatives, but: -// * such a collision would have to occur on the *first* bad access, -// * the probability of such a collision is low (and for a 64-bit target, is -// negligible), and -// * the vptr, and thus the hash, can be affected by ASLR, so multiple runs -// give better coverage. -// -// The first caching layer is a small hash table with no chaining; buckets are -// reused as needed. The second caching layer is a large hash table with open -// chaining. We can freely evict from either layer since this is just a cache. -// -// FIXME: Make these hash table accesses thread-safe. The races here are benign: -// assuming the unsequenced loads and stores don't misbehave too badly, -// the worst case is false negatives or poor cache behavior, not false -// positives or crashes. - -/// Find a bucket to store the given hash value in. 
-static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) { - static const unsigned HashTableSize = 65537; - static __ubsan::HashValue __ubsan_vptr_hash_set[HashTableSize]; - - unsigned First = (V & 65535) ^ 1; - unsigned Probe = First; - for (int Tries = 5; Tries; --Tries) { - if (!__ubsan_vptr_hash_set[Probe] || __ubsan_vptr_hash_set[Probe] == V) - return &__ubsan_vptr_hash_set[Probe]; - Probe += ((V >> 16) & 65535) + 1; - if (Probe >= HashTableSize) - Probe -= HashTableSize; - } - // FIXME: Pick a random entry from the probe sequence to evict rather than - // just taking the first. - return &__ubsan_vptr_hash_set[First]; -} - /// A cache of recently-checked hashes. Mini hash table with "random" evictions. __ubsan::HashValue __ubsan::__ubsan_vptr_type_cache[__ubsan::VptrTypeCacheSize]; -/// \brief Determine whether \p Derived has a \p Base base class subobject at -/// offset \p Offset. -static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, - const abi::__class_type_info *Base, - sptr Offset) { - if (Derived->__type_name == Base->__type_name) - return Offset == 0; - - if (const abi::__si_class_type_info *SI = - dynamic_cast<const abi::__si_class_type_info*>(Derived)) - return isDerivedFromAtOffset(SI->__base_type, Base, Offset); - - const abi::__vmi_class_type_info *VTI = - dynamic_cast<const abi::__vmi_class_type_info*>(Derived); - if (!VTI) - // No base class subobjects. - return false; - - // Look for a base class which is derived from \p Base at the right offset. - for (unsigned int base = 0; base != VTI->base_count; ++base) { - // FIXME: Curtail the recursion if this base can't possibly contain the - // given offset. - sptr OffsetHere = VTI->base_info[base].__offset_flags >> - abi::__base_class_type_info::__offset_shift; - if (VTI->base_info[base].__offset_flags & - abi::__base_class_type_info::__virtual_mask) - // For now, just punt on virtual bases and say 'yes'. - // FIXME: OffsetHere is the offset in the vtable of the virtual base - // offset. Read the vbase offset out of the vtable and use it. - return true; - if (isDerivedFromAtOffset(VTI->base_info[base].__base_type, - Base, Offset - OffsetHere)) - return true; - } - - return false; -} - -/// \brief Find the derived-most dynamic base class of \p Derived at offset -/// \p Offset. -static const abi::__class_type_info *findBaseAtOffset( - const abi::__class_type_info *Derived, sptr Offset) { - if (!Offset) - return Derived; - - if (const abi::__si_class_type_info *SI = - dynamic_cast<const abi::__si_class_type_info*>(Derived)) - return findBaseAtOffset(SI->__base_type, Offset); - - const abi::__vmi_class_type_info *VTI = - dynamic_cast<const abi::__vmi_class_type_info*>(Derived); - if (!VTI) - // No base class subobjects. - return 0; - - for (unsigned int base = 0; base != VTI->base_count; ++base) { - sptr OffsetHere = VTI->base_info[base].__offset_flags >> - abi::__base_class_type_info::__offset_shift; - if (VTI->base_info[base].__offset_flags & - abi::__base_class_type_info::__virtual_mask) - // FIXME: Can't handle virtual bases yet. - continue; - if (const abi::__class_type_info *Base = - findBaseAtOffset(VTI->base_info[base].__base_type, - Offset - OffsetHere)) - return Base; - } - - return 0; -} - -namespace { - -struct VtablePrefix { - /// The offset from the vptr to the start of the most-derived object. - /// This will only be greater than zero in some virtual base class vtables - /// used during object con-/destruction, and will usually be exactly zero. 
- sptr Offset; - /// The type_info object describing the most-derived class type. - std::type_info *TypeInfo; -}; -VtablePrefix *getVtablePrefix(void *Object) { - VtablePrefix **VptrPtr = reinterpret_cast<VtablePrefix**>(Object); - if (!*VptrPtr) - return 0; - VtablePrefix *Prefix = *VptrPtr - 1; - if (!Prefix->TypeInfo) - // This can't possibly be a valid vtable. - return 0; - return Prefix; -} - +__ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfoFromObject(void *Object) { + void *VtablePtr = *reinterpret_cast<void **>(Object); + return getDynamicTypeInfoFromVtable(VtablePtr); } -bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) { - // A crash anywhere within this function probably means the vptr is corrupted. - // FIXME: Perform these checks more cautiously. - - // Check whether this is something we've evicted from the cache. - HashValue *Bucket = getTypeCacheHashTableBucket(Hash); - if (*Bucket == Hash) { - __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash; - return true; - } - - VtablePrefix *Vtable = getVtablePrefix(Object); - if (!Vtable) - return false; - - // Check that this is actually a type_info object for a class type. - abi::__class_type_info *Derived = - dynamic_cast<abi::__class_type_info*>(Vtable->TypeInfo); - if (!Derived) - return false; - - abi::__class_type_info *Base = (abi::__class_type_info*)Type; - if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset)) - return false; - - // Success. Cache this result. - __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash; - *Bucket = Hash; - return true; -} - -__ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfo(void *Object) { - VtablePrefix *Vtable = getVtablePrefix(Object); - if (!Vtable) - return DynamicTypeInfo(0, 0, 0); - const abi::__class_type_info *ObjectType = findBaseAtOffset( - static_cast<const abi::__class_type_info*>(Vtable->TypeInfo), - -Vtable->Offset); - return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset, - ObjectType ? ObjectType->__type_name : "<unknown>"); -} +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_type_hash.h b/libsanitizer/ubsan/ubsan_type_hash.h index 138559f204d..2da070edffa 100644 --- a/libsanitizer/ubsan/ubsan_type_hash.h +++ b/libsanitizer/ubsan/ubsan_type_hash.h @@ -39,7 +39,10 @@ public: }; /// \brief Get information about the dynamic type of an object. -DynamicTypeInfo getDynamicTypeInfo(void *Object); +DynamicTypeInfo getDynamicTypeInfoFromObject(void *Object); + +/// \brief Get information about the dynamic type of an object from its vtable. +DynamicTypeInfo getDynamicTypeInfoFromVtable(void *Vtable); /// \brief Check whether the dynamic type of \p Object has a \p Type subobject /// at offset 0. diff --git a/libsanitizer/ubsan/ubsan_type_hash_itanium.cc b/libsanitizer/ubsan/ubsan_type_hash_itanium.cc new file mode 100644 index 00000000000..e4f133434d6 --- /dev/null +++ b/libsanitizer/ubsan/ubsan_type_hash_itanium.cc @@ -0,0 +1,249 @@ +//===-- ubsan_type_hash_itanium.cc ----------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implementation of type hashing/lookup for Itanium C++ ABI. 
+// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB && !SANITIZER_WINDOWS +#include "ubsan_type_hash.h" + +#include "sanitizer_common/sanitizer_common.h" + +// The following are intended to be binary compatible with the definitions +// given in the Itanium ABI. We make no attempt to be ODR-compatible with +// those definitions, since existing ABI implementations aren't. + +namespace std { + class type_info { + public: + virtual ~type_info(); + + const char *__type_name; + }; +} + +namespace __cxxabiv1 { + +/// Type info for classes with no bases, and base class for type info for +/// classes with bases. +class __class_type_info : public std::type_info { + ~__class_type_info() override; +}; + +/// Type info for classes with simple single public inheritance. +class __si_class_type_info : public __class_type_info { +public: + ~__si_class_type_info() override; + + const __class_type_info *__base_type; +}; + +class __base_class_type_info { +public: + const __class_type_info *__base_type; + long __offset_flags; + + enum __offset_flags_masks { + __virtual_mask = 0x1, + __public_mask = 0x2, + __offset_shift = 8 + }; +}; + +/// Type info for classes with multiple, virtual, or non-public inheritance. +class __vmi_class_type_info : public __class_type_info { +public: + ~__vmi_class_type_info() override; + + unsigned int flags; + unsigned int base_count; + __base_class_type_info base_info[1]; +}; + +} + +namespace abi = __cxxabiv1; + +// We implement a simple two-level cache for type-checking results. For each +// (vptr,type) pair, a hash is computed. This hash is assumed to be globally +// unique; if it collides, we will get false negatives, but: +// * such a collision would have to occur on the *first* bad access, +// * the probability of such a collision is low (and for a 64-bit target, is +// negligible), and +// * the vptr, and thus the hash, can be affected by ASLR, so multiple runs +// give better coverage. +// +// The first caching layer is a small hash table with no chaining; buckets are +// reused as needed. The second caching layer is a large hash table with open +// chaining. We can freely evict from either layer since this is just a cache. +// +// FIXME: Make these hash table accesses thread-safe. The races here are benign: +// assuming the unsequenced loads and stores don't misbehave too badly, +// the worst case is false negatives or poor cache behavior, not false +// positives or crashes. + +/// Find a bucket to store the given hash value in. +static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) { + static const unsigned HashTableSize = 65537; + static __ubsan::HashValue __ubsan_vptr_hash_set[HashTableSize]; + + unsigned First = (V & 65535) ^ 1; + unsigned Probe = First; + for (int Tries = 5; Tries; --Tries) { + if (!__ubsan_vptr_hash_set[Probe] || __ubsan_vptr_hash_set[Probe] == V) + return &__ubsan_vptr_hash_set[Probe]; + Probe += ((V >> 16) & 65535) + 1; + if (Probe >= HashTableSize) + Probe -= HashTableSize; + } + // FIXME: Pick a random entry from the probe sequence to evict rather than + // just taking the first. + return &__ubsan_vptr_hash_set[First]; +} + +/// \brief Determine whether \p Derived has a \p Base base class subobject at +/// offset \p Offset. 
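/// (Illustrative example, not part of the upstream patch: given
/// "struct A { virtual void f(); int a; }; struct B { virtual void g(); };
/// struct C : A, B {};" under a typical LP64 Itanium layout, a C object has
/// its A subobject at offset 0 and its B subobject at offset 16, so the
/// check succeeds for (C, B, 16) and fails for (C, B, 0).)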
+static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived, + const abi::__class_type_info *Base, + sptr Offset) { + if (Derived->__type_name == Base->__type_name) + return Offset == 0; + + if (const abi::__si_class_type_info *SI = + dynamic_cast<const abi::__si_class_type_info*>(Derived)) + return isDerivedFromAtOffset(SI->__base_type, Base, Offset); + + const abi::__vmi_class_type_info *VTI = + dynamic_cast<const abi::__vmi_class_type_info*>(Derived); + if (!VTI) + // No base class subobjects. + return false; + + // Look for a base class which is derived from \p Base at the right offset. + for (unsigned int base = 0; base != VTI->base_count; ++base) { + // FIXME: Curtail the recursion if this base can't possibly contain the + // given offset. + sptr OffsetHere = VTI->base_info[base].__offset_flags >> + abi::__base_class_type_info::__offset_shift; + if (VTI->base_info[base].__offset_flags & + abi::__base_class_type_info::__virtual_mask) + // For now, just punt on virtual bases and say 'yes'. + // FIXME: OffsetHere is the offset in the vtable of the virtual base + // offset. Read the vbase offset out of the vtable and use it. + return true; + if (isDerivedFromAtOffset(VTI->base_info[base].__base_type, + Base, Offset - OffsetHere)) + return true; + } + + return false; +} + +/// \brief Find the derived-most dynamic base class of \p Derived at offset +/// \p Offset. +static const abi::__class_type_info *findBaseAtOffset( + const abi::__class_type_info *Derived, sptr Offset) { + if (!Offset) + return Derived; + + if (const abi::__si_class_type_info *SI = + dynamic_cast<const abi::__si_class_type_info*>(Derived)) + return findBaseAtOffset(SI->__base_type, Offset); + + const abi::__vmi_class_type_info *VTI = + dynamic_cast<const abi::__vmi_class_type_info*>(Derived); + if (!VTI) + // No base class subobjects. + return 0; + + for (unsigned int base = 0; base != VTI->base_count; ++base) { + sptr OffsetHere = VTI->base_info[base].__offset_flags >> + abi::__base_class_type_info::__offset_shift; + if (VTI->base_info[base].__offset_flags & + abi::__base_class_type_info::__virtual_mask) + // FIXME: Can't handle virtual bases yet. + continue; + if (const abi::__class_type_info *Base = + findBaseAtOffset(VTI->base_info[base].__base_type, + Offset - OffsetHere)) + return Base; + } + + return 0; +} + +namespace { + +struct VtablePrefix { + /// The offset from the vptr to the start of the most-derived object. + /// This will only be greater than zero in some virtual base class vtables + /// used during object con-/destruction, and will usually be exactly zero. + sptr Offset; + /// The type_info object describing the most-derived class type. + std::type_info *TypeInfo; +}; +VtablePrefix *getVtablePrefix(void *Vtable) { + VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable); + if (!Vptr) + return 0; + VtablePrefix *Prefix = Vptr - 1; + if (!Prefix->TypeInfo) + // This can't possibly be a valid vtable. + return 0; + return Prefix; +} + +} + +bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) { + // A crash anywhere within this function probably means the vptr is corrupted. + // FIXME: Perform these checks more cautiously. + + // Check whether this is something we've evicted from the cache. 
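// (Annotation, not part of the upstream patch: instrumented code has already
// missed in the small first-level cache __ubsan_vptr_type_cache before this
// handler runs, so the code below consults the large second-level table and,
// on a hit, refills the corresponding first-level slot.)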
+ HashValue *Bucket = getTypeCacheHashTableBucket(Hash); + if (*Bucket == Hash) { + __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash; + return true; + } + + void *VtablePtr = *reinterpret_cast<void **>(Object); + VtablePrefix *Vtable = getVtablePrefix(VtablePtr); + if (!Vtable) + return false; + + // Check that this is actually a type_info object for a class type. + abi::__class_type_info *Derived = + dynamic_cast<abi::__class_type_info*>(Vtable->TypeInfo); + if (!Derived) + return false; + + abi::__class_type_info *Base = (abi::__class_type_info*)Type; + if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset)) + return false; + + // Success. Cache this result. + __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash; + *Bucket = Hash; + return true; +} + +__ubsan::DynamicTypeInfo +__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) { + VtablePrefix *Vtable = getVtablePrefix(VtablePtr); + if (!Vtable) + return DynamicTypeInfo(0, 0, 0); + const abi::__class_type_info *ObjectType = findBaseAtOffset( + static_cast<const abi::__class_type_info*>(Vtable->TypeInfo), + -Vtable->Offset); + return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset, + ObjectType ? ObjectType->__type_name : "<unknown>"); +} + +#endif // CAN_SANITIZE_UB && !SANITIZER_WINDOWS diff --git a/libsanitizer/ubsan/ubsan_type_hash_win.cc b/libsanitizer/ubsan/ubsan_type_hash_win.cc new file mode 100644 index 00000000000..a2eb1a71cb9 --- /dev/null +++ b/libsanitizer/ubsan/ubsan_type_hash_win.cc @@ -0,0 +1,79 @@ +//===-- ubsan_type_hash_win.cc --------------------------------------------===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Implementation of type hashing/lookup for Microsoft C++ ABI. +// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB && SANITIZER_WINDOWS +#include "ubsan_type_hash.h" + +#include "sanitizer_common/sanitizer_common.h" + +#include <typeinfo> + +struct CompleteObjectLocator { + int is_image_relative; + int offset_to_top; + int vfptr_offset; + int rtti_addr; + int chd_addr; + int obj_locator_addr; +}; + +struct CompleteObjectLocatorAbs { + int is_image_relative; + int offset_to_top; + int vfptr_offset; + std::type_info *rtti_addr; + void *chd_addr; + CompleteObjectLocator *obj_locator_addr; +}; + +bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) { + // FIXME: Implement. + return false; +} + +__ubsan::DynamicTypeInfo +__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) { + // The virtual table may not have a complete object locator if the object + // was compiled without RTTI (i.e. we might be reading from some other global + // laid out before the virtual table), so we need to carefully validate each + // pointer dereference and perform sanity checks. 
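// (Annotation, not part of the upstream patch: in the Microsoft C++ ABI the
// pointer-sized slot immediately before the first virtual function pointer
// holds a pointer to the RTTI Complete Object Locator, which is why the code
// below reads the slot just before VtablePtr and validates every dereference
// with IsAccessibleMemoryRange.)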
+ CompleteObjectLocator **obj_locator_ptr = + ((CompleteObjectLocator**)VtablePtr)-1; + if (!IsAccessibleMemoryRange((uptr)obj_locator_ptr, sizeof(void*))) + return DynamicTypeInfo(0, 0, 0); + + CompleteObjectLocator *obj_locator = *obj_locator_ptr; + if (!IsAccessibleMemoryRange((uptr)obj_locator, + sizeof(CompleteObjectLocator))) + return DynamicTypeInfo(0, 0, 0); + + std::type_info *tinfo; + if (obj_locator->is_image_relative == 1) { + char *image_base = ((char *)obj_locator) - obj_locator->obj_locator_addr; + tinfo = (std::type_info *)(image_base + obj_locator->rtti_addr); + } else if (obj_locator->is_image_relative == 0) + tinfo = ((CompleteObjectLocatorAbs *)obj_locator)->rtti_addr; + else + // Probably not a complete object locator. + return DynamicTypeInfo(0, 0, 0); + + if (!IsAccessibleMemoryRange((uptr)tinfo, sizeof(std::type_info))) + return DynamicTypeInfo(0, 0, 0); + + // Okay, this is probably a std::type_info. Request its name. + // FIXME: Implement a base class search like we do for Itanium. + return DynamicTypeInfo(tinfo->name(), obj_locator->offset_to_top, + "<unknown>"); +} + +#endif // CAN_SANITIZE_UB && SANITIZER_WINDOWS diff --git a/libsanitizer/ubsan/ubsan_value.cc b/libsanitizer/ubsan/ubsan_value.cc index e2f664d3b24..e327f6ffc2e 100644 --- a/libsanitizer/ubsan/ubsan_value.cc +++ b/libsanitizer/ubsan/ubsan_value.cc @@ -10,6 +10,8 @@ // //===----------------------------------------------------------------------===// +#include "ubsan_platform.h" +#if CAN_SANITIZE_UB #include "ubsan_value.h" #include "sanitizer_common/sanitizer_common.h" #include "sanitizer_common/sanitizer_libc.h" @@ -79,7 +81,14 @@ FloatMax Value::getFloatValue() const { #endif case 32: { float Value; - internal_memcpy(&Value, &Val, 4); +#if defined(__BIG_ENDIAN__) + // For big endian the float value is in the last 4 bytes. + // On some targets we may only have 4 bytes so we count backwards from + // the end of Val to account for both the 32-bit and 64-bit cases. + internal_memcpy(&Value, ((const char*)(&Val + 1)) - 4, 4); +#else + internal_memcpy(&Value, &Val, 4); +#endif return Value; } case 64: { @@ -98,3 +107,5 @@ FloatMax Value::getFloatValue() const { } UNREACHABLE("unexpected floating point bit width"); } + +#endif // CAN_SANITIZE_UB diff --git a/libsanitizer/ubsan/ubsan_value.h b/libsanitizer/ubsan/ubsan_value.h index abfd31fbd99..cca1ac85b77 100644 --- a/libsanitizer/ubsan/ubsan_value.h +++ b/libsanitizer/ubsan/ubsan_value.h @@ -12,12 +12,6 @@ #ifndef UBSAN_VALUE_H #define UBSAN_VALUE_H -// For now, only support Linux, FreeBSD and Darwin. Other platforms should -// be easy to add, and probably work as-is. -#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__APPLE__) -#error "UBSan not supported for this platform!" -#endif - #include "sanitizer_common/sanitizer_atomic.h" #include "sanitizer_common/sanitizer_common.h" @@ -30,7 +24,6 @@ __extension__ typedef unsigned __int128 u128; #define HAVE_INT128_T 0 #endif - namespace __ubsan { /// \brief Largest integer types we support. |
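As a usage note (not part of this merge), the checks wired up above fire from ordinary UBSan-instrumented programs. A minimal sketch that trips the integer-overflow handler when built with, for instance, g++ -fsanitize=undefined; the runtime flags declared in ubsan_flags.inc (halt_on_error, print_stacktrace, report_error_type) can then be set through the UBSAN_OPTIONS environment variable, e.g. UBSAN_OPTIONS=print_stacktrace=1:report_error_type=1:

// overflow_example.cc (hypothetical file name, illustration only)
#include <climits>

int main(int argc, char **) {
  int X = INT_MAX;
  X += argc;      // argc >= 1, so this signed addition overflows and UBSan
                  // reports "signed integer overflow ... in type 'int'"
  return X == 0;  // keep X live so the addition is not folded away
}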