author    chefmax <chefmax@138bc75d-0d04-0410-961f-82ee72b054a4>  2016-11-08 22:04:09 +0000
committer chefmax <chefmax@138bc75d-0d04-0410-961f-82ee72b054a4>  2016-11-08 22:04:09 +0000
commit    23e394371c5e8f4e51306c8e890a6707b782eb9d (patch)
tree      9c1a8b279416b5f379d7631c1b7f36ab18797212 /libsanitizer
parent    23c2e114ba136dcbbc00435ec2c70a7f2a50bfef (diff)
download  gcc-23e394371c5e8f4e51306c8e890a6707b782eb9d.tar.gz
libsanitizer/
	* All source files: Merge from upstream 285547.
	* configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New
	variable.
	* configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
	* asan/Makefile.am (asan_files): Add new files.
	* asan/Makefile.in: Regenerate.
	* ubsan/Makefile.in: Likewise.
	* lsan/Makefile.in: Likewise.
	* tsan/Makefile.am (tsan_files): Add new files.
	* tsan/Makefile.in: Regenerate.
	* sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
	(EXTRA_libsanitizer_common_la_SOURCES): Define.
	(libsanitizer_common_la_LIBADD): Likewise.
	(libsanitizer_common_la_DEPENDENCIES): Likewise.
	* sanitizer_common/Makefile.in: Regenerate.
	* interception/Makefile.in: Likewise.
	* libbacktrace/Makefile.in: Likewise.
	* Makefile.in: Likewise.
	* configure: Likewise.
	* merge.sh: Handle builtins/assembly.h merging.
	* builtins/assembly.h: New file.
	* asan/libtool-version: Bump the libasan SONAME.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@241977 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer')
-rw-r--r--  libsanitizer/ChangeLog | 25
-rw-r--r--  libsanitizer/MERGE | 2
-rw-r--r--  libsanitizer/Makefile.in | 1
-rw-r--r--  libsanitizer/asan/Makefile.am | 3
-rw-r--r--  libsanitizer/asan/Makefile.in | 14
-rw-r--r--  libsanitizer/asan/asan_activation.cc | 8
-rw-r--r--  libsanitizer/asan/asan_allocator.cc | 106
-rw-r--r--  libsanitizer/asan/asan_allocator.h | 43
-rw-r--r--  libsanitizer/asan/asan_debugging.cc | 125
-rw-r--r--  libsanitizer/asan/asan_descriptions.cc | 484
-rw-r--r--  libsanitizer/asan/asan_descriptions.h | 251
-rw-r--r--  libsanitizer/asan/asan_errors.cc | 494
-rw-r--r--  libsanitizer/asan/asan_errors.h | 376
-rw-r--r--  libsanitizer/asan/asan_fake_stack.cc | 6
-rw-r--r--  libsanitizer/asan/asan_fake_stack.h | 21
-rw-r--r--  libsanitizer/asan/asan_flags.cc | 18
-rw-r--r--  libsanitizer/asan/asan_flags.inc | 21
-rw-r--r--  libsanitizer/asan/asan_globals.cc | 166
-rw-r--r--  libsanitizer/asan/asan_init_version.h | 14
-rw-r--r--  libsanitizer/asan/asan_interceptors.cc | 106
-rw-r--r--  libsanitizer/asan/asan_interceptors.h | 14
-rw-r--r--  libsanitizer/asan/asan_interface_internal.h | 28
-rw-r--r--  libsanitizer/asan/asan_internal.h | 38
-rw-r--r--  libsanitizer/asan/asan_linux.cc | 13
-rw-r--r--  libsanitizer/asan/asan_mac.cc | 218
-rw-r--r--  libsanitizer/asan/asan_malloc_linux.cc | 10
-rw-r--r--  libsanitizer/asan/asan_malloc_mac.cc | 4
-rw-r--r--  libsanitizer/asan/asan_malloc_win.cc | 139
-rw-r--r--  libsanitizer/asan/asan_mapping.h | 77
-rw-r--r--  libsanitizer/asan/asan_memory_profile.cc | 98
-rw-r--r--  libsanitizer/asan/asan_new_delete.cc | 20
-rw-r--r--  libsanitizer/asan/asan_poisoning.cc | 44
-rw-r--r--  libsanitizer/asan/asan_poisoning.h | 2
-rw-r--r--  libsanitizer/asan/asan_posix.cc | 30
-rw-r--r--  libsanitizer/asan/asan_report.cc | 989
-rw-r--r--  libsanitizer/asan/asan_report.h | 26
-rw-r--r--  libsanitizer/asan/asan_rtl.cc | 89
-rw-r--r--  libsanitizer/asan/asan_scariness_score.h | 72
-rw-r--r--  libsanitizer/asan/asan_stack.h | 5
-rw-r--r--  libsanitizer/asan/asan_suppressions.cc | 1
-rw-r--r--  libsanitizer/asan/asan_thread.cc | 123
-rw-r--r--  libsanitizer/asan/asan_thread.h | 41
-rw-r--r--  libsanitizer/asan/asan_win.cc | 206
-rw-r--r--  libsanitizer/asan/asan_win_dll_thunk.cc | 44
-rw-r--r--  libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc | 23
-rw-r--r--  libsanitizer/asan/libtool-version | 2
-rw-r--r--  libsanitizer/builtins/assembly.h | 169
-rwxr-xr-x  libsanitizer/configure | 6
-rw-r--r--  libsanitizer/configure.ac | 1
-rw-r--r--  libsanitizer/configure.tgt | 2
-rw-r--r--  libsanitizer/include/sanitizer/allocator_interface.h | 17
-rw-r--r--  libsanitizer/include/sanitizer/common_interface_defs.h | 58
-rw-r--r--  libsanitizer/include/sanitizer/coverage_interface.h | 1
-rw-r--r--  libsanitizer/include/sanitizer/esan_interface.h | 48
-rw-r--r--  libsanitizer/include/sanitizer/linux_syscall_hooks.h | 23
-rw-r--r--  libsanitizer/interception/Makefile.in | 1
-rw-r--r--  libsanitizer/interception/interception.h | 6
-rw-r--r--  libsanitizer/interception/interception_win.cc | 995
-rw-r--r--  libsanitizer/interception/interception_win.h | 30
-rw-r--r--  libsanitizer/libbacktrace/Makefile.in | 1
-rw-r--r--  libsanitizer/lsan/Makefile.in | 1
-rw-r--r--  libsanitizer/lsan/lsan.cc | 2
-rw-r--r--  libsanitizer/lsan/lsan.h | 7
-rw-r--r--  libsanitizer/lsan/lsan_allocator.cc | 17
-rw-r--r--  libsanitizer/lsan/lsan_common.cc | 57
-rw-r--r--  libsanitizer/lsan/lsan_common.h | 15
-rw-r--r--  libsanitizer/lsan/lsan_common_linux.cc | 49
-rw-r--r--  libsanitizer/lsan/lsan_flags.inc | 4
-rw-r--r--  libsanitizer/lsan/lsan_interceptors.cc | 25
-rw-r--r--  libsanitizer/lsan/lsan_thread.cc | 27
-rw-r--r--  libsanitizer/lsan/lsan_thread.h | 7
-rwxr-xr-x  libsanitizer/merge.sh | 4
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.am | 5
-rw-r--r--  libsanitizer/sanitizer_common/Makefile.in | 62
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.cc | 107
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.h | 1428
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h | 100
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_combined.h | 209
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_interface.h | 8
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_internal.h | 11
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h | 246
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h | 302
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h | 503
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h | 271
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h | 215
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator_stats.h | 103
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_asm.h | 14
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h | 13
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.cc | 209
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h | 215
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc | 966
-rwxr-xr-x  libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc | 38
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc | 55
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc | 58
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc | 161
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc | 23
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc | 9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.cc | 56
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.h | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_flags.inc | 49
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_interface_internal.h | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_internal_defs.h | 53
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.cc | 32
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_libc.h | 14
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.cc | 283
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux.h | 8
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc | 120
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_mips64.S | 21
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_s390.cc | 189
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_linux_x86_64.S | 23
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_list.h | 32
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.cc | 406
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_mac.h | 18
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc | 26
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform.h | 111
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h | 55
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc | 31
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h | 112
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix.cc | 63
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix.h | 9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc | 107
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_printf.cc | 9
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps.h | 5
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc | 22
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc | 26
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_quarantine.h | 6
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cc | 19
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.h | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc | 44
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc | 29
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h | 7
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc | 34
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_suppressions.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_suppressions.h | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.cc | 3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer.h | 14
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc | 48
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc | 37
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc | 107
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc | 8
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_termination.cc | 84
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_thread_registry.cc | 4
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc | 2
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc | 10
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_win.cc | 272
-rw-r--r--  libsanitizer/tsan/Makefile.am | 9
-rw-r--r--  libsanitizer/tsan/Makefile.in | 78
-rw-r--r--  libsanitizer/tsan/tsan_clock.cc | 2
-rw-r--r--  libsanitizer/tsan/tsan_debugging.cc | 160
-rw-r--r--  libsanitizer/tsan/tsan_defs.h | 22
-rw-r--r--  libsanitizer/tsan/tsan_dense_alloc.h | 2
-rw-r--r--  libsanitizer/tsan/tsan_flags.cc | 9
-rw-r--r--  libsanitizer/tsan/tsan_flags.inc | 7
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.cc | 696
-rw-r--r--  libsanitizer/tsan/tsan_interceptors.h | 16
-rw-r--r--  libsanitizer/tsan/tsan_interceptors_mac.cc | 357
-rw-r--r--  libsanitizer/tsan/tsan_interface.h | 291
-rw-r--r--  libsanitizer/tsan/tsan_interface_atomic.cc | 43
-rw-r--r--  libsanitizer/tsan/tsan_interface_java.cc | 2
-rw-r--r--  libsanitizer/tsan/tsan_libdispatch_mac.cc | 613
-rw-r--r--  libsanitizer/tsan/tsan_malloc_mac.cc | 43
-rw-r--r--  libsanitizer/tsan/tsan_mman.cc | 118
-rw-r--r--  libsanitizer/tsan/tsan_mman.h | 5
-rw-r--r--  libsanitizer/tsan/tsan_mutex.cc | 1
-rw-r--r--  libsanitizer/tsan/tsan_mutex.h | 1
-rw-r--r--  libsanitizer/tsan/tsan_mutexset.h | 4
-rw-r--r--  libsanitizer/tsan/tsan_new_delete.cc | 8
-rw-r--r--  libsanitizer/tsan/tsan_platform.h | 860
-rw-r--r--  libsanitizer/tsan/tsan_platform_linux.cc | 179
-rw-r--r--  libsanitizer/tsan/tsan_platform_mac.cc | 49
-rw-r--r--  libsanitizer/tsan/tsan_platform_posix.cc | 73
-rw-r--r--  libsanitizer/tsan/tsan_platform_windows.cc | 3
-rw-r--r--  libsanitizer/tsan/tsan_ppc_regs.h | 94
-rw-r--r--  libsanitizer/tsan/tsan_preinit.cc | 25
-rw-r--r--  libsanitizer/tsan/tsan_report.cc | 52
-rw-r--r--  libsanitizer/tsan/tsan_report.h | 3
-rw-r--r--  libsanitizer/tsan/tsan_rtl.cc | 114
-rw-r--r--  libsanitizer/tsan/tsan_rtl.h | 90
-rw-r--r--  libsanitizer/tsan/tsan_rtl_aarch64.S | 2
-rw-r--r--  libsanitizer/tsan/tsan_rtl_amd64.S | 101
-rw-r--r--  libsanitizer/tsan/tsan_rtl_mips64.S | 212
-rw-r--r--  libsanitizer/tsan/tsan_rtl_mutex.cc | 67
-rw-r--r--  libsanitizer/tsan/tsan_rtl_ppc64.S | 286
-rw-r--r--  libsanitizer/tsan/tsan_rtl_proc.cc | 59
-rw-r--r--  libsanitizer/tsan/tsan_rtl_report.cc | 71
-rw-r--r--  libsanitizer/tsan/tsan_rtl_thread.cc | 46
-rw-r--r--  libsanitizer/tsan/tsan_stat.cc | 1
-rw-r--r--  libsanitizer/tsan/tsan_stat.h | 1
-rw-r--r--  libsanitizer/tsan/tsan_suppressions.cc | 15
-rw-r--r--  libsanitizer/tsan/tsan_symbolize.cc | 10
-rw-r--r--  libsanitizer/tsan/tsan_sync.cc | 69
-rw-r--r--  libsanitizer/tsan/tsan_sync.h | 20
-rw-r--r--  libsanitizer/tsan/tsan_trace.h | 4
-rw-r--r--  libsanitizer/ubsan/Makefile.in | 1
-rw-r--r--  libsanitizer/ubsan/ubsan_checks.inc | 56
-rw-r--r--  libsanitizer/ubsan/ubsan_diag.cc | 229
-rw-r--r--  libsanitizer/ubsan/ubsan_diag.h | 21
-rw-r--r--  libsanitizer/ubsan/ubsan_flags.cc | 2
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers.cc | 254
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers.h | 19
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers_cxx.cc | 105
-rw-r--r--  libsanitizer/ubsan/ubsan_handlers_cxx.h | 14
-rw-r--r--  libsanitizer/ubsan/ubsan_init.cc | 1
-rw-r--r--  libsanitizer/ubsan/ubsan_platform.h | 3
-rw-r--r--  libsanitizer/ubsan/ubsan_type_hash.h | 4
-rw-r--r--  libsanitizer/ubsan/ubsan_type_hash_itanium.cc | 22
-rw-r--r--  libsanitizer/ubsan/ubsan_value.cc | 4
211 files changed, 14569 insertions, 5347 deletions
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index 0a5282e3e9d..a490230f594 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,28 @@
+2016-11-09 Maxim Ostapenko <m.ostapenko@samsung.com>
+
+ * All source files: Merge from upstream 285547.
+ * configure.tgt (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): New
+ variable.
+ * configure.ac (SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS): Handle it.
+ * asan/Makefile.am (asan_files): Add new files.
+ * asan/Makefile.in: Regenerate.
+ * ubsan/Makefile.in: Likewise.
+ * lsan/Makefile.in: Likewise.
+ * tsan/Makefile.am (tsan_files): Add new files.
+ * tsan/Makefile.in: Regenerate.
+ * sanitizer_common/Makefile.am (sanitizer_common_files): Add new files.
+ (EXTRA_libsanitizer_common_la_SOURCES): Define.
+ (libsanitizer_common_la_LIBADD): Likewise.
+ (libsanitizer_common_la_DEPENDENCIES): Likewise.
+ * sanitizer_common/Makefile.in: Regenerate.
+ * interception/Makefile.in: Likewise.
+ * libbacktrace/Makefile.in: Likewise.
+ * Makefile.in: Likewise.
+ * configure: Likewise.
+ * merge.sh: Handle builtins/assembly.h merging.
+ * builtins/assembly.h: New file.
+ * asan/libtool-version: Bump the libasan SONAME.
+
2016-09-21 Jakub Jelinek <jakub@redhat.com>
PR sanitizer/77567
diff --git a/libsanitizer/MERGE b/libsanitizer/MERGE
index dfd606aa0c6..21c2f390e97 100644
--- a/libsanitizer/MERGE
+++ b/libsanitizer/MERGE
@@ -1,4 +1,4 @@
-253555
+285547
The first line of this file holds the svn revision number of the
last merge done from the master library sources.
diff --git a/libsanitizer/Makefile.in b/libsanitizer/Makefile.in
index 1278900c0d4..28ba4b8e46a 100644
--- a/libsanitizer/Makefile.in
+++ b/libsanitizer/Makefile.in
@@ -210,6 +210,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
diff --git a/libsanitizer/asan/Makefile.am b/libsanitizer/asan/Makefile.am
index bd3cd735a98..bfd28c0388e 100644
--- a/libsanitizer/asan/Makefile.am
+++ b/libsanitizer/asan/Makefile.am
@@ -19,6 +19,8 @@ asan_files = \
asan_activation.cc \
asan_allocator.cc \
asan_debugging.cc \
+ asan_descriptions.cc \
+ asan_errors.cc \
asan_fake_stack.cc \
asan_flags.cc \
asan_globals.cc \
@@ -28,6 +30,7 @@ asan_files = \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
+ asan_memory_profile.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
diff --git a/libsanitizer/asan/Makefile.in b/libsanitizer/asan/Makefile.in
index 229c7b400d1..2d9f4a7b815 100644
--- a/libsanitizer/asan/Makefile.in
+++ b/libsanitizer/asan/Makefile.in
@@ -112,9 +112,10 @@ libasan_la_DEPENDENCIES = \
$(top_builddir)/lsan/libsanitizer_lsan.la $(am__append_2) \
$(am__append_3) $(am__DEPENDENCIES_1)
am__objects_1 = asan_activation.lo asan_allocator.lo asan_debugging.lo \
- asan_fake_stack.lo asan_flags.lo asan_globals.lo \
- asan_interceptors.lo asan_linux.lo asan_mac.lo \
- asan_malloc_linux.lo asan_malloc_mac.lo asan_malloc_win.lo \
+ asan_descriptions.lo asan_errors.lo asan_fake_stack.lo \
+ asan_flags.lo asan_globals.lo asan_interceptors.lo \
+ asan_linux.lo asan_mac.lo asan_malloc_linux.lo \
+ asan_malloc_mac.lo asan_malloc_win.lo asan_memory_profile.lo \
asan_new_delete.lo asan_poisoning.lo asan_posix.lo \
asan_report.lo asan_rtl.lo asan_stack.lo asan_stats.lo \
asan_suppressions.lo asan_thread.lo asan_win.lo \
@@ -219,6 +220,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
@@ -308,6 +310,8 @@ asan_files = \
asan_activation.cc \
asan_allocator.cc \
asan_debugging.cc \
+ asan_descriptions.cc \
+ asan_errors.cc \
asan_fake_stack.cc \
asan_flags.cc \
asan_globals.cc \
@@ -317,6 +321,7 @@ asan_files = \
asan_malloc_linux.cc \
asan_malloc_mac.cc \
asan_malloc_win.cc \
+ asan_memory_profile.cc \
asan_new_delete.cc \
asan_poisoning.cc \
asan_posix.cc \
@@ -454,6 +459,8 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_activation.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_debugging.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_descriptions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_errors.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_fake_stack.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_globals.Plo@am__quote@
@@ -463,6 +470,7 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_malloc_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_memory_profile.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_new_delete.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_poisoning.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/asan_posix.Plo@am__quote@
diff --git a/libsanitizer/asan/asan_activation.cc b/libsanitizer/asan/asan_activation.cc
index 58867956086..ecd767c5985 100644
--- a/libsanitizer/asan/asan_activation.cc
+++ b/libsanitizer/asan/asan_activation.cc
@@ -45,6 +45,7 @@ static struct AsanDeactivatedFlags {
FlagParser parser;
RegisterActivationFlags(&parser, &f, &cf);
+ cf.SetDefaults();
// Copy the current activation flags.
allocator_options.CopyTo(&f, &cf);
cf.malloc_context_size = malloc_context_size;
@@ -59,12 +60,7 @@ static struct AsanDeactivatedFlags {
parser.ParseString(env);
}
- // Override from getprop asan.options.
- char buf[100];
- GetExtraActivationFlags(buf, sizeof(buf));
- parser.ParseString(buf);
-
- SetVerbosity(cf.verbosity);
+ InitializeCommonFlags(&cf);
if (Verbosity()) ReportUnrecognizedFlags();
diff --git a/libsanitizer/asan/asan_allocator.cc b/libsanitizer/asan/asan_allocator.cc
index 187c405e118..d3ddb904d58 100644
--- a/libsanitizer/asan/asan_allocator.cc
+++ b/libsanitizer/asan/asan_allocator.cc
@@ -221,7 +221,7 @@ void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
struct Allocator {
static const uptr kMaxAllowedMallocSize =
- FIRST_32_SECOND_64(3UL << 30, 1UL << 40);
+ FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
static const uptr kMaxThreadLocalQuarantine =
FIRST_32_SECOND_64(1 << 18, 1 << 20);
@@ -264,9 +264,43 @@ struct Allocator {
SharedInitCode(options);
}
+ void RePoisonChunk(uptr chunk) {
+ // This could be a user-facing chunk (with redzones), or some internal
+ // housekeeping chunk, like TransferBatch. Start by assuming the former.
+ AsanChunk *ac = GetAsanChunk((void *)chunk);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize(true);
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end) {
+ // Looks like a valid AsanChunk. Or maybe not. Be conservative and only
+ // poison the redzones.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ } else {
+ // This can not be an AsanChunk. Poison everything. It may be reused as
+ // AsanChunk later.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+ }
+
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
SharedInitCode(options);
+
+ // Poison all existing allocation's redzones.
+ if (CanPoisonMemory()) {
+ allocator.ForceLock();
+ allocator.ForEachChunk(
+ [](uptr chunk, void *alloc) {
+ ((Allocator *)alloc)->RePoisonChunk(chunk);
+ },
+ this);
+ allocator.ForceUnlock();
+ }
}
void GetOptions(AllocatorOptions *options) const {
@@ -354,7 +388,7 @@ struct Allocator {
if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
(void*)size);
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
}
AsanThread *t = GetCurrentThread();
@@ -371,8 +405,7 @@ struct Allocator {
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
}
- if (!allocated)
- return allocator.ReturnNullOrDie();
+ if (!allocated) return allocator.ReturnNullOrDieOnOOM();
if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
// Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -455,29 +488,28 @@ struct Allocator {
return res;
}
- void AtomicallySetQuarantineFlag(AsanChunk *m, void *ptr,
+ // Set quarantine flag if chunk is allocated, issue ASan error report on
+ // available and quarantined chunks. Return true on success, false otherwise.
+ bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
BufferedStackTrace *stack) {
u8 old_chunk_state = CHUNK_ALLOCATED;
// Flip the chunk_state atomically to avoid race on double-free.
- if (!atomic_compare_exchange_strong((atomic_uint8_t*)m, &old_chunk_state,
- CHUNK_QUARANTINE, memory_order_acquire))
+ if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+ CHUNK_QUARANTINE,
+ memory_order_acquire)) {
ReportInvalidFree(ptr, old_chunk_state, stack);
+ // It's not safe to push a chunk in quarantine on invalid free.
+ return false;
+ }
CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ return true;
}
// Expects the chunk to already be marked as quarantined by using
- // AtomicallySetQuarantineFlag.
+ // AtomicallySetQuarantineFlagIfAllocated.
void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack,
AllocType alloc_type) {
CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-
- if (m->alloc_type != alloc_type) {
- if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
- ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
- (AllocType)alloc_type);
- }
- }
-
CHECK_GE(m->alloc_tid, 0);
if (SANITIZER_WORDSIZE == 64) // On 32-bits this resides in user area.
CHECK_EQ(m->free_tid, kInvalidTid);
@@ -514,13 +546,24 @@ struct Allocator {
uptr chunk_beg = p - kChunkHeaderSize;
AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ ASAN_FREE_HOOK(ptr);
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ // Do not quarantine given chunk if we failed to set CHUNK_QUARANTINE flag.
+ if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
+
+ if (m->alloc_type != alloc_type) {
+ if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+ ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+ (AllocType)alloc_type);
+ }
+ }
+
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, delete_size, stack);
}
- ASAN_FREE_HOOK(ptr);
- // Must mark the chunk as quarantined before any changes to its metadata.
- AtomicallySetQuarantineFlag(m, ptr, stack);
+
QuarantineChunk(m, ptr, stack, alloc_type);
}
@@ -551,7 +594,7 @@ struct Allocator {
void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
if (CallocShouldReturnNullDueToOverflow(size, nmemb))
- return allocator.ReturnNullOrDie();
+ return allocator.ReturnNullOrDieOnBadRequest();
void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
// If the memory comes from the secondary allocator no need to clear it
// as it comes directly from mmap.
@@ -642,6 +685,8 @@ struct Allocator {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
+
+ void ReleaseToOS() { allocator.ReleaseToOS(); }
};
static Allocator instance(LINKER_INITIALIZED);
@@ -653,11 +698,17 @@ static AsanAllocator &get_allocator() {
bool AsanChunkView::IsValid() {
return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
}
+bool AsanChunkView::IsAllocated() {
+ return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+}
uptr AsanChunkView::Beg() { return chunk_->Beg(); }
uptr AsanChunkView::End() { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() { return chunk_->UsedSize(); }
uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() {
+ return (AllocType)chunk_->alloc_type;
+}
static StackTrace GetStackTraceFromId(u32 id) {
CHECK(id);
@@ -666,16 +717,22 @@ static StackTrace GetStackTraceFromId(u32 id) {
return res;
}
+u32 AsanChunkView::GetAllocStackId() { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetFreeStackId() { return chunk_->free_context_id; }
+
StackTrace AsanChunkView::GetAllocStack() {
- return GetStackTraceFromId(chunk_->alloc_context_id);
+ return GetStackTraceFromId(GetAllocStackId());
}
StackTrace AsanChunkView::GetFreeStack() {
- return GetStackTraceFromId(chunk_->free_context_id);
+ return GetStackTraceFromId(GetFreeStackId());
}
+void ReleaseToOS() { instance.ReleaseToOS(); }
+
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
+ SetAllocatorReleaseToOSCallback(ReleaseToOS);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
@@ -689,6 +746,9 @@ void GetAllocatorOptions(AllocatorOptions *options) {
AsanChunkView FindHeapChunkByAddress(uptr addr) {
return instance.FindHeapChunkByAddress(addr);
}
+AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
+ return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
+}
void AsanThreadLocalMallocStorage::CommitBack() {
instance.CommitBack(this);
@@ -752,7 +812,7 @@ int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
return 0;
}
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp) {
+uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
if (!ptr) return 0;
uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
if (flags()->check_malloc_usable_size && (usable_size == 0)) {
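
The reordered Deallocate path above hinges on AtomicallySetQuarantineFlagIfAllocated: the chunk's state byte is flipped with a single compare-and-swap before any other metadata is touched, so of two racing frees exactly one wins the CAS and the loser is reported as an invalid free instead of corrupting the quarantine. A minimal standalone sketch of that guard, using std::atomic in place of the sanitizer's internal atomics (Chunk and TryQuarantine are illustrative names, not ASan's):

#include <atomic>
#include <cstdio>

enum ChunkState : unsigned char { CHUNK_AVAILABLE, CHUNK_ALLOCATED, CHUNK_QUARANTINE };

struct Chunk {
  std::atomic<unsigned char> state{CHUNK_ALLOCATED};
};

// Returns true iff this thread won the race to quarantine the chunk; a
// concurrent second free finds CHUNK_QUARANTINE in `expected` and reports.
bool TryQuarantine(Chunk *c) {
  unsigned char expected = CHUNK_ALLOCATED;
  if (!c->state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                        std::memory_order_acquire)) {
    std::printf("invalid free: chunk state was %d\n", expected);
    return false;  // unsafe to touch metadata or push to quarantine again
  }
  return true;
}

int main() {
  Chunk c;
  std::printf("first free won: %d\n", TryQuarantine(&c));   // 1
  std::printf("second free won: %d\n", TryQuarantine(&c));  // 0
}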
diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h
index 921131ab4e9..7eeddadd547 100644
--- a/libsanitizer/asan/asan_allocator.h
+++ b/libsanitizer/asan/asan_allocator.h
@@ -47,16 +47,20 @@ void GetAllocatorOptions(AllocatorOptions *options);
class AsanChunkView {
public:
explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
- bool IsValid(); // Checks if AsanChunkView points to a valid allocated
- // or quarantined chunk.
- uptr Beg(); // First byte of user memory.
- uptr End(); // Last byte of user memory.
- uptr UsedSize(); // Size requested by the user.
+ bool IsValid(); // Checks if AsanChunkView points to a valid allocated
+ // or quarantined chunk.
+ bool IsAllocated(); // Checks if the memory is currently allocated.
+ uptr Beg(); // First byte of user memory.
+ uptr End(); // Last byte of user memory.
+ uptr UsedSize(); // Size requested by the user.
uptr AllocTid();
uptr FreeTid();
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
+ u32 GetAllocStackId();
+ u32 GetFreeStackId();
StackTrace GetAllocStack();
StackTrace GetFreeStack();
+ AllocType GetAllocType();
bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) {
if (addr >= Beg() && (addr + access_size) <= End()) {
*offset = addr - Beg();
@@ -85,6 +89,7 @@ class AsanChunkView {
};
AsanChunkView FindHeapChunkByAddress(uptr address);
+AsanChunkView FindHeapChunkByAllocBeg(uptr address);
// List of AsanChunks with total size.
class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
@@ -112,18 +117,36 @@ struct AsanMapUnmapCallback {
# if defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__aarch64__) && SANITIZER_ANDROID
+const uptr kAllocatorSpace = 0x3000000000ULL;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryCompactSizeClassMap SizeClassMap;
# elif defined(__aarch64__)
-// AArch64/SANITIZIER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
+// AArch64/SANITIZER_CAN_USER_ALLOCATOR64 is only for 42-bit VMA
// so no need to different values for different VMA.
const uptr kAllocatorSpace = 0x10000000000ULL;
const uptr kAllocatorSize = 0x10000000000ULL; // 3T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif SANITIZER_WINDOWS
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x8000000000ULL; // 500G
+typedef DefaultSizeClassMap SizeClassMap;
# else
const uptr kAllocatorSpace = 0x600000000000ULL;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-# endif
typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0 /*metadata*/,
- SizeClassMap, AsanMapUnmapCallback> PrimaryAllocator;
+# endif
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __asan::SizeClassMap SizeClassMap;
+ typedef AsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#else // Fallback to SizeClassAllocator32.
static const uptr kRegionSizeLog = 20;
static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
@@ -169,7 +192,7 @@ void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack);
-uptr asan_malloc_usable_size(void *ptr, uptr pc, uptr bp);
+uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
uptr asan_mz_size(const void *ptr);
void asan_mz_force_lock();
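
The AP64 struct introduced above replaces SizeClassAllocator64's growing template parameter list with a single compile-time policy struct, so adding a knob means adding one field rather than editing every instantiation. A toy restatement of the pattern under invented names (Allocator64Sketch, MyAP64); the real allocator naturally does far more:

#include <cstdint>
#include <cstdio>

// All tuning knobs arrive through one params struct.
template <class Params>
struct Allocator64Sketch {
  static const std::uint64_t kSpaceBeg = Params::kSpaceBeg;
  static const std::uint64_t kSpaceSize = Params::kSpaceSize;
};

struct MyAP64 {
  static const std::uint64_t kSpaceBeg = 0x600000000000ULL;
  static const std::uint64_t kSpaceSize = 0x40000000000ULL;  // 4T, the x86_64 default above
};

int main() {
  using Primary = Allocator64Sketch<MyAP64>;
  std::printf("region [0x%llx, +0x%llx)\n",
              (unsigned long long)Primary::kSpaceBeg,
              (unsigned long long)Primary::kSpaceSize);
}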
diff --git a/libsanitizer/asan/asan_debugging.cc b/libsanitizer/asan/asan_debugging.cc
index 6e33a9d4c51..1c8d0df8f6e 100644
--- a/libsanitizer/asan/asan_debugging.cc
+++ b/libsanitizer/asan/asan_debugging.cc
@@ -12,74 +12,39 @@
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
+#include "asan_descriptions.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_thread.h"
-namespace __asan {
-
-void GetInfoForStackVar(uptr addr, AddressDescription *descr, AsanThread *t) {
- descr->name[0] = 0;
- descr->region_address = 0;
- descr->region_size = 0;
- descr->region_kind = "stack";
+namespace {
+using namespace __asan;
- AsanThread::StackFrameAccess access;
- if (!t->GetStackFrameAccessByAddr(addr, &access))
- return;
+static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
+ char *name, uptr name_size,
+ uptr &region_address, uptr &region_size) {
InternalMmapVector<StackVarDescr> vars(16);
- if (!ParseFrameDescription(access.frame_descr, &vars)) {
+ if (!ParseFrameDescription(frame_descr, &vars)) {
return;
}
for (uptr i = 0; i < vars.size(); i++) {
- if (access.offset <= vars[i].beg + vars[i].size) {
- internal_strncat(descr->name, vars[i].name_pos,
- Min(descr->name_size, vars[i].name_len));
- descr->region_address = addr - (access.offset - vars[i].beg);
- descr->region_size = vars[i].size;
+ if (offset <= vars[i].beg + vars[i].size) {
+ // We use name_len + 1 because strlcpy will guarantee a \0 at the end, so
+ // if we're limiting the copy due to name_len, we add 1 to ensure we copy
+ // the whole name and then terminate with '\0'.
+ internal_strlcpy(name, vars[i].name_pos,
+ Min(name_size, vars[i].name_len + 1));
+ region_address = addr - (offset - vars[i].beg);
+ region_size = vars[i].size;
return;
}
}
}
-void GetInfoForHeapAddress(uptr addr, AddressDescription *descr) {
- AsanChunkView chunk = FindHeapChunkByAddress(addr);
-
- descr->name[0] = 0;
- descr->region_address = 0;
- descr->region_size = 0;
-
- if (!chunk.IsValid()) {
- descr->region_kind = "heap-invalid";
- return;
- }
-
- descr->region_address = chunk.Beg();
- descr->region_size = chunk.UsedSize();
- descr->region_kind = "heap";
-}
-
-void AsanLocateAddress(uptr addr, AddressDescription *descr) {
- if (DescribeAddressIfShadow(addr, descr, /* print */ false)) {
- return;
- }
- if (GetInfoForAddressIfGlobal(addr, descr)) {
- return;
- }
- asanThreadRegistry().Lock();
- AsanThread *thread = FindThreadByStackAddress(addr);
- asanThreadRegistry().Unlock();
- if (thread) {
- GetInfoForStackVar(addr, descr, thread);
- return;
- }
- GetInfoForHeapAddress(addr, descr);
-}
-
-static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
bool alloc_stack) {
AsanChunkView chunk = FindHeapChunkByAddress(addr);
if (!chunk.IsValid()) return 0;
@@ -106,18 +71,58 @@ static uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
return 0;
}
-} // namespace __asan
-
-using namespace __asan;
+} // namespace
SANITIZER_INTERFACE_ATTRIBUTE
const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
- uptr *region_address, uptr *region_size) {
- AddressDescription descr = { name, name_size, 0, 0, nullptr };
- AsanLocateAddress(addr, &descr);
- if (region_address) *region_address = descr.region_address;
- if (region_size) *region_size = descr.region_size;
- return descr.region_kind;
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ AddressDescription descr(addr);
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (auto shadow = descr.AsShadow()) {
+ // region_{address,size} are already 0
+ switch (shadow->kind) {
+ case kShadowKindLow:
+ region_kind = "low shadow";
+ break;
+ case kShadowKindGap:
+ region_kind = "shadow gap";
+ break;
+ case kShadowKindHigh:
+ region_kind = "high shadow";
+ break;
+ }
+ } else if (auto heap = descr.AsHeap()) {
+ region_kind = "heap";
+ region_address = heap->chunk_access.chunk_begin;
+ region_size = heap->chunk_access.chunk_size;
+ } else if (auto stack = descr.AsStack()) {
+ region_kind = "stack";
+ if (!stack->frame_descr) {
+ // region_{address,size} are already 0
+ } else {
+ FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
+ name_size, region_address, region_size);
+ }
+ } else if (auto global = descr.AsGlobal()) {
+ region_kind = "global";
+ auto &g = global->globals[0];
+ internal_strlcpy(name, g.name, name_size);
+ region_address = g.beg;
+ region_size = g.size;
+ } else {
+ // region_{address,size} are already 0
+ region_kind = "heap-invalid";
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
}
SANITIZER_INTERFACE_ATTRIBUTE
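
__asan_locate_address is part of ASan's public debugging interface; <sanitizer/asan_interface.h> declares it with void* and size_t where the internal code above uses uptr. A hedged usage sketch for a program built with -fsanitize=address (for a heap address the name buffer stays empty, as the rewritten implementation above shows):

#include <sanitizer/asan_interface.h>
#include <cstddef>
#include <cstdio>

// Build with: clang++ (or g++) -fsanitize=address -g locate.cc
int main() {
  int *p = new int[10];
  char name[128];
  void *region = nullptr;
  size_t region_size = 0;
  const char *kind = __asan_locate_address(p, name, sizeof(name),
                                           &region, &region_size);
  std::printf("%p: kind=%s name='%s' region=[%p, +%zu)\n", (void *)p, kind,
              name, region, region_size);  // expected kind: "heap"
  delete[] p;
}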
diff --git a/libsanitizer/asan/asan_descriptions.cc b/libsanitizer/asan/asan_descriptions.cc
new file mode 100644
index 00000000000..35d1619f2d9
--- /dev/null
+++ b/libsanitizer/asan/asan_descriptions.cc
@@ -0,0 +1,484 @@
+//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan functions for getting information about an address and/or printing it.
+//===----------------------------------------------------------------------===//
+
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+// Return " (thread_name) " or an empty string if the name is empty.
+const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
+ uptr buff_len) {
+ const char *name = t->name;
+ if (name[0] == '\0') return "";
+ buff[0] = 0;
+ internal_strncat(buff, " (", 3);
+ internal_strncat(buff, name, buff_len - 4);
+ internal_strncat(buff, ")", 2);
+ return buff;
+}
+
+const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len) {
+ if (tid == kInvalidTid) return "";
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *t = GetThreadContextByTidLocked(tid);
+ return ThreadNameWithParenthesis(t, buff, buff_len);
+}
+
+void DescribeThread(AsanThreadContext *context) {
+ CHECK(context);
+ asanThreadRegistry().CheckLocked();
+ // No need to announce the main thread.
+ if (context->tid == 0 || context->announced) {
+ return;
+ }
+ context->announced = true;
+ char tname[128];
+ InternalScopedString str(1024);
+ str.append("Thread T%d%s", context->tid,
+ ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
+ if (context->parent_tid == kInvalidTid) {
+ str.append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.append(
+ " created by T%d%s here:\n", context->parent_tid,
+ ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
+ // Recursively described parent thread if needed.
+ if (flags()->print_full_thread_history) {
+ AsanThreadContext *parent_context =
+ GetThreadContextByTidLocked(context->parent_tid);
+ DescribeThread(parent_context);
+ }
+}
+
+// Shadow descriptions
+static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
+ CHECK(!AddrIsInMem(addr));
+ if (AddrIsInShadowGap(addr)) {
+ *shadow_kind = kShadowKindGap;
+ } else if (AddrIsInHighShadow(addr)) {
+ *shadow_kind = kShadowKindHigh;
+ } else if (AddrIsInLowShadow(addr)) {
+ *shadow_kind = kShadowKindLow;
+ } else {
+ CHECK(0 && "Address is not in memory and not in shadow?");
+ return false;
+ }
+ return true;
+}
+
+bool DescribeAddressIfShadow(uptr addr) {
+ ShadowAddressDescription descr;
+ if (!GetShadowAddressInformation(addr, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
+ if (AddrIsInMem(addr)) return false;
+ ShadowKind shadow_kind;
+ if (!GetShadowKind(addr, &shadow_kind)) return false;
+ if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
+ descr->addr = addr;
+ descr->kind = shadow_kind;
+ return true;
+}
+
+// Heap descriptions
+static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
+ AsanChunkView chunk, uptr addr,
+ uptr access_size) {
+ descr->bad_addr = addr;
+ if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeLeft;
+ } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeRight;
+ if (descr->offset < 0) {
+ descr->bad_addr -= descr->offset;
+ descr->offset = 0;
+ }
+ } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeInside;
+ } else {
+ descr->access_type = kAccessTypeUnknown;
+ }
+ descr->chunk_begin = chunk.Beg();
+ descr->chunk_size = chunk.UsedSize();
+ descr->alloc_type = chunk.GetAllocType();
+}
+
+static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
+ Decorator d;
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
+ switch (descr.access_type) {
+ case kAccessTypeLeft:
+ str.append("%p is located %zd bytes to the left of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeRight:
+ str.append("%p is located %zd bytes to the right of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeInside:
+ str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
+ descr.offset);
+ break;
+ case kAccessTypeUnknown:
+ str.append(
+ "%p is located somewhere around (this is AddressSanitizer bug!)",
+ (void *)descr.bad_addr);
+ }
+ str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
+ (void *)descr.chunk_begin,
+ (void *)(descr.chunk_begin + descr.chunk_size));
+ str.append("%s", d.EndLocation());
+ Printf("%s", str.data());
+}
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) {
+ return false;
+ }
+ descr->addr = addr;
+ GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
+ access_size);
+ CHECK_NE(chunk.AllocTid(), kInvalidTid);
+ descr->alloc_tid = chunk.AllocTid();
+ descr->alloc_stack_id = chunk.GetAllocStackId();
+ descr->free_tid = chunk.FreeTid();
+ if (descr->free_tid != kInvalidTid)
+ descr->free_stack_id = chunk.GetFreeStackId();
+ return true;
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
+ HeapAddressDescription descr;
+ if (!GetHeapAddressInformation(addr, access_size, &descr)) {
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+ return false;
+ }
+ descr.Print();
+ return true;
+}
+
+// Stack descriptions
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr) {
+ AsanThread *t = FindThreadByStackAddress(addr);
+ if (!t) return false;
+
+ descr->addr = addr;
+ descr->tid = t->tid();
+ // Try to fetch precise stack frame for this access.
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access)) {
+ descr->frame_descr = nullptr;
+ return true;
+ }
+
+ descr->offset = access.offset;
+ descr->access_size = access_size;
+ descr->frame_pc = access.frame_pc;
+ descr->frame_descr = access.frame_descr;
+
+#if SANITIZER_PPC64V1
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
+#endif
+ descr->frame_pc += 16;
+
+ return true;
+}
+
+static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
+ uptr access_size, uptr prev_var_end,
+ uptr next_var_beg) {
+ uptr var_end = var.beg + var.size;
+ uptr addr_end = addr + access_size;
+ const char *pos_descr = nullptr;
+ // If the variable [var.beg, var_end) is the nearest variable to the
+ // current memory access, indicate it in the log.
+ if (addr >= var.beg) {
+ if (addr_end <= var_end)
+ pos_descr = "is inside"; // May happen if this is a use-after-return.
+ else if (addr < var_end)
+ pos_descr = "partially overflows";
+ else if (addr_end <= next_var_beg &&
+ next_var_beg - addr_end >= addr - var_end)
+ pos_descr = "overflows";
+ } else {
+ if (addr_end > var.beg)
+ pos_descr = "partially underflows";
+ else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
+ pos_descr = "underflows";
+ }
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd)", var.beg, var_end);
+ // Render variable name.
+ str.append(" '");
+ for (uptr i = 0; i < var.name_len; ++i) {
+ str.append("%c", var.name_pos[i]);
+ }
+ str.append("'");
+ if (pos_descr) {
+ Decorator d;
+ // FIXME: we may want to also print the size of the access here,
+ // but in case of accesses generated by memset it may be confusing.
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.EndLocation());
+ } else {
+ str.append("\n");
+ }
+ Printf("%s", str.data());
+}
+
+bool DescribeAddressIfStack(uptr addr, uptr access_size) {
+ StackAddressDescription descr;
+ if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+// Global descriptions
+static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
+ const __asan_global &g) {
+ InternalScopedString str(4096);
+ Decorator d;
+ str.append("%s", d.Location());
+ if (addr < g.beg) {
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
+ } else if (addr + access_size > g.beg + g.size) {
+ if (addr < g.beg + g.size) addr = g.beg + g.size;
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
+ } else {
+ // Can it happen?
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+ }
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.append("%s", d.EndLocation());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
+}
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr) {
+ descr->addr = addr;
+ int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
+ ARRAY_SIZE(descr->globals));
+ descr->size = globals_num;
+ descr->access_size = access_size;
+ return globals_num != 0;
+}
+
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
+ const char *bug_type) {
+ GlobalAddressDescription descr;
+ if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;
+
+ descr.Print(bug_type);
+ return true;
+}
+
+void ShadowAddressDescription::Print() const {
+ Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
+}
+
+void GlobalAddressDescription::Print(const char *bug_type) const {
+ for (int i = 0; i < size; i++) {
+ DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
+ if (bug_type &&
+ 0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
+ reg_sites[i]) {
+ Printf(" registered at:\n");
+ StackDepotGet(reg_sites[i]).Print();
+ }
+ }
+}
+
+void StackAddressDescription::Print() const {
+ Decorator d;
+ char tname[128];
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%d%s", addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+
+ if (!frame_descr) {
+ Printf("%s\n", d.EndLocation());
+ return;
+ }
+ Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
+
+ // Now we print the frame where the alloca has happened.
+ // We print this frame as a stack trace with one element.
+ // The symbolizer may print more than one frame if inlining was involved.
+ // The frame numbers may be different than those in the stack trace printed
+ // previously. That's unfortunate, but I have no better solution,
+ // especially given that the alloca may be from entirely different place
+ // (e.g. use-after-scope, or different thread's stack).
+ Printf("%s", d.EndLocation());
+ StackTrace alloca_stack(&frame_pc, 1);
+ alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars(16);
+ if (!ParseFrameDescription(frame_descr, &vars)) {
+ Printf(
+ "AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n",
+ frame_descr);
+ // 'addr' is a stack address, so return true even if we can't parse frame
+ return;
+ }
+ uptr n_objects = vars.size();
+ // Report the number of stack objects.
+ Printf(" This frame has %zu object(s):\n", n_objects);
+
+ // Report all objects in this frame.
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
+ uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
+ PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
+ next_var_beg);
+ }
+ Printf(
+ "HINT: this may be a false positive if your program uses "
+ "some custom stack unwind mechanism or swapcontext\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
+ DescribeThread(GetThreadContextByTidLocked(tid));
+}
+
+void HeapAddressDescription::Print() const {
+ PrintHeapChunkAccess(addr, chunk_access);
+
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
+ StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
+
+ char tname[128];
+ Decorator d;
+ AsanThreadContext *free_thread = nullptr;
+ if (free_tid != kInvalidTid) {
+ free_thread = GetThreadContextByTidLocked(free_tid);
+ Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
+ free_thread->tid,
+ ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ StackTrace free_stack = GetStackTraceFromId(free_stack_id);
+ free_stack.Print();
+ Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid,
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ } else {
+ Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
+ alloc_thread->tid,
+ ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
+ d.EndAllocation());
+ }
+ alloc_stack.Print();
+ DescribeThread(GetCurrentThread());
+ if (free_thread) DescribeThread(free_thread);
+ DescribeThread(alloc_thread);
+}
+
+AddressDescription::AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry) {
+ if (GetShadowAddressInformation(addr, &data.shadow)) {
+ data.kind = kAddressKindShadow;
+ return;
+ }
+ if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
+ data.kind = kAddressKindHeap;
+ return;
+ }
+
+ bool isStackMemory = false;
+ if (shouldLockThreadRegistry) {
+ ThreadRegistryLock l(&asanThreadRegistry());
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ } else {
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ }
+ if (isStackMemory) {
+ data.kind = kAddressKindStack;
+ return;
+ }
+
+ if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
+ data.kind = kAddressKindGlobal;
+ return;
+ }
+ data.kind = kAddressKindWild;
+ addr = 0;
+}
+
+void PrintAddressDescription(uptr addr, uptr access_size,
+ const char *bug_type) {
+ ShadowAddressDescription shadow_descr;
+ if (GetShadowAddressInformation(addr, &shadow_descr)) {
+ shadow_descr.Print();
+ return;
+ }
+
+ GlobalAddressDescription global_descr;
+ if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
+ global_descr.Print(bug_type);
+ return;
+ }
+
+ StackAddressDescription stack_descr;
+ if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
+ stack_descr.Print();
+ return;
+ }
+
+ HeapAddressDescription heap_descr;
+ if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
+ heap_descr.Print();
+ return;
+ }
+
+ // We exhausted our possibilities. Bail out.
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+}
+} // namespace __asan
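
PrintAccessAndVarIntersection above decides how an access relates to one stack variable. Stripped of the nearest-neighbor tie-break (the prev_var_end/next_var_beg distance checks, which suppress the label when another variable is closer to the access), the classification reduces to a few range comparisons. A self-contained restatement with made-up bounds:

#include <cstdio>

// Same labels as PrintAccessAndVarIntersection, minus the tie-break against
// neighboring variables. [beg, end) is the variable, [addr, addr_end) the access.
const char *Classify(unsigned long beg, unsigned long end,
                     unsigned long addr, unsigned long addr_end) {
  if (addr >= beg) {
    if (addr_end <= end) return "is inside";
    if (addr < end) return "partially overflows";
    return "overflows";
  }
  if (addr_end > beg) return "partially underflows";
  return "underflows";
}

int main() {
  // Variable [32, 42), access [40, 48): starts inside, runs past the end.
  std::printf("%s\n", Classify(32, 42, 40, 48));  // partially overflows
}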
diff --git a/libsanitizer/asan/asan_descriptions.h b/libsanitizer/asan/asan_descriptions.h
new file mode 100644
index 00000000000..584b9ba6491
--- /dev/null
+++ b/libsanitizer/asan/asan_descriptions.h
@@ -0,0 +1,251 @@
+//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_descriptions.cc.
+// TODO(filcab): Most struct definitions should move to the interface headers.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_DESCRIPTIONS_H
+#define ASAN_DESCRIPTIONS_H
+
+#include "asan_allocator.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+
+namespace __asan {
+
+void DescribeThread(AsanThreadContext *context);
+static inline void DescribeThread(AsanThread *t) {
+ if (t) DescribeThread(t->context());
+}
+const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
+ uptr buff_len);
+const char *ThreadNameWithParenthesis(u32 tid, char buff[], uptr buff_len);
+
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Access() { return Blue(); }
+ const char *EndAccess() { return Default(); }
+ const char *Location() { return Green(); }
+ const char *EndLocation() { return Default(); }
+ const char *Allocation() { return Magenta(); }
+ const char *EndAllocation() { return Default(); }
+
+ const char *ShadowByte(u8 byte) {
+ switch (byte) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ return Red();
+ case kAsanHeapFreeMagic:
+ return Magenta();
+ case kAsanStackLeftRedzoneMagic:
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ return Red();
+ case kAsanStackAfterReturnMagic:
+ return Magenta();
+ case kAsanInitializationOrderMagic:
+ return Cyan();
+ case kAsanUserPoisonedMemoryMagic:
+ case kAsanContiguousContainerOOBMagic:
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ return Blue();
+ case kAsanStackUseAfterScopeMagic:
+ return Magenta();
+ case kAsanGlobalRedzoneMagic:
+ return Red();
+ case kAsanInternalHeapMagic:
+ return Yellow();
+ case kAsanIntraObjectRedzone:
+ return Yellow();
+ default:
+ return Default();
+ }
+ }
+ const char *EndShadowByte() { return Default(); }
+ const char *MemoryByte() { return Magenta(); }
+ const char *EndMemoryByte() { return Default(); }
+};
+
+enum ShadowKind : u8 {
+ kShadowKindLow,
+ kShadowKindGap,
+ kShadowKindHigh,
+};
+static const char *const ShadowNames[] = {"low shadow", "shadow gap",
+ "high shadow"};
+
+struct ShadowAddressDescription {
+ uptr addr;
+ ShadowKind kind;
+ u8 shadow_byte;
+
+ void Print() const;
+};
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
+bool DescribeAddressIfShadow(uptr addr);
+
+enum AccessType {
+ kAccessTypeLeft,
+ kAccessTypeRight,
+ kAccessTypeInside,
+ kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
+};
+
+struct ChunkAccess {
+ uptr bad_addr;
+ sptr offset;
+ uptr chunk_begin;
+ uptr chunk_size;
+ u32 access_type : 2;
+ u32 alloc_type : 2;
+};
+
+struct HeapAddressDescription {
+ uptr addr;
+ uptr alloc_tid;
+ uptr free_tid;
+ u32 alloc_stack_id;
+ u32 free_stack_id;
+ ChunkAccess chunk_access;
+
+ void Print() const;
+};
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr);
+bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
+
+struct StackAddressDescription {
+ uptr addr;
+ uptr tid;
+ uptr offset;
+ uptr frame_pc;
+ uptr access_size;
+ const char *frame_descr;
+
+ void Print() const;
+};
+
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr);
+
+struct GlobalAddressDescription {
+ uptr addr;
+ // Assume address is close to at most four globals.
+ static const int kMaxGlobals = 4;
+ __asan_global globals[kMaxGlobals];
+ u32 reg_sites[kMaxGlobals];
+ uptr access_size;
+ u8 size;
+
+ void Print(const char *bug_type = "") const;
+};
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr);
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
+
+// General function to describe an address. Will try to describe the address as
+// a shadow, global (variable), stack, or heap address.
+// bug_type is optional and is used for checking if we're reporting an
+// initialization-order-fiasco
+// The proper access_size should be passed for stack, global, and heap
+// addresses. Defaults to 1.
+// Each of the *AddressDescription functions has its own Print() member, which
+// may take access_size and bug_type parameters if needed.
+void PrintAddressDescription(uptr addr, uptr access_size = 1,
+ const char *bug_type = "");
+
+enum AddressKind {
+ kAddressKindWild,
+ kAddressKindShadow,
+ kAddressKindHeap,
+ kAddressKindStack,
+ kAddressKindGlobal,
+};
+
+class AddressDescription {
+ struct AddressDescriptionData {
+ AddressKind kind;
+ union {
+ ShadowAddressDescription shadow;
+ HeapAddressDescription heap;
+ StackAddressDescription stack;
+ GlobalAddressDescription global;
+ uptr addr;
+ };
+ };
+
+ AddressDescriptionData data;
+
+ public:
+ AddressDescription() = default;
+  // shouldLockThreadRegistry allows us to skip locking if we're sure we've
+  // already done it.
+ AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
+ : AddressDescription(addr, 1, shouldLockThreadRegistry) {}
+ AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry = true);
+
+ uptr Address() const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ return data.addr;
+ case kAddressKindShadow:
+ return data.shadow.addr;
+ case kAddressKindHeap:
+ return data.heap.addr;
+ case kAddressKindStack:
+ return data.stack.addr;
+ case kAddressKindGlobal:
+ return data.global.addr;
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+ void Print(const char *bug_descr = nullptr) const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ Printf("Address %p is a wild pointer.\n", data.addr);
+ return;
+ case kAddressKindShadow:
+ return data.shadow.Print();
+ case kAddressKindHeap:
+ return data.heap.Print();
+ case kAddressKindStack:
+ return data.stack.Print();
+ case kAddressKindGlobal:
+ // initialization-order-fiasco has a special Print()
+ return data.global.Print(bug_descr);
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+
+ void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
+
+ const ShadowAddressDescription *AsShadow() const {
+ return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
+ }
+ const HeapAddressDescription *AsHeap() const {
+ return data.kind == kAddressKindHeap ? &data.heap : nullptr;
+ }
+ const StackAddressDescription *AsStack() const {
+ return data.kind == kAddressKindStack ? &data.stack : nullptr;
+ }
+ const GlobalAddressDescription *AsGlobal() const {
+ return data.kind == kAddressKindGlobal ? &data.global : nullptr;
+ }
+};
+
+} // namespace __asan
+
+#endif // ASAN_DESCRIPTIONS_H
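
A minimal usage sketch of the new description machinery (an editorial illustration, not part of the merged patch; the helper name is hypothetical): reporting code constructs an AddressDescription once and lets the tagged union dispatch, using the As*() accessors when kind-specific data is needed.

    // Hypothetical caller inside the __asan runtime.
    using namespace __asan;
    static void DescribeForReport(uptr addr, uptr access_size) {
      AddressDescription descr(addr, access_size);  // probes shadow/heap/stack/global
      if (const HeapAddressDescription *heap = descr.AsHeap()) {
        // Kind-specific data, e.g. heap->chunk_access.offset, is reachable here.
        (void)heap;
      }
      descr.Print();  // dispatches on AddressDescriptionData::kind
    }
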
diff --git a/libsanitizer/asan/asan_errors.cc b/libsanitizer/asan/asan_errors.cc
new file mode 100644
index 00000000000..73c4cca843b
--- /dev/null
+++ b/libsanitizer/asan/asan_errors.cc
@@ -0,0 +1,494 @@
+//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan implementation for error structures.
+//===----------------------------------------------------------------------===//
+
+#include "asan_errors.h"
+#include <signal.h>
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+void ErrorStackOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: stack-overflow on address %p"
+ " (pc %p bp %p sp %p T%d)\n",
+ (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ BufferedStackTrace stack;
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+ ReportErrorSummary("stack-overflow", &stack);
+}
+
+static void MaybeDumpInstructionBytes(uptr pc) {
+ if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
+ }
+ str.append("\n");
+ } else {
+ str.append("unaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+void ErrorDeadlySignal::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ const char *description = DescribeSignalOrException(signo);
+ Report(
+ "ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
+ "T%d)\n",
+ description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
+ Printf("%s", d.EndWarning());
+ if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
+ if (is_memory_access) {
+ const char *access_type =
+ write_flag == SignalContext::WRITE
+ ? "WRITE"
+ : (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ Report("The signal is caused by a %s memory access.\n", access_type);
+ if (addr < GetPageSizeCached())
+ Report("Hint: address points to the zero page.\n");
+ }
+ scariness.Print();
+ BufferedStackTrace stack;
+ GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+ MaybeDumpInstructionBytes(pc);
+ Printf("AddressSanitizer can not provide additional info.\n");
+ ReportErrorSummary(description, &stack);
+}
+
+void ErrorDoubleFree::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: attempting double-free on %p in "
+ "thread T%d%s:\n",
+ addr_description.addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
+ second_free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary("double-free", &stack);
+}
+
+void ErrorNewDeleteSizeMismatch::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: new-delete-type-mismatch on %p in thread "
+ "T%d%s:\n",
+ addr_description.addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
+ Printf(
+ " size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ addr_description.chunk_access.chunk_size, delete_size);
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary("new-delete-type-mismatch", &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
+void ErrorFreeNotMalloced::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ char tname[128];
+ Report(
+ "ERROR: AddressSanitizer: attempting free on address "
+ "which was not malloc()-ed: %p in thread T%d%s\n",
+ addr_description.Address(), tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
+ Printf("%s", d.EndWarning());
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary("bad-free", &stack);
+}
+
+void ErrorAllocTypeMismatch::Print() {
+ static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
+ "operator new []"};
+ static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
+ "operator delete []"};
+ CHECK_NE(alloc_type, dealloc_type);
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
+ alloc_names[alloc_type], dealloc_names[dealloc_type],
+ addr_description.addr);
+ Printf("%s", d.EndWarning());
+ CHECK_GT(dealloc_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary("alloc-dealloc-mismatch", &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+}
+
+void ErrorMallocUsableSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
+ "pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary("bad-malloc_usable_size", stack);
+}
+
+void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call "
+ "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.EndWarning());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
+}
+
+void ErrorStringFunctionMemoryRangesOverlap::Print() {
+ Decorator d;
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ Printf("%s", d.Warning());
+ Report(
+ "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
+ "overlap\n",
+ bug_type, addr1_description.Address(),
+ addr1_description.Address() + length1, addr2_description.Address(),
+ addr2_description.Address() + length2);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ stack->Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ErrorStringFunctionSizeOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ const char *bug_type = "negative-size-param";
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
+ Printf("%s", d.EndWarning());
+ scariness.Print();
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ErrorBadParamsToAnnotateContiguousContainer::Print() {
+ Report(
+ "ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!IsAligned(beg, granularity))
+ Report("ERROR: beg is not aligned by %d\n", granularity);
+ stack->Print();
+ ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+}
+
+void ErrorODRViolation::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: odr-violation (%p):\n", global1.beg);
+ Printf("%s", d.EndWarning());
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, global1);
+ PrintGlobalLocation(&g2_loc, global2);
+ Printf(" [1] size=%zd '%s' %s\n", global1.size,
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", global2.size,
+ MaybeDemangleGlobalName(global2.name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ StackDepotGet(stack_id1).Print();
+ Printf(" [2]:\n");
+ StackDepotGet(stack_id2).Print();
+ }
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ InternalScopedString error_msg(256);
+ error_msg.append("odr-violation: global '%s' at %s",
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
+}
+
+void ErrorInvalidPointerPair::Print() {
+ const char *bug_type = "invalid-pointer-pair";
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n",
+ addr1_description.Address(), addr2_description.Address());
+ Printf("%s", d.EndWarning());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(bug_type, &stack);
+}
+
+static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
+ return s[-1] > 127 && s[1] > 127;
+}
+
+ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
+ bool is_write_, uptr access_size_)
+ : ErrorBase(tid),
+ addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ access_size(access_size_),
+ is_write(is_write_),
+ shadow_val(0) {
+ scariness.Clear();
+ if (access_size) {
+ if (access_size <= 9) {
+ char desr[] = "?-byte";
+ desr[0] = '0' + access_size;
+ scariness.Scare(access_size + access_size / 2, desr);
+ } else if (access_size >= 10) {
+ scariness.Scare(15, "multi-byte");
+ }
+ is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
+
+ // Determine the error type.
+ bug_descr = "unknown-crash";
+ if (AddrIsInMem(addr)) {
+ u8 *shadow_addr = (u8 *)MemToShadow(addr);
+ // If we are accessing 16 bytes, look at the second shadow byte.
+ if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+ // If we are in the partial right redzone, look at the next shadow byte.
+ if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
+ bool far_from_bounds = false;
+ shadow_val = *shadow_addr;
+ int bug_type_score = 0;
+ // For use-after-frees reads are almost as bad as writes.
+ int read_after_free_bonus = 0;
+ switch (shadow_val) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ bug_descr = "heap-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanHeapFreeMagic:
+ bug_descr = "heap-use-after-free";
+ bug_type_score = 20;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanStackLeftRedzoneMagic:
+ bug_descr = "stack-buffer-underflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanInitializationOrderMagic:
+ bug_descr = "initialization-order-fiasco";
+ bug_type_score = 1;
+ break;
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ bug_descr = "stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanStackAfterReturnMagic:
+ bug_descr = "stack-use-after-return";
+ bug_type_score = 30;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanUserPoisonedMemoryMagic:
+ bug_descr = "use-after-poison";
+ bug_type_score = 20;
+ break;
+ case kAsanContiguousContainerOOBMagic:
+ bug_descr = "container-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanStackUseAfterScopeMagic:
+ bug_descr = "stack-use-after-scope";
+ bug_type_score = 10;
+ break;
+ case kAsanGlobalRedzoneMagic:
+ bug_descr = "global-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanIntraObjectRedzone:
+ bug_descr = "intra-object-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ bug_descr = "dynamic-stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ }
+ scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
+ if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
+ }
+ }
+}
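
A worked example of the accumulation above, using the constants in this constructor: a 4-byte write that lands on kAsanHeapFreeMagic shadow scores Scare(4 + 4/2, "4-byte") = 6, then Scare(20, "write"), then Scare(20 + 0, "heap-use-after-free"), for a total scariness of 46. The same access as a read scores 6 + 1 + (20 + 18) = 45, since read_after_free_bonus makes use-after-free reads nearly as scary as writes.
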
+
+static void PrintContainerOverflowHint() {
+ Printf("HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_container_overflow=0.\n"
+ "If you suspect a false positive see also: "
+ "https://github.com/google/sanitizers/wiki/"
+ "AddressSanitizerContainerOverflow.\n");
+}
+
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
+ PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
+ kAsanContiguousContainerOOBMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
+ PrintShadowByte(str, " Intra object redzone: ",
+ kAsanIntraObjectRedzone);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
+ PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
+}
+
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
+ Decorator d;
+ if (before) str->append("%s%p:", before, bytes);
+ for (uptr i = 0; i < n; i++) {
+ u8 *p = bytes + i;
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *after = p == guilty ? "]" : "";
+ PrintShadowByte(str, before, *p, after);
+ }
+ str->append("\n");
+}
+
+static void PrintShadowMemoryForAddress(uptr addr) {
+ if (!AddrIsInMem(addr)) return;
+ uptr shadow_addr = MemToShadow(addr);
+ const uptr n_bytes_per_row = 16;
+ uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
+ for (int i = -5; i <= 5; i++) {
+ const char *prefix = (i == 0) ? "=>" : " ";
+ PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
+ (u8 *)shadow_addr, n_bytes_per_row);
+ }
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
+}
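
A quick sanity check on the window size above: with n_bytes_per_row = 16 and rows running from -5 to 5, the dump covers 11 * 16 = 176 shadow bytes, which at the usual SHADOW_GRANULARITY of 8 corresponds to 1408 application bytes centered on the row containing the buggy address.
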
+
+void ErrorGeneric::Print() {
+ Decorator d;
+ Printf("%s", d.Warning());
+ uptr addr = addr_description.Address();
+ Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
+ bug_descr, (void *)addr, pc, bp, sp);
+ Printf("%s", d.EndWarning());
+
+ char tname[128];
+ Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
+ access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
+ (void *)addr, tid,
+ ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
+
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+
+ // Pass bug_descr because we have a special case for
+ // initialization-order-fiasco
+ addr_description.Print(bug_descr);
+ if (shadow_val == kAsanContiguousContainerOOBMagic)
+ PrintContainerOverflowHint();
+ ReportErrorSummary(bug_descr, &stack);
+ PrintShadowMemoryForAddress(addr);
+}
+
+} // namespace __asan
diff --git a/libsanitizer/asan/asan_errors.h b/libsanitizer/asan/asan_errors.h
new file mode 100644
index 00000000000..6262dcf3506
--- /dev/null
+++ b/libsanitizer/asan/asan_errors.h
@@ -0,0 +1,376 @@
+//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for error structures.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ERRORS_H
+#define ASAN_ERRORS_H
+
+#include "asan_descriptions.h"
+#include "asan_scariness_score.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+struct ErrorBase {
+ ErrorBase() = default;
+ explicit ErrorBase(u32 tid_) : tid(tid_) {}
+ ScarinessScoreBase scariness;
+ u32 tid;
+};
+
+struct ErrorStackOverflow : ErrorBase {
+ uptr addr, pc, bp, sp;
+ // ErrorStackOverflow never owns the context.
+ void *context;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStackOverflow() = default;
+ ErrorStackOverflow(u32 tid, const SignalContext &sig)
+ : ErrorBase(tid),
+ addr(sig.addr),
+ pc(sig.pc),
+ bp(sig.bp),
+ sp(sig.sp),
+ context(sig.context) {
+ scariness.Clear();
+ scariness.Scare(10, "stack-overflow");
+ }
+ void Print();
+};
+
+struct ErrorDeadlySignal : ErrorBase {
+ uptr addr, pc, bp, sp;
+ // ErrorDeadlySignal never owns the context.
+ void *context;
+ int signo;
+ SignalContext::WriteFlag write_flag;
+ bool is_memory_access;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorDeadlySignal() = default;
+ ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
+ : ErrorBase(tid),
+ addr(sig.addr),
+ pc(sig.pc),
+ bp(sig.bp),
+ sp(sig.sp),
+ context(sig.context),
+ signo(signo_),
+ write_flag(sig.write_flag),
+ is_memory_access(sig.is_memory_access) {
+ scariness.Clear();
+ if (is_memory_access) {
+ if (addr < GetPageSizeCached()) {
+ scariness.Scare(10, "null-deref");
+ } else if (addr == pc) {
+ scariness.Scare(60, "wild-jump");
+ } else if (write_flag == SignalContext::WRITE) {
+ scariness.Scare(30, "wild-addr-write");
+ } else if (write_flag == SignalContext::READ) {
+ scariness.Scare(20, "wild-addr-read");
+ } else {
+ scariness.Scare(25, "wild-addr");
+ }
+ } else {
+ scariness.Scare(10, "signal");
+ }
+ }
+ void Print();
+};
+
+struct ErrorDoubleFree : ErrorBase {
+ // ErrorDoubleFree doesn't own the stack trace.
+ const BufferedStackTrace *second_free_stack;
+ HeapAddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorDoubleFree() = default;
+ ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid), second_free_stack(stack) {
+ CHECK_GT(second_free_stack->size, 0);
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(42, "double-free");
+ }
+ void Print();
+};
+
+struct ErrorNewDeleteSizeMismatch : ErrorBase {
+ // ErrorNewDeleteSizeMismatch doesn't own the stack trace.
+ const BufferedStackTrace *free_stack;
+ HeapAddressDescription addr_description;
+ uptr delete_size;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorNewDeleteSizeMismatch() = default;
+ ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ uptr delete_size_)
+ : ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(10, "new-delete-type-mismatch");
+ }
+ void Print();
+};
+
+struct ErrorFreeNotMalloced : ErrorBase {
+ // ErrorFreeNotMalloced doesn't own the stack trace.
+ const BufferedStackTrace *free_stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorFreeNotMalloced() = default;
+ ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid),
+ free_stack(stack),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ scariness.Scare(40, "bad-free");
+ }
+ void Print();
+};
+
+struct ErrorAllocTypeMismatch : ErrorBase {
+ // ErrorAllocTypeMismatch doesn't own the stack trace.
+ const BufferedStackTrace *dealloc_stack;
+ HeapAddressDescription addr_description;
+ AllocType alloc_type, dealloc_type;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorAllocTypeMismatch() = default;
+ ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ AllocType alloc_type_, AllocType dealloc_type_)
+ : ErrorBase(tid),
+ dealloc_stack(stack),
+ alloc_type(alloc_type_),
+ dealloc_type(dealloc_type_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ scariness.Clear();
+ scariness.Scare(10, "alloc-dealloc-mismatch");
+ };
+ void Print();
+};
+
+struct ErrorMallocUsableSizeNotOwned : ErrorBase {
+ // ErrorMallocUsableSizeNotOwned doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorMallocUsableSizeNotOwned() = default;
+ ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ }
+ void Print();
+};
+
+struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
+ // ErrorSanitizerGetAllocatedSizeNotOwned doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorSanitizerGetAllocatedSizeNotOwned() = default;
+ ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
+ uptr addr)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {
+ scariness.Clear();
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
+ // ErrorStringFunctionMemoryRangesOverlap doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ uptr length1, length2;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ const char *function;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStringFunctionMemoryRangesOverlap() = default;
+ ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
+ uptr addr1, uptr length1_, uptr addr2,
+ uptr length2_, const char *function_)
+ : ErrorBase(tid),
+ stack(stack_),
+ length1(length1_),
+ length2(length2_),
+ addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
+ function(function_) {
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ scariness.Clear();
+ scariness.Scare(10, bug_type);
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionSizeOverflow : ErrorBase {
+ // ErrorStringFunctionSizeOverflow doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ uptr size;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorStringFunctionSizeOverflow() = default;
+ ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
+ uptr addr, uptr size_)
+ : ErrorBase(tid),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false),
+ size(size_) {
+ scariness.Clear();
+ scariness.Scare(10, "negative-size-param");
+ }
+ void Print();
+};
+
+struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
+ // ErrorBadParamsToAnnotateContiguousContainer doesn't own the stack trace.
+ const BufferedStackTrace *stack;
+ uptr beg, end, old_mid, new_mid;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorBadParamsToAnnotateContiguousContainer() = default;
+ // PS4: Do we want an AddressDescription for beg?
+ ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
+ BufferedStackTrace *stack_,
+ uptr beg_, uptr end_,
+ uptr old_mid_, uptr new_mid_)
+ : ErrorBase(tid),
+ stack(stack_),
+ beg(beg_),
+ end(end_),
+ old_mid(old_mid_),
+ new_mid(new_mid_) {}
+ void Print();
+};
+
+struct ErrorODRViolation : ErrorBase {
+ __asan_global global1, global2;
+ u32 stack_id1, stack_id2;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorODRViolation() = default;
+ ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
+ const __asan_global *g2, u32 stack_id2_)
+ : ErrorBase(tid),
+ global1(*g1),
+ global2(*g2),
+ stack_id1(stack_id1_),
+ stack_id2(stack_id2_) {}
+ void Print();
+};
+
+struct ErrorInvalidPointerPair : ErrorBase {
+ uptr pc, bp, sp;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorInvalidPointerPair() = default;
+ ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
+ uptr p2)
+ : ErrorBase(tid),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {}
+ void Print();
+};
+
+struct ErrorGeneric : ErrorBase {
+ AddressDescription addr_description;
+ uptr pc, bp, sp;
+ uptr access_size;
+ const char *bug_descr;
+ bool is_write;
+ u8 shadow_val;
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+ // constructor
+ ErrorGeneric() = default;
+ ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
+ uptr access_size_);
+ void Print();
+};
+
+// clang-format off
+#define ASAN_FOR_EACH_ERROR_KIND(macro) \
+ macro(StackOverflow) \
+ macro(DeadlySignal) \
+ macro(DoubleFree) \
+ macro(NewDeleteSizeMismatch) \
+ macro(FreeNotMalloced) \
+ macro(AllocTypeMismatch) \
+ macro(MallocUsableSizeNotOwned) \
+ macro(SanitizerGetAllocatedSizeNotOwned) \
+ macro(StringFunctionMemoryRangesOverlap) \
+ macro(StringFunctionSizeOverflow) \
+ macro(BadParamsToAnnotateContiguousContainer) \
+ macro(ODRViolation) \
+ macro(InvalidPointerPair) \
+ macro(Generic)
+// clang-format on
+
+#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
+#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
+#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
+ ErrorDescription(Error##name const &e) : kind(kErrorKind##name), name(e) {}
+#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
+ case kErrorKind##name: \
+ return name.Print();
+
+enum ErrorKind {
+ kErrorKindInvalid = 0,
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
+};
+
+struct ErrorDescription {
+ ErrorKind kind;
+ // We're using a tagged union because it allows us to have a trivially
+  // copyable type and use the same structures as the public interface.
+  //
+  // We can add a wrapper around it to make it "more C++-like", but that would
+ // add a lot of code and the benefit wouldn't be that big.
+ union {
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
+ };
+
+ ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
+
+ bool IsValid() { return kind != kErrorKindInvalid; }
+ void Print() {
+ switch (kind) {
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
+ case kErrorKindInvalid:
+ CHECK(0);
+ }
+ CHECK(0);
+ }
+};
+
+#undef ASAN_FOR_EACH_ERROR_KIND
+#undef ASAN_DEFINE_ERROR_KIND
+#undef ASAN_ERROR_DESCRIPTION_MEMBER
+#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
+#undef ASAN_ERROR_DESCRIPTION_PRINT
+
+} // namespace __asan
+
+#endif // ASAN_ERRORS_H
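
To make the X-macro machinery above concrete, this is roughly what a single kind expands to (an editorial sketch; the real expansion covers all fourteen kinds, and the variables in the last two lines are assumed to exist):

    // ASAN_DEFINE_ERROR_KIND(DoubleFree) yields the enumerator:
    //   kErrorKindDoubleFree,
    // ASAN_ERROR_DESCRIPTION_MEMBER(DoubleFree) yields the union member:
    //   ErrorDoubleFree DoubleFree;
    // ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(DoubleFree) yields the converting
    // constructor:
    //   ErrorDescription(ErrorDoubleFree const &e)
    //       : kind(kErrorKindDoubleFree), DoubleFree(e) {}
    // so reporting code can simply write:
    ErrorDescription error(ErrorDoubleFree(tid, free_stack, addr));
    if (error.IsValid()) error.Print();
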
diff --git a/libsanitizer/asan/asan_fake_stack.cc b/libsanitizer/asan/asan_fake_stack.cc
index de190d11a16..bf7566a334e 100644
--- a/libsanitizer/asan/asan_fake_stack.cc
+++ b/libsanitizer/asan/asan_fake_stack.cc
@@ -29,7 +29,7 @@ ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
- for (uptr i = 0; i < (1U << class_id); i++) {
+ for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
SanitizerBreakOptimization(nullptr);
@@ -98,7 +98,7 @@ FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
// if the signal arrives between checking and setting flags[pos], the
// signal handler's fake stack will start from a different hint_position
// and so will not touch this particular byte. So, it is safe to do this
- // with regular non-atimic load and store (at least I was not able to make
+ // with regular non-atomic load and store (at least I was not able to make
// this code crash).
if (flags[pos]) continue;
flags[pos] = 1;
@@ -119,7 +119,7 @@ uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
uptr class_id = (ptr - beg) >> stack_size_log;
uptr base = beg + (class_id << stack_size_log);
CHECK_LE(base, ptr);
- CHECK_LT(ptr, base + (1UL << stack_size_log));
+ CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
uptr res = base + pos * BytesInSizeClass(class_id);
*frame_end = res + BytesInSizeClass(class_id);
diff --git a/libsanitizer/asan/asan_fake_stack.h b/libsanitizer/asan/asan_fake_stack.h
index 550a86e2972..6ac61ddd24e 100644
--- a/libsanitizer/asan/asan_fake_stack.h
+++ b/libsanitizer/asan/asan_fake_stack.h
@@ -50,7 +50,7 @@ struct FakeFrame {
// Allocate() flips the appropriate allocation flag atomically, thus achieving
// async-signal safety.
// This allocator does not have quarantine per se, but it tries to allocate the
-// frames in round robin fasion to maximize the delay between a deallocation
+// frames in round robin fashion to maximize the delay between a deallocation
// and the next allocation.
class FakeStack {
static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
@@ -67,12 +67,12 @@ class FakeStack {
// stack_size_log is at least 15 (stack_size >= 32K).
static uptr SizeRequiredForFlags(uptr stack_size_log) {
- return 1UL << (stack_size_log + 1 - kMinStackFrameSizeLog);
+ return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
}
// Each size class occupies stack_size bytes.
static uptr SizeRequiredForFrames(uptr stack_size_log) {
- return (1ULL << stack_size_log) * kNumberOfSizeClasses;
+ return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
}
// Number of bytes requires for the whole object.
@@ -89,20 +89,20 @@ class FakeStack {
// and so on.
static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
uptr t = kNumberOfSizeClasses - 1 - class_id;
- const uptr all_ones = (1 << (kNumberOfSizeClasses - 1)) - 1;
+ const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
return ((all_ones >> t) << t) << (stack_size_log - 15);
}
static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
- return 1UL << (stack_size_log - kMinStackFrameSizeLog - class_id);
+ return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
}
- // Divide n by the numbe of frames in size class.
+ // Divide n by the number of frames in size class.
static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
return n & (NumberOfFrames(stack_size_log, class_id) - 1);
}
- // The the pointer to the flags of the given class_id.
+ // The pointer to the flags of the given class_id.
u8 *GetFlags(uptr stack_size_log, uptr class_id) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
FlagsOffset(stack_size_log, class_id);
@@ -112,7 +112,8 @@ class FakeStack {
u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
return reinterpret_cast<u8 *>(this) + kFlagsOffset +
SizeRequiredForFlags(stack_size_log) +
- (1 << stack_size_log) * class_id + BytesInSizeClass(class_id) * pos;
+ (((uptr)1) << stack_size_log) * class_id +
+ BytesInSizeClass(class_id) * pos;
}
// Allocate the fake frame.
@@ -135,7 +136,7 @@ class FakeStack {
// Number of bytes in a fake frame of this size class.
static uptr BytesInSizeClass(uptr class_id) {
- return 1UL << (class_id + kMinStackFrameSizeLog);
+ return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
}
// The fake frame is guaranteed to have a right redzone.
@@ -157,7 +158,7 @@ class FakeStack {
 static const uptr kFlagsOffset = 4096; // This is where the flags begin.
// Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
COMPILER_CHECK(kNumberOfSizeClasses == 11);
- static const uptr kMaxStackMallocSize = 1 << kMaxStackFrameSizeLog;
+ static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
uptr hint_position_[kNumberOfSizeClasses];
uptr stack_size_log_;
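
A worked instance of the size-class arithmetic in this header: for stack_size_log = 16 and class_id = 0, BytesInSizeClass(0) = 1 << (0 + kMinStackFrameSizeLog) = 64 and NumberOfFrames(16, 0) = 1 << (16 - 6 - 0) = 1024, so the class occupies 64 * 1024 = 1 << 16 bytes, matching the comment that each size class occupies stack_size bytes.
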
diff --git a/libsanitizer/asan/asan_flags.cc b/libsanitizer/asan/asan_flags.cc
index 9a68ad4eaa8..c18174beed0 100644
--- a/libsanitizer/asan/asan_flags.cc
+++ b/libsanitizer/asan/asan_flags.cc
@@ -114,15 +114,7 @@ void InitializeFlags() {
ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS"));
#endif
- // Let activation flags override current settings. On Android they come
- // from a system property. On other platforms this is no-op.
- if (!flags()->start_deactivated) {
- char buf[100];
- GetExtraActivationFlags(buf, sizeof(buf));
- asan_parser.ParseString(buf);
- }
-
- SetVerbosity(common_flags()->verbosity);
+ InitializeCommonFlags();
// TODO(eugenis): dump all flags at verbosity>=2?
if (Verbosity()) ReportUnrecognizedFlags();
@@ -165,6 +157,14 @@ void InitializeFlags() {
(ASAN_LOW_MEMORY) ? 1UL << 6 : 1UL << 8;
f->quarantine_size_mb = kDefaultQuarantineSizeMb;
}
+ if (!f->replace_str && common_flags()->intercept_strlen) {
+ Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
+ "Use intercept_strlen=0 to disable it.");
+ }
+ if (!f->replace_str && common_flags()->intercept_strchr) {
+ Report("WARNING: strchr* interceptors are enabled even though "
+ "replace_str=0. Use intercept_strchr=0 to disable them.");
+ }
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_flags.inc b/libsanitizer/asan/asan_flags.inc
index 563b464bff1..34493eededf 100644
--- a/libsanitizer/asan/asan_flags.inc
+++ b/libsanitizer/asan/asan_flags.inc
@@ -41,10 +41,7 @@ ASAN_FLAG(
"If set, uses custom wrappers and replacements for libc string functions "
"to find more errors.")
ASAN_FLAG(bool, replace_intrin, true,
- "If set, uses custom wrappers for memset/memcpy/memmove intinsics.")
-ASAN_FLAG(bool, mac_ignore_invalid_free, false,
- "Ignore invalid free() calls to work around some bugs. Used on OS X "
- "only.")
+ "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
ASAN_FLAG(bool, detect_stack_use_after_return, false,
"Enables stack-use-after-return checking at run-time.")
ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
@@ -78,6 +75,8 @@ ASAN_FLAG(bool, print_stats, false,
"Print various statistics after printing an error message or if "
"atexit=1.")
ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+ASAN_FLAG(bool, print_scariness, false,
+ "Print the scariness score. Experimental.")
ASAN_FLAG(bool, atexit, false,
"If set, prints ASan exit stats even after program terminates "
"successfully.")
@@ -97,15 +96,15 @@ ASAN_FLAG(bool, poison_array_cookie, true,
"Poison (or not) the array cookie after operator new[].")
// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
-// https://code.google.com/p/address-sanitizer/issues/detail?id=131
-// https://code.google.com/p/address-sanitizer/issues/detail?id=309
+// https://github.com/google/sanitizers/issues/131
+// https://github.com/google/sanitizers/issues/309
// TODO(glider,timurrrr): Fix known issues and enable this back.
ASAN_FLAG(bool, alloc_dealloc_mismatch,
- (SANITIZER_MAC == 0) && (SANITIZER_WINDOWS == 0),
+ !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
"Report errors on malloc/delete, new/free, new/delete[], etc.")
ASAN_FLAG(bool, new_delete_type_mismatch, true,
- "Report errors on mismatch betwen size of new and delete.")
+ "Report errors on mismatch between size of new and delete.")
ASAN_FLAG(
bool, strict_init_order, false,
"If true, assume that dynamic initializers can never access globals from "
@@ -124,8 +123,8 @@ ASAN_FLAG(
"The bigger the value the harder we try.")
ASAN_FLAG(
bool, detect_container_overflow, true,
- "If true, honor the container overflow annotations. "
- "See https://code.google.com/p/address-sanitizer/wiki/ContainerOverflow")
+ "If true, honor the container overflow annotations. See "
+ "https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow")
ASAN_FLAG(int, detect_odr_violation, 2,
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
@@ -136,3 +135,5 @@ ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "
"(WARNING: USE AT YOUR OWN RISK!)")
+ASAN_FLAG(bool, use_odr_indicator, false,
+ "Use special ODR indicator symbol for ODR violation detection")
diff --git a/libsanitizer/asan/asan_globals.cc b/libsanitizer/asan/asan_globals.cc
index f3531cbd133..007fce72be0 100644
--- a/libsanitizer/asan/asan_globals.cc
+++ b/libsanitizer/asan/asan_globals.cc
@@ -23,6 +23,7 @@
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
namespace __asan {
@@ -121,16 +122,68 @@ int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
return res;
}
-bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr) {
- Global g = {};
- if (GetGlobalsForAddress(addr, &g, nullptr, 1)) {
- internal_strncpy(descr->name, g.name, descr->name_size);
- descr->region_address = g.beg;
- descr->region_size = g.size;
- descr->region_kind = "global";
- return true;
+enum GlobalSymbolState {
+ UNREGISTERED = 0,
+ REGISTERED = 1
+};
+
+// Check ODR violation for given global G via special ODR indicator. We use
+// this method in case compiler instruments global variables through their
+// local aliases.
+static void CheckODRViolationViaIndicator(const Global *g) {
+ u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
+ if (*odr_indicator == UNREGISTERED) {
+ *odr_indicator = REGISTERED;
+ return;
+ }
+  // If *odr_indicator is REGISTERED, some module has already registered an
+  // externally visible symbol with the same name. This is an ODR violation.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->odr_indicator == l->g->odr_indicator &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+}
+
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case compiler doesn't use private aliases for global
+// variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger,
+    // the entire redzone of the second global may lie within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
}
- return false;
+}
+
+// Clang provides two different ways to protect global variables: it can
+// poison the global itself or its private alias. In the former case we may
+// poison the same symbol multiple times, which lets us cheaply detect ODR
+// violations: if we try to poison an already poisoned global, we have an
+// ODR violation error.
+// In the latter case, we poison each symbol exactly once, so we use a
+// special indicator symbol to perform a similar check.
+// In either case, the compiler provides a special odr_indicator field in the
+// Global structure that can contain two kinds of values:
+// 1) A non-zero value. In this case, odr_indicator is the address of the
+// corresponding indicator variable for the given global.
+// 2) Zero. This means that we don't use private aliases for global variables
+// and can freely check for ODR violations with the first method.
+//
+// This routine chooses between two different methods of ODR violation
+// detection.
+static inline bool UseODRIndicator(const Global *g) {
+ // Use ODR indicator method iff use_odr_indicator flag is set and
+ // indicator symbol address is not 0.
+ return flags()->use_odr_indicator && g->odr_indicator > 0;
}
// Register a global variable.
@@ -142,24 +195,24 @@ static void RegisterGlobal(const Global *g) {
ReportGlobal(*g, "Added");
CHECK(flags()->report_globals);
CHECK(AddrIsInMem(g->beg));
- CHECK(AddrIsAlignedByGranularity(g->beg));
+ if (!AddrIsAlignedByGranularity(g->beg)) {
+ Report("The following global variable is not properly aligned.\n");
+ Report("This may happen if another global with the same name\n");
+ Report("resides in another non-instrumented module.\n");
+ Report("Or the global comes from a C file built w/o -fno-common.\n");
+ Report("In either case this is likely an ODR violation bug,\n");
+ Report("but AddressSanitizer can not provide more details.\n");
+ ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g));
+ CHECK(AddrIsAlignedByGranularity(g->beg));
+ }
CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
- // This "ODR violation" detection is fundamentally incompatible with
- // how GCC registers globals. Disable as useless until rewritten upstream.
- if (0 && flags()->detect_odr_violation) {
+ if (flags()->detect_odr_violation) {
// Try detecting ODR (One Definition Rule) violation, i.e. the situation
// where two globals with the same name are defined in different modules.
- if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
- // This check may not be enough: if the first global is much larger
- // the entire redzone of the second global may be within the first global.
- for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
- if (g->beg == l->g->beg &&
- (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
- !IsODRViolationSuppressed(g->name))
- ReportODRViolation(g, FindRegistrationSite(g),
- l->g, FindRegistrationSite(l->g));
- }
- }
+ if (UseODRIndicator(g))
+ CheckODRViolationViaIndicator(g);
+ else
+ CheckODRViolationViaPoisoning(g);
}
if (CanPoisonMemory())
PoisonRedZones(*g);
@@ -190,6 +243,12 @@ static void UnregisterGlobal(const Global *g) {
// We unpoison the shadow memory for the global but we do not remove it from
// the list because that would require O(n^2) time with the current list
// implementation. It might not be worth doing anyway.
+
+ // Release ODR indicator.
+ if (UseODRIndicator(g)) {
+ u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
+ *odr_indicator = UNREGISTERED;
+ }
}
void StopInitOrderChecking() {
@@ -207,11 +266,70 @@ void StopInitOrderChecking() {
}
}
+static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
+
+const char *MaybeDemangleGlobalName(const char *name) {
+  // Demangling can spoil names of globals with C linkage, so use a heuristic
+  // approach to check if the name should be demangled.
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
+}
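
For example: a name such as "_ZN3foo3barE" matches the "_Z" prefix and is handed to the symbolizer, which would render it as "foo::bar", while a C-linkage name like "malloc_count" (a hypothetical example) fails both prefix checks and is returned untouched.
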
+
+// Check if the global is a zero-terminated ASCII string. If so, print it.
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
+ for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
+ unsigned char c = *(unsigned char *)p;
+ if (c == '\0' || !IsASCII(c)) return;
+ }
+ if (*(char *)(g.beg + g.size - 1) != '\0') return;
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
+}
+
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+  // Prefer the filename from the source location, if it is available.
+ if (g.location) res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location) return;
+ if (g.location->line_no) str->append(":%d", g.location->line_no);
+ if (g.location->column_no) str->append(":%d", g.location->column_no);
+}
+
} // namespace __asan
// ---------------------- Interface ---------------- {{{1
using namespace __asan; // NOLINT
+
+// Apply __asan_register_globals to all globals found in the same loaded
+// executable or shared library as `flag'. The flag tracks whether globals
+// have already been registered for this image.
+void __asan_register_image_globals(uptr *flag) {
+ if (*flag)
+ return;
+ AsanApplyToGlobals(__asan_register_globals, flag);
+ *flag = 1;
+}
+
+// This mirrors __asan_register_image_globals.
+void __asan_unregister_image_globals(uptr *flag) {
+ if (!*flag)
+ return;
+ AsanApplyToGlobals(__asan_unregister_globals, flag);
+ *flag = 0;
+}
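
A sketch of the intended call pattern (editorial illustration; the real glue is emitted by the compiler rather than written by hand): each instrumented image gets one guard word and a constructor/destructor pair along these lines:

    // Hypothetical per-image glue, one copy per executable or DSO.
    static uptr image_globals_flag;  // 0 = globals not yet registered
    __attribute__((constructor)) static void RegisterThisImage() {
      __asan_register_image_globals(&image_globals_flag);
    }
    __attribute__((destructor)) static void UnregisterThisImage() {
      __asan_unregister_image_globals(&image_globals_flag);
    }
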
+
// Register an array of globals.
void __asan_register_globals(__asan_global *globals, uptr n) {
if (!flags()->report_globals) return;
diff --git a/libsanitizer/asan/asan_init_version.h b/libsanitizer/asan/asan_init_version.h
index 2cda18849dd..51e8324fc13 100644
--- a/libsanitizer/asan/asan_init_version.h
+++ b/libsanitizer/asan/asan_init_version.h
@@ -17,16 +17,20 @@ extern "C" {
// Every time the ASan ABI changes we also change the version number in the
// __asan_init function name. Objects built with incompatible ASan ABI
// versions will not link with run-time.
+ //
// Changes between ABI versions:
// v1=>v2: added 'module_name' to __asan_global
// v2=>v3: stack frame description (created by the compiler)
- // contains the function PC as the 3-rd field (see
- // DescribeAddressIfStack).
- // v3=>v4: added '__asan_global_source_location' to __asan_global.
+ // contains the function PC as the 3rd field (see
+ // DescribeAddressIfStack)
+ // v3=>v4: added '__asan_global_source_location' to __asan_global
// v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
- // __asan_stack_free_ functions.
+ // __asan_stack_free_ functions
// v5=>v6: changed the name of the version check symbol
- #define __asan_version_mismatch_check __asan_version_mismatch_check_v6
+ // v6=>v7: added 'odr_indicator' to __asan_global
+ // v7=>v8: added '__asan_(un)register_image_globals' functions for dead
+ // stripping support on Mach-O platforms
+ #define __asan_version_mismatch_check __asan_version_mismatch_check_v8
}
#endif // ASAN_INIT_VERSION_H
diff --git a/libsanitizer/asan/asan_interceptors.cc b/libsanitizer/asan/asan_interceptors.cc
index 356f2c02897..743abe51481 100644
--- a/libsanitizer/asan/asan_interceptors.cc
+++ b/libsanitizer/asan/asan_interceptors.cc
@@ -19,6 +19,7 @@
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_suppressions.h"
+#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#if SANITIZER_POSIX
@@ -108,7 +109,7 @@ static inline bool RangesOverlap(const char *offset1, uptr length1,
} while (0)
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
-#if ASAN_INTERCEPT_STRNLEN
+#if SANITIZER_INTERCEPT_STRNLEN
if (REAL(strnlen)) {
return REAL(strnlen)(s, maxlen);
}
@@ -141,6 +142,8 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
(void) ctx; \
#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ ASAN_INTERCEPT_FUNC_VER(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
ASAN_WRITE_RANGE(ctx, ptr, size)
#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
@@ -176,7 +179,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
-// https://code.google.com/p/address-sanitizer/issues/detail?id=178
+// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
if (flags()->strict_init_order) { \
StopInitOrderChecking(); \
@@ -193,6 +196,10 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} else { \
*begin = *end = 0; \
}
+// Asan needs custom handling of these:
+#undef SANITIZER_INTERCEPT_MEMSET
+#undef SANITIZER_INTERCEPT_MEMMOVE
+#undef SANITIZER_INTERCEPT_MEMCPY
#include "sanitizer_common/sanitizer_common_interceptors.inc"
// Syscall interceptors don't have contexts, we don't support suppressions
@@ -216,6 +223,7 @@ struct ThreadStartParam {
atomic_uintptr_t is_registered;
};
+#if ASAN_INTERCEPT_PTHREAD_CREATE
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
AsanThread *t = nullptr;
@@ -226,7 +234,6 @@ static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
return t->ThreadStart(GetTid(), &param->is_registered);
}
-#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
EnsureMainThreadIDIsCorrect();
@@ -240,7 +247,17 @@ INTERCEPTOR(int, pthread_create, void *thread,
ThreadStartParam param;
atomic_store(&param.t, 0, memory_order_relaxed);
atomic_store(&param.is_registered, 0, memory_order_relaxed);
- int result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+ int result;
+ {
+ // Ignore all allocations made by pthread_create: thread stack/TLS may be
+ // stored by pthread for future reuse even after thread destruction, and
+ // the linked list it's stored in doesn't even hold valid pointers to the
+    // objects; the latter are calculated by obscure pointer arithmetic.
+#if CAN_SANITIZE_LEAKS
+ __lsan::ScopedInterceptorDisabler disabler;
+#endif
+ result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+ }
if (result == 0) {
u32 current_tid = GetCurrentTidOrInvalid();
AsanThread *t =
@@ -269,7 +286,8 @@ DEFINE_REAL_PTHREAD_FUNCTIONS
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
- if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
+ if (!IsHandledDeadlySignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(bsd_signal)(signum, handler);
}
return 0;
@@ -277,7 +295,8 @@ INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
#endif
INTERCEPTOR(void*, signal, int signum, void *handler) {
- if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
+ if (!IsHandledDeadlySignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(signal)(signum, handler);
}
return nullptr;
@@ -285,7 +304,8 @@ INTERCEPTOR(void*, signal, int signum, void *handler) {
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
- if (!IsDeadlySignal(signum) || common_flags()->allow_user_segv_handler) {
+ if (!IsHandledDeadlySignal(signum) ||
+ common_flags()->allow_user_segv_handler) {
return REAL(sigaction)(signum, act, oldact);
}
return 0;
@@ -451,25 +471,6 @@ INTERCEPTOR(void*, memset, void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(ctx, block, c, size);
}
-INTERCEPTOR(char*, strchr, const char *str, int c) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strchr);
- if (UNLIKELY(!asan_inited)) return internal_strchr(str, c);
- // strchr is called inside create_purgeable_zone() when MallocGuardEdges=1 is
- // used.
- if (asan_init_is_running) {
- return REAL(strchr)(str, c);
- }
- ENSURE_ASAN_INITED();
- char *result = REAL(strchr)(str, c);
- if (flags()->replace_str) {
- uptr len = REAL(strlen)(str);
- uptr bytes_read = (result ? result - str : len) + 1;
- ASAN_READ_STRING_OF_LEN(ctx, str, len, bytes_read);
- }
- return result;
-}
-
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@@ -547,7 +548,6 @@ INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
return REAL(strcpy)(to, from); // NOLINT
}
-#if ASAN_INTERCEPT_STRDUP
INTERCEPTOR(char*, strdup, const char *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, strdup);
@@ -562,29 +562,28 @@ INTERCEPTOR(char*, strdup, const char *s) {
REAL(memcpy)(new_mem, s, length + 1);
return reinterpret_cast<char*>(new_mem);
}
-#endif
-INTERCEPTOR(SIZE_T, strlen, const char *s) {
+#if ASAN_INTERCEPT___STRDUP
+INTERCEPTOR(char*, __strdup, const char *s) {
void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strlen);
- if (UNLIKELY(!asan_inited)) return internal_strlen(s);
- // strlen is called from malloc_default_purgeable_zone()
- // in __asan::ReplaceSystemAlloc() on Mac.
- if (asan_init_is_running) {
- return REAL(strlen)(s);
- }
+ ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+ if (UNLIKELY(!asan_inited)) return internal_strdup(s);
ENSURE_ASAN_INITED();
- SIZE_T length = REAL(strlen)(s);
+ uptr length = REAL(strlen)(s);
if (flags()->replace_str) {
ASAN_READ_RANGE(ctx, s, length + 1);
}
- return length;
+ GET_STACK_TRACE_MALLOC;
+ void *new_mem = asan_malloc(length + 1, &stack);
+ REAL(memcpy)(new_mem, s, length + 1);
+ return reinterpret_cast<char*>(new_mem);
}
+#endif // ASAN_INTERCEPT___STRDUP
INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
void *ctx;
ASAN_INTERCEPTOR_ENTER(ctx, wcslen);
- SIZE_T length = REAL(wcslen)(s);
+ SIZE_T length = internal_wcslen(s);
if (!asan_init_is_running) {
ENSURE_ASAN_INITED();
ASAN_READ_RANGE(ctx, s, (length + 1) * sizeof(wchar_t));
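
The new __strdup interceptor duplicates the strdup logic above because, on Linux/glibc, optimized builds have historically lowered strdup calls to __strdup (via the macros in glibc's bits/string2.h), which would otherwise bypass interception entirely. A call that may take that path (illustrative):

    #include <string.h>
    char *dup_name(const char *s) {
      return strdup(s);  // with glibc and optimization, may be emitted as __strdup()
    }

Both interceptors follow the standard recipe: check the source bytes against shadow memory, then satisfy the copy from asan_malloc so the result carries redzones and an allocation stack trace.
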
@@ -605,19 +604,6 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
return REAL(strncpy)(to, from, size);
}
-#if ASAN_INTERCEPT_STRNLEN
-INTERCEPTOR(uptr, strnlen, const char *s, uptr maxlen) {
- void *ctx;
- ASAN_INTERCEPTOR_ENTER(ctx, strnlen);
- ENSURE_ASAN_INITED();
- uptr length = REAL(strnlen)(s, maxlen);
- if (flags()->replace_str) {
- ASAN_READ_RANGE(ctx, s, Min(length + 1, maxlen));
- }
- return length;
-}
-#endif // ASAN_INTERCEPT_STRNLEN
-
INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
char **endptr, int base) {
void *ctx;
@@ -700,12 +686,12 @@ INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
}
#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+#if ASAN_INTERCEPT___CXA_ATEXIT
static void AtCxaAtexit(void *unused) {
(void)unused;
StopInitOrderChecking();
}
-#if ASAN_INTERCEPT___CXA_ATEXIT
INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
void *dso_handle) {
#if SANITIZER_MAC
@@ -732,7 +718,7 @@ INTERCEPTOR(int, fork, void) {
namespace __asan {
void InitializeAsanInterceptors() {
static bool was_called_once;
- CHECK(was_called_once == false);
+ CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
@@ -740,22 +726,22 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(memmove);
ASAN_INTERCEPT_FUNC(memset);
if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ // In asan, REAL(memmove) is not used, but it is used in msan.
ASAN_INTERCEPT_FUNC(memcpy);
+ } else {
+ ASSIGN_REAL(memcpy, memmove);
}
+ CHECK(REAL(memcpy));
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
- ASAN_INTERCEPT_FUNC(strchr);
ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
- ASAN_INTERCEPT_FUNC(strlen);
ASAN_INTERCEPT_FUNC(wcslen);
ASAN_INTERCEPT_FUNC(strncat);
ASAN_INTERCEPT_FUNC(strncpy);
-#if ASAN_INTERCEPT_STRDUP
ASAN_INTERCEPT_FUNC(strdup);
-#endif
-#if ASAN_INTERCEPT_STRNLEN
- ASAN_INTERCEPT_FUNC(strnlen);
+#if ASAN_INTERCEPT___STRDUP
+ ASAN_INTERCEPT_FUNC(__strdup);
#endif
#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
ASAN_INTERCEPT_FUNC(index);
diff --git a/libsanitizer/asan/asan_interceptors.h b/libsanitizer/asan/asan_interceptors.h
index 46c74176ed3..7053bb7faf5 100644
--- a/libsanitizer/asan/asan_interceptors.h
+++ b/libsanitizer/asan/asan_interceptors.h
@@ -21,14 +21,12 @@
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
# define ASAN_INTERCEPT__LONGJMP 1
-# define ASAN_INTERCEPT_STRDUP 1
# define ASAN_INTERCEPT_INDEX 1
# define ASAN_INTERCEPT_PTHREAD_CREATE 1
# define ASAN_INTERCEPT_FORK 1
#else
# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
# define ASAN_INTERCEPT__LONGJMP 0
-# define ASAN_INTERCEPT_STRDUP 0
# define ASAN_INTERCEPT_INDEX 0
# define ASAN_INTERCEPT_PTHREAD_CREATE 0
# define ASAN_INTERCEPT_FORK 0
@@ -40,12 +38,6 @@
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
-#if !SANITIZER_MAC
-# define ASAN_INTERCEPT_STRNLEN 1
-#else
-# define ASAN_INTERCEPT_STRNLEN 0
-#endif
-
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
@@ -78,6 +70,12 @@
# define ASAN_INTERCEPT___CXA_ATEXIT 0
#endif
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT___STRDUP 1
+#else
+# define ASAN_INTERCEPT___STRDUP 0
+#endif
+
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
diff --git a/libsanitizer/asan/asan_interface_internal.h b/libsanitizer/asan/asan_interface_internal.h
index 079da9ca522..05605a8065f 100644
--- a/libsanitizer/asan/asan_interface_internal.h
+++ b/libsanitizer/asan/asan_interface_internal.h
@@ -21,6 +21,8 @@
#include "asan_init_version.h"
using __sanitizer::uptr;
+using __sanitizer::u64;
+using __sanitizer::u32;
extern "C" {
// This function should be called at the very beginning of the process,
@@ -52,8 +54,17 @@ extern "C" {
uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
__asan_global_source_location *location; // Source location of a global,
// or NULL if it is unknown.
+ uptr odr_indicator; // The address of the ODR indicator symbol.
};
+ // These functions can be called on some platforms to find globals in the same
+ // loaded image as `flag' and apply __asan_(un)register_globals to them,
+ // filtering out redundant calls.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_register_image_globals(uptr *flag);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unregister_image_globals(uptr *flag);
+
// These two functions should be called by the instrumented code.
// 'globals' is an array of structures describing 'n' globals.
SANITIZER_INTERFACE_ATTRIBUTE
@@ -68,6 +79,20 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __asan_after_dynamic_init();
+ // Sets bytes of the given range of the shadow memory into specific value.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_00(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f1(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f2(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f3(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f5(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f8(uptr addr, uptr size);
+
// These two functions are used by instrumented code in the
// use-after-scope mode. They mark memory for local variables as
// unaddressable when they leave scope and addressable before the
@@ -145,6 +170,9 @@ extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ const char* __asan_default_options();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr __asan_shadow_memory_dynamic_address;
+
// Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
SANITIZER_INTERFACE_ATTRIBUTE
extern int __asan_option_detect_stack_use_after_return;
diff --git a/libsanitizer/asan/asan_internal.h b/libsanitizer/asan/asan_internal.h
index e31f2648b1a..15a28ff777b 100644
--- a/libsanitizer/asan/asan_internal.h
+++ b/libsanitizer/asan/asan_internal.h
@@ -34,9 +34,9 @@
// If set, values like allocator chunk size, as well as defaults for some flags
// will be changed towards less memory overhead.
#ifndef ASAN_LOW_MEMORY
-#if SANITIZER_WORDSIZE == 32
+# if SANITIZER_IOS || (SANITIZER_WORDSIZE == 32)
# define ASAN_LOW_MEMORY 1
-#else
+# else
# define ASAN_LOW_MEMORY 0
# endif
#endif
@@ -60,6 +60,12 @@ using __sanitizer::StackTrace;
void AsanInitFromRtl();
+// asan_win.cc
+void InitializePlatformExceptionHandlers();
+
+// asan_win.cc / asan_posix.cc
+const char *DescribeSignalOrException(int signo);
+
// asan_rtl.cc
void NORETURN ShowStatsAndAbort();
@@ -71,10 +77,15 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
+// Support function for __asan_(un)register_image_globals. Searches for the
+// loaded image containing `needle' and then enumerates all global metadata
+// structures declared in that image, applying `op' (e.g.,
+// __asan_(un)register_globals) to them.
+typedef void (*globals_op_fptr)(__asan_global *, uptr);
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle);
+
void AsanOnDeadlySignal(int, void *siginfo, void *context);
-void DisableReexec();
-void MaybeReexec();
void ReadContextStack(void *context, uptr *stack, uptr *ssize);
void StopInitOrderChecking();
@@ -95,16 +106,24 @@ void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
bool PlatformHasDifferentMemcpyAndMemmove();
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
(PlatformHasDifferentMemcpyAndMemmove())
+#elif SANITIZER_WINDOWS64
+# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
#else
# define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
#endif // SANITIZER_MAC
// Add convenient macro for interface functions that may be represented as
// weak hooks.
-#define ASAN_MALLOC_HOOK(ptr, size) \
- if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size)
-#define ASAN_FREE_HOOK(ptr) \
- if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr)
+#define ASAN_MALLOC_HOOK(ptr, size) \
+ do { \
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
+ RunMallocHooks(ptr, size); \
+ } while (false)
+#define ASAN_FREE_HOOK(ptr) \
+ do { \
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
+ RunFreeHooks(ptr); \
+ } while (false)
#define ASAN_ON_ERROR() \
if (&__asan_on_error) __asan_on_error()
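
The reworked hook macros above fix two classic macro pitfalls at once: the do { ... } while (false) wrapper makes the multi-statement expansion safe inside unbraced if/else, and the `if (&__sanitizer_malloc_hook)` test relies on the hooks being weak symbols, so the call is skipped entirely when the user defined none. User code can observe every heap operation with the public hooks from <sanitizer/allocator_interface.h>:

    // User-side hooks; beware that allocating inside a hook can recurse,
    // so keep the bodies trivial (fprintf here is for illustration only).
    #include <stddef.h>
    #include <stdio.h>
    extern "C" void __sanitizer_malloc_hook(const volatile void *ptr, size_t size) {
      fprintf(stderr, "alloc %p (%zu bytes)\n", (const void *)ptr, size);
    }
    extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
      fprintf(stderr, "free  %p\n", (const void *)ptr);
    }
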
@@ -112,15 +131,12 @@ extern int asan_inited;
// Used to avoid infinite recursion in __asan_init().
extern bool asan_init_is_running;
extern void (*death_callback)(void);
-
// These magic values are written to shadow for better error reporting.
const int kAsanHeapLeftRedzoneMagic = 0xfa;
-const int kAsanHeapRightRedzoneMagic = 0xfb;
const int kAsanHeapFreeMagic = 0xfd;
const int kAsanStackLeftRedzoneMagic = 0xf1;
const int kAsanStackMidRedzoneMagic = 0xf2;
const int kAsanStackRightRedzoneMagic = 0xf3;
-const int kAsanStackPartialRedzoneMagic = 0xf4;
const int kAsanStackAfterReturnMagic = 0xf5;
const int kAsanInitializationOrderMagic = 0xf6;
const int kAsanUserPoisonedMemoryMagic = 0xf7;
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index 4e47d5a0496..9f058df71a5 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -67,20 +67,17 @@ asan_rt_version_t __asan_rt_version;
namespace __asan {
void InitializePlatformInterceptors() {}
-
-void DisableReexec() {
- // No need to re-exec on Linux.
-}
-
-void MaybeReexec() {
- // No need to re-exec on Linux.
-}
+void InitializePlatformExceptionHandlers() {}
void *AsanDoesNotSupportStaticLinkage() {
// This will fail to link with -static.
return &_DYNAMIC; // defined in link.h
}
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
#if SANITIZER_ANDROID
// FIXME: should we do anything for Android?
void AsanCheckDynamicRTPrereqs() {}
diff --git a/libsanitizer/asan/asan_mac.cc b/libsanitizer/asan/asan_mac.cc
index ab3c6560825..4bf79be1728 100644
--- a/libsanitizer/asan/asan_mac.cc
+++ b/libsanitizer/asan/asan_mac.cc
@@ -22,18 +22,11 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mac.h"
-#if !SANITIZER_IOS
-#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
-#else
-extern "C" {
- extern char ***_NSGetArgv(void);
-}
-#endif
-
-#include <dlfcn.h> // for dladdr()
+#include <dlfcn.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
#include <mach-o/loader.h>
#include <pthread.h>
#include <stdlib.h> // for free()
@@ -43,193 +36,26 @@ extern "C" {
#include <sys/ucontext.h>
#include <unistd.h>
+// from <crt_externs.h>, but we don't have that file on iOS
+extern "C" {
+ extern char ***_NSGetArgv(void);
+ extern char ***_NSGetEnviron(void);
+}
+
namespace __asan {
void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
bool PlatformHasDifferentMemcpyAndMemmove() {
// On OS X 10.7 memcpy() and memmove() are both resolved
// into memmove$VARIANT$sse42.
- // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
+ // See also https://github.com/google/sanitizers/issues/34.
// TODO(glider): need to check dynamically that memcpy() and memmove() are
// actually the same function.
return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
}
-extern "C"
-void __asan_init();
-
-static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
-LowLevelAllocator allocator_for_env;
-
-// Change the value of the env var |name|, leaking the original value.
-// If |name_value| is NULL, the variable is deleted from the environment,
-// otherwise the corresponding "NAME=value" string is replaced with
-// |name_value|.
-void LeakyResetEnv(const char *name, const char *name_value) {
- char **env = GetEnviron();
- uptr name_len = internal_strlen(name);
- while (*env != 0) {
- uptr len = internal_strlen(*env);
- if (len > name_len) {
- const char *p = *env;
- if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
- // Match.
- if (name_value) {
- // Replace the old value with the new one.
- *env = const_cast<char*>(name_value);
- } else {
- // Shift the subsequent pointers back.
- char **del = env;
- do {
- del[0] = del[1];
- } while (*del++);
- }
- }
- }
- env++;
- }
-}
-
-static bool reexec_disabled = false;
-
-void DisableReexec() {
- reexec_disabled = true;
-}
-
-extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
-static const double kMinDyldVersionWithAutoInterposition = 360.0;
-
-bool DyldNeedsEnvVariable() {
- // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
- // still may want use them on older systems. On older Darwin platforms, dyld
- // doesn't export dyldVersionNumber symbol and we simply return true.
- if (!&dyldVersionNumber) return true;
- // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
- // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
- // GetMacosVersion() doesn't work for the simulator. Let's instead check
- // `dyldVersionNumber`, which is exported by dyld, against a known version
- // number from the first OS release where this appeared.
- return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
-}
-
-void MaybeReexec() {
- if (reexec_disabled) return;
-
- // Make sure the dynamic ASan runtime library is preloaded so that the
- // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
- // ourselves.
- Dl_info info;
- CHECK(dladdr((void*)((uptr)__asan_init), &info));
- char *dyld_insert_libraries =
- const_cast<char*>(GetEnv(kDyldInsertLibraries));
- uptr old_env_len = dyld_insert_libraries ?
- internal_strlen(dyld_insert_libraries) : 0;
- uptr fname_len = internal_strlen(info.dli_fname);
- const char *dylib_name = StripModuleName(info.dli_fname);
- uptr dylib_name_len = internal_strlen(dylib_name);
-
- bool lib_is_in_env =
- dyld_insert_libraries && REAL(strstr)(dyld_insert_libraries, dylib_name);
- if (DyldNeedsEnvVariable() && !lib_is_in_env) {
- // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
- // library.
- char program_name[1024];
- uint32_t buf_size = sizeof(program_name);
- _NSGetExecutablePath(program_name, &buf_size);
- char *new_env = const_cast<char*>(info.dli_fname);
- if (dyld_insert_libraries) {
- // Append the runtime dylib name to the existing value of
- // DYLD_INSERT_LIBRARIES.
- new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
- internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
- new_env[old_env_len] = ':';
- // Copy fname_len and add a trailing zero.
- internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
- fname_len + 1);
- // Ok to use setenv() since the wrappers don't depend on the value of
- // asan_inited.
- setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
- } else {
- // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
- setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
- }
- VReport(1, "exec()-ing the program with\n");
- VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
- VReport(1, "to enable ASan wrappers.\n");
- execv(program_name, *_NSGetArgv());
-
- // We get here only if execv() failed.
- Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
- "which is required for ASan to work. ASan tried to set the "
- "environment variable and re-execute itself, but execv() failed, "
- "possibly because of sandbox restrictions. Make sure to launch the "
- "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
- CHECK("execv failed" && 0);
- }
-
- if (!lib_is_in_env)
- return;
-
- // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
- // the dylib from the environment variable, because interceptors are installed
- // and we don't want our children to inherit the variable.
-
- uptr env_name_len = internal_strlen(kDyldInsertLibraries);
- // Allocate memory to hold the previous env var name, its value, the '='
- // sign and the '\0' char.
- char *new_env = (char*)allocator_for_env.Allocate(
- old_env_len + 2 + env_name_len);
- CHECK(new_env);
- internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
- internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
- new_env[env_name_len] = '=';
- char *new_env_pos = new_env + env_name_len + 1;
-
- // Iterate over colon-separated pieces of |dyld_insert_libraries|.
- char *piece_start = dyld_insert_libraries;
- char *piece_end = NULL;
- char *old_env_end = dyld_insert_libraries + old_env_len;
- do {
- if (piece_start[0] == ':') piece_start++;
- piece_end = REAL(strchr)(piece_start, ':');
- if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
- if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
- uptr piece_len = piece_end - piece_start;
-
- char *filename_start =
- (char *)internal_memrchr(piece_start, '/', piece_len);
- uptr filename_len = piece_len;
- if (filename_start) {
- filename_start += 1;
- filename_len = piece_len - (filename_start - piece_start);
- } else {
- filename_start = piece_start;
- }
-
- // If the current piece isn't the runtime library name,
- // append it to new_env.
- if ((dylib_name_len != filename_len) ||
- (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
- if (new_env_pos != new_env + env_name_len + 1) {
- new_env_pos[0] = ':';
- new_env_pos++;
- }
- internal_strncpy(new_env_pos, piece_start, piece_len);
- new_env_pos += piece_len;
- }
- // Move on to the next piece.
- piece_start = piece_end;
- } while (piece_start < old_env_end);
-
- // Can't use setenv() here, because it requires the allocator to be
- // initialized.
- // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
- // a separate function called after InitializeAllocator().
- if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
- LeakyResetEnv(kDyldInsertLibraries, new_env);
-}
-
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
return 0;
@@ -241,6 +67,30 @@ void AsanCheckDynamicRTPrereqs() {}
// No-op. Mac does not support static linkage anyway.
void AsanCheckIncompatibleRT() {}
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ // Find the Mach-O header for the image containing the needle
+ Dl_info info;
+ int err = dladdr(needle, &info);
+ if (err == 0) return;
+
+#if __LP64__
+ const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase;
+#else
+ const struct mach_header *mh = (struct mach_header *)info.dli_fbase;
+#endif
+
+ // Look up the __asan_globals section in that image and register its globals
+ unsigned long size = 0;
+ __asan_global *globals = (__asan_global *)getsectiondata(
+ mh,
+ "__DATA", "__asan_globals",
+ &size);
+
+ if (!globals) return;
+ if (size % sizeof(__asan_global) != 0) return;
+ op(globals, size / sizeof(__asan_global));
+}
+
void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
UNIMPLEMENTED();
}
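
The Darwin implementation works because the instrumentation emits one __asan_global record per instrumented global into the image's __DATA,__asan_globals section; dladdr locates the image containing `needle' and getsectiondata returns that section's base and size. Any function matching globals_op_fptr can be applied; a hypothetical logging op (illustration only; the real ops are __asan_(un)register_globals):

    static void LogGlobals(__asan_global *globals, uptr n) {
      for (uptr i = 0; i < n; i++)
        Printf("global '%s' at 0x%zx, size %zu\n",
               globals[i].name, globals[i].beg, globals[i].size);
    }
    // Usage: AsanApplyToGlobals(LogGlobals, some_address_inside_the_image);
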
diff --git a/libsanitizer/asan/asan_malloc_linux.cc b/libsanitizer/asan/asan_malloc_linux.cc
index bfe72af69e6..cc50a388495 100644
--- a/libsanitizer/asan/asan_malloc_linux.cc
+++ b/libsanitizer/asan/asan_malloc_linux.cc
@@ -76,7 +76,13 @@ INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
- void *new_ptr = asan_malloc(size, &stack);
+ void *new_ptr;
+ if (UNLIKELY(!asan_inited)) {
+ new_ptr = AllocateFromLocalPool(size);
+ } else {
+ copy_size = size;
+ new_ptr = asan_malloc(copy_size, &stack);
+ }
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
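
This branch exists because dlsym() itself allocates before ASan is initialized; those early requests are served from a small static pool, and a later realloc of such a pointer must either stay in the pool (runtime still uninitialized) or migrate the bytes into a real ASan chunk (runtime initialized, in which case `size` bytes are copied out of the pool block). A sketch following the shape of this file's pool helpers (constants illustrative):

    static uptr allocated_for_dlsym;
    static const uptr kDlsymAllocPoolSize = 1024;
    static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];

    static void *AllocateFromLocalPool(uptr size_in_bytes) {
      uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
      void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
      allocated_for_dlsym += size_in_words;
      CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);  // pool exhausted?
      return mem;
    }
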
@@ -96,7 +102,7 @@ INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
GET_STACK_TRACE_MALLOC;
void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
- DTLS_on_libc_memalign(res, size * boundary);
+ DTLS_on_libc_memalign(res, size);
return res;
}
diff --git a/libsanitizer/asan/asan_malloc_mac.cc b/libsanitizer/asan/asan_malloc_mac.cc
index 33ccbf09470..1ca665d84c5 100644
--- a/libsanitizer/asan/asan_malloc_mac.cc
+++ b/libsanitizer/asan/asan_malloc_mac.cc
@@ -52,10 +52,6 @@ using namespace __asan;
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
GET_STACK_TRACE_FREE; \
ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
-#define COMMON_MALLOC_IGNORE_INVALID_FREE flags()->mac_ignore_invalid_free
-#define COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name) \
- GET_STACK_TRACE_FREE; \
- WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
#define COMMON_MALLOC_NAMESPACE __asan
#include "sanitizer_common/sanitizer_malloc_mac.inc"
diff --git a/libsanitizer/asan/asan_malloc_win.cc b/libsanitizer/asan/asan_malloc_win.cc
index f2ab188bf5b..f38cd054bb0 100644
--- a/libsanitizer/asan/asan_malloc_win.cc
+++ b/libsanitizer/asan/asan_malloc_win.cc
@@ -12,6 +12,8 @@
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_WINDOWS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
#include "asan_allocator.h"
#include "asan_interceptors.h"
@@ -47,6 +49,11 @@ void _free_dbg(void *ptr, int) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
+void _free_base(void *ptr) {
+ free(ptr);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
void cfree(void *ptr) {
CHECK(!"cfree() should not be used on Windows");
}
@@ -58,6 +65,11 @@ void *malloc(size_t size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
+void *_malloc_base(size_t size) {
+ return malloc(size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_malloc_dbg(size_t size, int, const char *, int) {
return malloc(size);
}
@@ -69,6 +81,11 @@ void *calloc(size_t nmemb, size_t size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_base(size_t nmemb, size_t size) {
+ return calloc(nmemb, size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
return calloc(nmemb, size);
}
@@ -91,6 +108,11 @@ void *_realloc_dbg(void *ptr, size_t size, int) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
+void *_realloc_base(void *ptr, size_t size) {
+ return realloc(ptr, size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
void *_recalloc(void *p, size_t n, size_t elem_size) {
if (!p)
return calloc(n, elem_size);
@@ -101,7 +123,12 @@ void *_recalloc(void *p, size_t n, size_t elem_size) {
}
ALLOCATION_FUNCTION_ATTRIBUTE
-size_t _msize(void *ptr) {
+void *_recalloc_base(void *p, size_t n, size_t elem_size) {
+ return _recalloc(p, n, elem_size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+size_t _msize(const void *ptr) {
GET_CURRENT_PC_BP_SP;
(void)sp;
return asan_malloc_usable_size(ptr, pc, bp);
@@ -137,38 +164,90 @@ int _CrtSetReportMode(int, int) {
}
} // extern "C"
+INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
+ SIZE_T dwBytes) {
+ GET_STACK_TRACE_MALLOC;
+ void *p = asan_malloc(dwBytes, &stack);
+ // Reading MSDN suggests that the *entire* usable allocation is zeroed out.
+ // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY.
+ // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
+ if (dwFlags == HEAP_ZERO_MEMORY)
+ internal_memset(p, 0, asan_mz_size(p));
+ else
+ CHECK(dwFlags == 0 && "unsupported heap flags");
+ return p;
+}
+
+INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) {
+ CHECK(dwFlags == 0 && "unsupported heap flags");
+ GET_STACK_TRACE_FREE;
+ asan_free(lpMem, &stack, FROM_MALLOC);
+ return true;
+}
+
+INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags,
+ LPVOID lpMem, SIZE_T dwBytes) {
+ GET_STACK_TRACE_MALLOC;
+ // Realloc should never reallocate in place.
+ if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY)
+ return nullptr;
+ CHECK(dwFlags == 0 && "unsupported heap flags");
+ return asan_realloc(lpMem, dwBytes, &stack);
+}
+
+INTERCEPTOR_WINAPI(SIZE_T, HeapSize, HANDLE hHeap, DWORD dwFlags,
+ LPCVOID lpMem) {
+ CHECK(dwFlags == 0 && "unsupported heap flags");
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ return asan_malloc_usable_size(lpMem, pc, bp);
+}
+
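
Caller-visible behavior of these interceptors, for orientation: hHeap is effectively ignored and every request is served by ASan's allocator, while HEAP_ZERO_MEMORY zeroes the entire usable size (asan_mz_size) so a later HeapReAlloc with HEAP_ZERO_MEMORY sees zeroed tail bytes, matching the documented Win32 semantics. Standard usage that now flows through ASan:

    #include <windows.h>
    void demo() {
      HANDLE heap = GetProcessHeap();
      void *p = HeapAlloc(heap, HEAP_ZERO_MEMORY, 64);  // -> asan_malloc + memset
      p = HeapReAlloc(heap, 0, p, 128);                 // -> asan_realloc
      SIZE_T n = HeapSize(heap, 0, p);                  // -> usable size from ASan
      (void)n;
      HeapFree(heap, 0, p);                             // -> asan_free
    }
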
namespace __asan {
+
+static void TryToOverrideFunction(const char *fname, uptr new_func) {
+ // Failure here is not fatal. The CRT may not be present, and different CRT
+ // versions use different symbols.
+ if (!__interception::OverrideFunction(fname, new_func))
+ VPrintf(2, "Failed to override function %s\n", fname);
+}
+
void ReplaceSystemMalloc() {
#if defined(ASAN_DYNAMIC)
- // We don't check the result because CRT might not be used in the process.
- __interception::OverrideFunction("free", (uptr)free);
- __interception::OverrideFunction("malloc", (uptr)malloc);
- __interception::OverrideFunction("_malloc_crt", (uptr)malloc);
- __interception::OverrideFunction("calloc", (uptr)calloc);
- __interception::OverrideFunction("_calloc_crt", (uptr)calloc);
- __interception::OverrideFunction("realloc", (uptr)realloc);
- __interception::OverrideFunction("_realloc_crt", (uptr)realloc);
- __interception::OverrideFunction("_recalloc", (uptr)_recalloc);
- __interception::OverrideFunction("_recalloc_crt", (uptr)_recalloc);
- __interception::OverrideFunction("_msize", (uptr)_msize);
- __interception::OverrideFunction("_expand", (uptr)_expand);
-
- // Override different versions of 'operator new' and 'operator delete'.
- // No need to override the nothrow versions as they just wrap the throw
- // versions.
- // FIXME: Unfortunately, MSVC miscompiles the statements that take the
- // addresses of the array versions of these operators,
- // see https://connect.microsoft.com/VisualStudio/feedbackdetail/view/946992
- // We might want to try to work around this by [inline] assembly or compiling
- // parts of the RTL with Clang.
- void *(*op_new)(size_t sz) = operator new;
- void (*op_delete)(void *p) = operator delete;
- void *(*op_array_new)(size_t sz) = operator new[];
- void (*op_array_delete)(void *p) = operator delete[];
- __interception::OverrideFunction("??2@YAPAXI@Z", (uptr)op_new);
- __interception::OverrideFunction("??3@YAXPAX@Z", (uptr)op_delete);
- __interception::OverrideFunction("??_U@YAPAXI@Z", (uptr)op_array_new);
- __interception::OverrideFunction("??_V@YAXPAX@Z", (uptr)op_array_delete);
+ TryToOverrideFunction("free", (uptr)free);
+ TryToOverrideFunction("_free_base", (uptr)free);
+ TryToOverrideFunction("malloc", (uptr)malloc);
+ TryToOverrideFunction("_malloc_base", (uptr)malloc);
+ TryToOverrideFunction("_malloc_crt", (uptr)malloc);
+ TryToOverrideFunction("calloc", (uptr)calloc);
+ TryToOverrideFunction("_calloc_base", (uptr)calloc);
+ TryToOverrideFunction("_calloc_crt", (uptr)calloc);
+ TryToOverrideFunction("realloc", (uptr)realloc);
+ TryToOverrideFunction("_realloc_base", (uptr)realloc);
+ TryToOverrideFunction("_realloc_crt", (uptr)realloc);
+ TryToOverrideFunction("_recalloc", (uptr)_recalloc);
+ TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
+ TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
+ TryToOverrideFunction("_msize", (uptr)_msize);
+ TryToOverrideFunction("_expand", (uptr)_expand);
+ TryToOverrideFunction("_expand_base", (uptr)_expand);
+
+ // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG, which
+ // enable cross-module inlining. This means our _malloc_base hook won't catch
+  // all CRT allocations. This code patches the import table of
+ // ucrtbase.dll so that all attempts to use the lower-level win32 heap
+ // allocation API will be directed to ASan's heap. We don't currently
+ // intercept all calls to HeapAlloc. If we did, we would have to check on
+  // HeapFree whether the pointer came from ASan or from the system.
+#define INTERCEPT_UCRT_FUNCTION(func) \
+ if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", \
+ "api-ms-win-core-heap-l1-1-0.dll", func)) \
+ VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func);
+ INTERCEPT_UCRT_FUNCTION(HeapAlloc);
+ INTERCEPT_UCRT_FUNCTION(HeapFree);
+ INTERCEPT_UCRT_FUNCTION(HeapReAlloc);
+ INTERCEPT_UCRT_FUNCTION(HeapSize);
+#undef INTERCEPT_UCRT_FUNCTION
#endif
}
} // namespace __asan
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index b584cfa2f3c..b9fa5f79481 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -15,7 +15,7 @@
#include "asan_internal.h"
// The full explanation of the memory mapping could be found here:
-// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm
+// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000:
// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem ||
@@ -85,6 +85,20 @@
// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
//
+// Default Linux/S390 mapping:
+// || `[0x30000000, 0x7fffffff]` || HighMem ||
+// || `[0x26000000, 0x2fffffff]` || HighShadow ||
+// || `[0x24000000, 0x25ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
+// Default Linux/SystemZ mapping:
+// || `[0x14000000000000, 0x1fffffffffffff]` || HighMem ||
+// || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow ||
+// || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap ||
+// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow ||
+// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem ||
+//
// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
@@ -109,17 +123,19 @@
// || `[0x00000000, 0x2fffffff]` || LowMem ||
static const u64 kDefaultShadowScale = 3;
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
-static const u64 kIosShadowOffset64 = 0x130000000;
+static const u64 kIosShadowOffset64 = 0x120200000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
+static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
@@ -136,28 +152,36 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
# elif SANITIZER_WINDOWS
# define SHADOW_OFFSET kWindowsShadowOffset32
-# elif SANITIZER_IOSSIM
-# define SHADOW_OFFSET kIosSimShadowOffset32
# elif SANITIZER_IOS
-# define SHADOW_OFFSET kIosShadowOffset32
+# if SANITIZER_IOSSIM
+# define SHADOW_OFFSET kIosSimShadowOffset32
+# else
+# define SHADOW_OFFSET kIosShadowOffset32
+# endif
# else
# define SHADOW_OFFSET kDefaultShadowOffset32
# endif
#else
-# if defined(__aarch64__)
+# if SANITIZER_IOS
+# if SANITIZER_IOSSIM
+# define SHADOW_OFFSET kIosSimShadowOffset64
+# else
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# endif
+# elif defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
# elif defined(__powerpc64__)
# define SHADOW_OFFSET kPPC64_ShadowOffset64
+# elif defined(__s390x__)
+# define SHADOW_OFFSET kSystemZ_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
# define SHADOW_OFFSET kMIPS64_ShadowOffset64
-# elif SANITIZER_IOSSIM
-# define SHADOW_OFFSET kIosSimShadowOffset64
-# elif SANITIZER_IOS
-# define SHADOW_OFFSET kIosShadowOffset64
+# elif SANITIZER_WINDOWS64
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# else
# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
# endif
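
All of these constants feed one translation, MEM_TO_SHADOW(mem) = (mem >> SHADOW_SCALE) + SHADOW_OFFSET, where the default scale of 3 makes one shadow byte describe an 8-byte granule of application memory. A worked instance for the Linux/x86_64 short offset, in the runtime's own types:

    // shadow = (addr >> 3) + 0x7fff8000 on Linux/x86_64 (values from above).
    uptr addr   = 0x603000000010;
    uptr shadow = (addr >> kDefaultShadowScale) + kDefaultShort64bitShadowOffset;
    // shadow == 0x0c0600000002 + 0x7fff8000 == 0x0c067fff8002; this one byte
    // describes the 8 application bytes [0x603000000010, 0x603000000018).
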
@@ -243,9 +267,25 @@ static inline bool AddrIsInMidMem(uptr a) {
return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
}
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ if (kMidMemBeg) {
+ if (a <= kShadowGapEnd)
+ return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+ return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
+ (a >= kShadowGap3Beg && a <= kShadowGap3End);
+ }
+ // In zero-based shadow mode we treat addresses near zero as addresses
+ // in shadow gap as well.
+ if (SHADOW_OFFSET == 0)
+ return a <= kShadowGapEnd;
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
static inline bool AddrIsInMem(uptr a) {
PROFILE_ASAN_MAPPING();
- return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a);
+ return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
}
static inline uptr MemToShadow(uptr p) {
@@ -269,21 +309,6 @@ static inline bool AddrIsInShadow(uptr a) {
return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
}
-static inline bool AddrIsInShadowGap(uptr a) {
- PROFILE_ASAN_MAPPING();
- if (kMidMemBeg) {
- if (a <= kShadowGapEnd)
- return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
- return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
- (a >= kShadowGap3Beg && a <= kShadowGap3End);
- }
- // In zero-based shadow mode we treat addresses near zero as addresses
- // in shadow gap as well.
- if (SHADOW_OFFSET == 0)
- return a <= kShadowGapEnd;
- return a >= kShadowGapBeg && a <= kShadowGapEnd;
-}
-
static inline bool AddrIsAlignedByGranularity(uptr a) {
PROFILE_ASAN_MAPPING();
return (a & (SHADOW_GRANULARITY - 1)) == 0;
diff --git a/libsanitizer/asan/asan_memory_profile.cc b/libsanitizer/asan/asan_memory_profile.cc
new file mode 100644
index 00000000000..5a2578596ad
--- /dev/null
+++ b/libsanitizer/asan/asan_memory_profile.cc
@@ -0,0 +1,98 @@
+//===-- asan_memory_profile.cc --------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file implements __sanitizer_print_memory_profile.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "lsan/lsan_common.h"
+#include "asan/asan_allocator.h"
+
+#if CAN_SANITIZE_LEAKS
+
+namespace __asan {
+
+struct AllocationSite {
+ u32 id;
+ uptr total_size;
+ uptr count;
+};
+
+class HeapProfile {
+ public:
+ HeapProfile() : allocations_(1024) {}
+ void Insert(u32 id, uptr size) {
+ total_allocated_ += size;
+ total_count_++;
+ // Linear lookup will be good enough for most cases (although not all).
+ for (uptr i = 0; i < allocations_.size(); i++) {
+ if (allocations_[i].id == id) {
+ allocations_[i].total_size += size;
+ allocations_[i].count++;
+ return;
+ }
+ }
+ allocations_.push_back({id, size, 1});
+ }
+
+ void Print(uptr top_percent) {
+ InternalSort(&allocations_, allocations_.size(),
+ [](const AllocationSite &a, const AllocationSite &b) {
+ return a.total_size > b.total_size;
+ });
+ CHECK(total_allocated_);
+ uptr total_shown = 0;
+ Printf("Live Heap Allocations: %zd bytes from %zd allocations; "
+ "showing top %zd%%\n", total_allocated_, total_count_, top_percent);
+ for (uptr i = 0; i < allocations_.size(); i++) {
+ auto &a = allocations_[i];
+ Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
+ a.total_size * 100 / total_allocated_, a.count);
+ StackDepotGet(a.id).Print();
+ total_shown += a.total_size;
+ if (total_shown * 100 / total_allocated_ > top_percent)
+ break;
+ }
+ }
+
+ private:
+ uptr total_allocated_ = 0;
+ uptr total_count_ = 0;
+ InternalMmapVector<AllocationSite> allocations_;
+};
+
+static void ChunkCallback(uptr chunk, void *arg) {
+ HeapProfile *hp = reinterpret_cast<HeapProfile*>(arg);
+ AsanChunkView cv = FindHeapChunkByAllocBeg(chunk);
+ if (!cv.IsAllocated()) return;
+ u32 id = cv.GetAllocStackId();
+ if (!id) return;
+ hp->Insert(id, cv.UsedSize());
+}
+
+static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ HeapProfile hp;
+ __lsan::ForEachChunk(ChunkCallback, &hp);
+ hp.Print(reinterpret_cast<uptr>(argument));
+}
+
+} // namespace __asan
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_memory_profile(uptr top_percent) {
+ __sanitizer::StopTheWorld(__asan::MemoryProfileCB, (void*)top_percent);
+}
+} // extern "C"
+
+#endif // CAN_SANITIZE_LEAKS
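
The new entry point can be called directly from instrumented programs on platforms where CAN_SANITIZE_LEAKS holds (i.e. where LSan's chunk iteration exists). No public header declares it at this revision, so a caller declares it manually; this sketch assumes uptr corresponds to unsigned long on the LP64 Linux targets in question:

    extern "C" void __sanitizer_print_memory_profile(unsigned long top_percent);

    int main() {
      char *sink = new char[1 << 20];        // keep one large allocation live
      __sanitizer_print_memory_profile(90);  // show sites covering ~90% of bytes
      delete[] sink;
    }
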
diff --git a/libsanitizer/asan/asan_new_delete.cc b/libsanitizer/asan/asan_new_delete.cc
index bf7a34ac87b..2c4642cb27e 100644
--- a/libsanitizer/asan/asan_new_delete.cc
+++ b/libsanitizer/asan/asan_new_delete.cc
@@ -18,9 +18,25 @@
#include <stddef.h>
-// C++ operators can't have visibility attributes on Windows.
+// C++ operators can't have dllexport attributes on Windows. We export them
+// anyway by passing extra -export flags to the linker, which is exactly what
+// dllexport would normally do. We need to export them in order to make the
+// VS2015 dynamic CRT (MD) work.
#if SANITIZER_WINDOWS
# define CXX_OPERATOR_ATTRIBUTE
+# ifdef _WIN64
+# pragma comment(linker, "/export:??2@YAPEAX_K@Z") // operator new
+# pragma comment(linker, "/export:??3@YAXPEAX@Z") // operator delete
+# pragma comment(linker, "/export:??3@YAXPEAX_K@Z") // sized operator delete
+# pragma comment(linker, "/export:??_U@YAPEAX_K@Z") // operator new[]
+# pragma comment(linker, "/export:??_V@YAXPEAX@Z") // operator delete[]
+# else
+# pragma comment(linker, "/export:??2@YAPAXI@Z") // operator new
+# pragma comment(linker, "/export:??3@YAXPAX@Z") // operator delete
+# pragma comment(linker, "/export:??3@YAXPAXI@Z") // sized operator delete
+# pragma comment(linker, "/export:??_U@YAPAXI@Z") // operator new[]
+# pragma comment(linker, "/export:??_V@YAXPAX@Z") // operator delete[]
+# endif
#else
# define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
#endif
@@ -37,7 +53,7 @@ using namespace __asan; // NOLINT
#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
// This code has issues on OSX.
-// See https://code.google.com/p/address-sanitizer/issues/detail?id=131.
+// See https://github.com/google/sanitizers/issues/131.
// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
namespace std {
diff --git a/libsanitizer/asan/asan_poisoning.cc b/libsanitizer/asan/asan_poisoning.cc
index 39f74879b38..8fe2bd42bdb 100644
--- a/libsanitizer/asan/asan_poisoning.cc
+++ b/libsanitizer/asan/asan_poisoning.cc
@@ -67,7 +67,7 @@ void FlushUnneededASanShadowMemory(uptr p, uptr size) {
uptr page_size = GetPageSizeCached();
uptr shadow_beg = RoundUpTo(MemToShadow(p), page_size);
uptr shadow_end = RoundDownTo(MemToShadow(p + size), page_size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
}
void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
@@ -100,7 +100,7 @@ using namespace __asan; // NOLINT
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
-// http://code.google.com/p/address-sanitizer/wiki/AddressSanitizerAlgorithm).
+// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
// at least [left, AlignDown(right)).
@@ -115,9 +115,9 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
// We can only poison memory if the byte in end.offset is unaddressable.
// No need to re-poison memory if it is poisoned already.
if (value > 0 && value <= end.offset) {
@@ -129,7 +129,7 @@ void __asan_poison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
// Mark bytes from beg.offset as unaddressable.
if (beg.value == 0) {
@@ -155,9 +155,9 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
ShadowSegmentEndpoint beg(beg_addr);
ShadowSegmentEndpoint end(end_addr);
if (beg.chunk == end.chunk) {
- CHECK(beg.offset < end.offset);
+ CHECK_LT(beg.offset, end.offset);
s8 value = beg.value;
- CHECK(value == end.value);
+ CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if they are not
    // unpoisoned already.
if (value != 0) {
@@ -165,7 +165,7 @@ void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
}
return;
}
- CHECK(beg.chunk < end.chunk);
+ CHECK_LT(beg.chunk, end.chunk);
if (beg.offset > 0) {
*beg.chunk = 0;
beg.chunk++;
@@ -312,6 +312,30 @@ static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
}
}
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0, size);
+}
+
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf1, size);
+}
+
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf2, size);
+}
+
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf3, size);
+}
+
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf5, size);
+}
+
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf8, size);
+}
+
void __asan_poison_stack_memory(uptr addr, uptr size) {
VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
PoisonAlignedStackMemory(addr, size, true);
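
The __asan_set_shadow_* family gives compiler-generated prologue/epilogue code a compact way to write whole runs of one shadow magic byte (0x00 addressable, f1/f2/f3 stack redzones, f5 after-return, f8 use-after-scope) instead of emitting inline stores. Illustrative shape of what instrumentation might emit for a small frame, written as C++ against the mapping macros (illustration only; actual emission happens in the compiler):

    // 64 application bytes -> 8 shadow bytes at the default granularity of 8.
    uptr shadow = MEM_TO_SHADOW(frame_base);
    __asan_set_shadow_f1(shadow, 1);      // left redzone
    __asan_set_shadow_00(shadow + 1, 6);  // addressable locals
    __asan_set_shadow_f3(shadow + 7, 1);  // right redzone
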
@@ -341,7 +365,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
&stack);
}
CHECK_LE(end - beg,
- FIRST_32_SECOND_64(1UL << 30, 1UL << 34)); // Sanity check.
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
@@ -352,7 +376,7 @@ void __sanitizer_annotate_contiguous_container(const void *beg_p,
// Make a quick sanity check that we are indeed in this state.
//
// FIXME: Two of these three checks are disabled until we fix
- // https://code.google.com/p/address-sanitizer/issues/detail?id=258.
+ // https://github.com/google/sanitizers/issues/258.
// if (d1 != d2)
// CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
if (a + granularity <= d1)
diff --git a/libsanitizer/asan/asan_poisoning.h b/libsanitizer/asan/asan_poisoning.h
index 30e39e9077c..4ddcbc314d4 100644
--- a/libsanitizer/asan/asan_poisoning.h
+++ b/libsanitizer/asan/asan_poisoning.h
@@ -84,7 +84,7 @@ ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
}
}
-// Calls __sanitizer::FlushUnneededShadowMemory() on
+// Calls __sanitizer::ReleaseMemoryToOS() on
// [MemToShadow(p), MemToShadow(p+size)] with proper rounding.
void FlushUnneededASanShadowMemory(uptr p, uptr size);
diff --git a/libsanitizer/asan/asan_posix.cc b/libsanitizer/asan/asan_posix.cc
index 5b532e950bb..532afb37f1a 100644
--- a/libsanitizer/asan/asan_posix.cc
+++ b/libsanitizer/asan/asan_posix.cc
@@ -31,17 +31,39 @@
namespace __asan {
+const char *DescribeSignalOrException(int signo) {
+ switch (signo) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ default:
+ return "SEGV";
+ }
+}
+
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
- // Write the first message using the bullet-proof write.
- if (18 != internal_write(2, "ASAN:DEADLYSIGNAL\n", 18)) Die();
+ // Write the first message using fd=2, just in case.
+ // It may actually fail to write in case stderr is closed.
+ internal_write(2, "ASAN:DEADLYSIGNAL\n", 18);
SignalContext sig = SignalContext::Create(siginfo, context);
// Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
// probably a stack overflow.
+#ifdef __s390__
+ // On s390, the fault address in siginfo points to start of the page, not
+ // to the precise word that was accessed. Mask off the low bits of sp to
+ // take it into account.
+ bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) &&
+ sig.addr < sig.sp + 0xFFFF;
+#else
bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
+#endif
#if __powerpc__
// Large stack frames can be allocated with e.g.
@@ -73,10 +95,8 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
// unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
- else if (signo == SIGFPE)
- ReportDeadlySignal("FPE", sig);
else
- ReportDeadlySignal("SEGV", sig);
+ ReportDeadlySignal(signo, sig);
}
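
The stack-overflow classification above is a heuristic: a fault is attributed to the stack when the faulting address sits in a window just below sp (redzones, multi-register pushes) or reasonably above it, and the kernel reported a mapping or permission error. A worked instance of the non-s390 test:

    //   sig.sp   = 0x7ffd00001000
    //   sig.addr = 0x7ffd00000ff8      (8 bytes below sp, e.g. a push)
    //   sig.addr + 512 > sig.sp        -> true
    //   sig.addr < sig.sp + 0xFFFF     -> true
    // With si_code == SEGV_MAPERR this reports as a stack overflow;
    // anything else falls through to ReportDeadlySignal(signo, sig).
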
// ---------------------- TSD ---------------- {{{1
diff --git a/libsanitizer/asan/asan_report.cc b/libsanitizer/asan/asan_report.cc
index 7f7eaf515e7..84d67646b40 100644
--- a/libsanitizer/asan/asan_report.cc
+++ b/libsanitizer/asan/asan_report.cc
@@ -10,10 +10,13 @@
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
+#include "asan_errors.h"
#include "asan_flags.h"
+#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
+#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
@@ -28,88 +31,31 @@ namespace __asan {
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
-static uptr error_message_buffer_size = 0;
-
-struct ReportData {
- uptr pc;
- uptr sp;
- uptr bp;
- uptr addr;
- bool is_write;
- uptr access_size;
- const char *description;
-};
-
-static bool report_happened = false;
-static ReportData report_data = {};
+static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
+static const unsigned kAsanBuggyPcPoolSize = 25;
+static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
void AppendToErrorMessageBuffer(const char *buffer) {
- if (error_message_buffer) {
- uptr length = internal_strlen(buffer);
- CHECK_GE(error_message_buffer_size, error_message_buffer_pos);
- uptr remaining = error_message_buffer_size - error_message_buffer_pos;
- internal_strncpy(error_message_buffer + error_message_buffer_pos,
- buffer, remaining);
- error_message_buffer[error_message_buffer_size - 1] = '\0';
- // FIXME: reallocate the buffer instead of truncating the message.
- error_message_buffer_pos += Min(remaining, length);
+ BlockingMutexLock l(&error_message_buf_mutex);
+ if (!error_message_buffer) {
+ error_message_buffer =
+ (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
+ error_message_buffer_pos = 0;
}
+ uptr length = internal_strlen(buffer);
+ RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
+ uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
+ internal_strncpy(error_message_buffer + error_message_buffer_pos,
+ buffer, remaining);
+ error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
+ // FIXME: reallocate the buffer instead of truncating the message.
+ error_message_buffer_pos += Min(remaining, length);
}
-// ---------------------- Decorator ------------------------------ {{{1
-class Decorator: public __sanitizer::SanitizerCommonDecorator {
- public:
- Decorator() : SanitizerCommonDecorator() { }
- const char *Access() { return Blue(); }
- const char *EndAccess() { return Default(); }
- const char *Location() { return Green(); }
- const char *EndLocation() { return Default(); }
- const char *Allocation() { return Magenta(); }
- const char *EndAllocation() { return Default(); }
-
- const char *ShadowByte(u8 byte) {
- switch (byte) {
- case kAsanHeapLeftRedzoneMagic:
- case kAsanHeapRightRedzoneMagic:
- case kAsanArrayCookieMagic:
- return Red();
- case kAsanHeapFreeMagic:
- return Magenta();
- case kAsanStackLeftRedzoneMagic:
- case kAsanStackMidRedzoneMagic:
- case kAsanStackRightRedzoneMagic:
- case kAsanStackPartialRedzoneMagic:
- return Red();
- case kAsanStackAfterReturnMagic:
- return Magenta();
- case kAsanInitializationOrderMagic:
- return Cyan();
- case kAsanUserPoisonedMemoryMagic:
- case kAsanContiguousContainerOOBMagic:
- case kAsanAllocaLeftMagic:
- case kAsanAllocaRightMagic:
- return Blue();
- case kAsanStackUseAfterScopeMagic:
- return Magenta();
- case kAsanGlobalRedzoneMagic:
- return Red();
- case kAsanInternalHeapMagic:
- return Yellow();
- case kAsanIntraObjectRedzone:
- return Yellow();
- default:
- return Default();
- }
- }
- const char *EndShadowByte() { return Default(); }
- const char *MemoryByte() { return Magenta(); }
- const char *EndMemoryByte() { return Default(); }
-};
-
// ---------------------- Helper functions ----------------------- {{{1
-static void PrintMemoryByte(InternalScopedString *str, const char *before,
- u8 byte, bool in_shadow, const char *after = "\n") {
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after) {
Decorator d;
str->append("%s%s%x%x%s%s", before,
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
@@ -117,99 +63,6 @@ static void PrintMemoryByte(InternalScopedString *str, const char *before,
in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
}
-static void PrintShadowByte(InternalScopedString *str, const char *before,
- u8 byte, const char *after = "\n") {
- PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
-}
-
-static void PrintShadowBytes(InternalScopedString *str, const char *before,
- u8 *bytes, u8 *guilty, uptr n) {
- Decorator d;
- if (before) str->append("%s%p:", before, bytes);
- for (uptr i = 0; i < n; i++) {
- u8 *p = bytes + i;
- const char *before =
- p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
- const char *after = p == guilty ? "]" : "";
- PrintShadowByte(str, before, *p, after);
- }
- str->append("\n");
-}
-
-static void PrintLegend(InternalScopedString *str) {
- str->append(
- "Shadow byte legend (one shadow byte represents %d "
- "application bytes):\n",
- (int)SHADOW_GRANULARITY);
- PrintShadowByte(str, " Addressable: ", 0);
- str->append(" Partially addressable: ");
- for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
- str->append("\n");
- PrintShadowByte(str, " Heap left redzone: ",
- kAsanHeapLeftRedzoneMagic);
- PrintShadowByte(str, " Heap right redzone: ",
- kAsanHeapRightRedzoneMagic);
- PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
- PrintShadowByte(str, " Stack left redzone: ",
- kAsanStackLeftRedzoneMagic);
- PrintShadowByte(str, " Stack mid redzone: ",
- kAsanStackMidRedzoneMagic);
- PrintShadowByte(str, " Stack right redzone: ",
- kAsanStackRightRedzoneMagic);
- PrintShadowByte(str, " Stack partial redzone: ",
- kAsanStackPartialRedzoneMagic);
- PrintShadowByte(str, " Stack after return: ",
- kAsanStackAfterReturnMagic);
- PrintShadowByte(str, " Stack use after scope: ",
- kAsanStackUseAfterScopeMagic);
- PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
- PrintShadowByte(str, " Global init order: ",
- kAsanInitializationOrderMagic);
- PrintShadowByte(str, " Poisoned by user: ",
- kAsanUserPoisonedMemoryMagic);
- PrintShadowByte(str, " Container overflow: ",
- kAsanContiguousContainerOOBMagic);
- PrintShadowByte(str, " Array cookie: ",
- kAsanArrayCookieMagic);
- PrintShadowByte(str, " Intra object redzone: ",
- kAsanIntraObjectRedzone);
- PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
- PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
- PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
-}
-
-void MaybeDumpInstructionBytes(uptr pc) {
- if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
- return;
- InternalScopedString str(1024);
- str.append("First 16 instruction bytes at pc: ");
- if (IsAccessibleMemoryRange(pc, 16)) {
- for (int i = 0; i < 16; ++i) {
- PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/false, " ");
- }
- str.append("\n");
- } else {
- str.append("unaccessible\n");
- }
- Report("%s", str.data());
-}
-
-static void PrintShadowMemoryForAddress(uptr addr) {
- if (!AddrIsInMem(addr)) return;
- uptr shadow_addr = MemToShadow(addr);
- const uptr n_bytes_per_row = 16;
- uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
- InternalScopedString str(4096 * 8);
- str.append("Shadow bytes around the buggy address:\n");
- for (int i = -5; i <= 5; i++) {
- const char *prefix = (i == 0) ? "=>" : " ";
- PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
- (u8 *)shadow_addr, n_bytes_per_row);
- }
- if (flags()->print_legend) PrintLegend(&str);
- Printf("%s", str.data());
-}
-
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
const char *zone_name) {
if (zone_ptr) {
@@ -225,191 +78,8 @@ static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
}
}
-static void DescribeThread(AsanThread *t) {
- if (t)
- DescribeThread(t->context());
-}
-
// ---------------------- Address Descriptions ------------------- {{{1
-static bool IsASCII(unsigned char c) {
- return /*0x00 <= c &&*/ c <= 0x7F;
-}
-
-static const char *MaybeDemangleGlobalName(const char *name) {
- // We can spoil names of globals with C linkage, so use an heuristic
- // approach to check if the name should be demangled.
- bool should_demangle = false;
- if (name[0] == '_' && name[1] == 'Z')
- should_demangle = true;
- else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
- should_demangle = true;
-
- return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
-}
-
-// Check if the global is a zero-terminated ASCII string. If so, print it.
-static void PrintGlobalNameIfASCII(InternalScopedString *str,
- const __asan_global &g) {
- for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
- unsigned char c = *(unsigned char*)p;
- if (c == '\0' || !IsASCII(c)) return;
- }
- if (*(char*)(g.beg + g.size - 1) != '\0') return;
- str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
- (char *)g.beg);
-}
-
-static const char *GlobalFilename(const __asan_global &g) {
- const char *res = g.module_name;
- // Prefer the filename from source location, if is available.
- if (g.location)
- res = g.location->filename;
- CHECK(res);
- return res;
-}
-
-static void PrintGlobalLocation(InternalScopedString *str,
- const __asan_global &g) {
- str->append("%s", GlobalFilename(g));
- if (!g.location)
- return;
- if (g.location->line_no)
- str->append(":%d", g.location->line_no);
- if (g.location->column_no)
- str->append(":%d", g.location->column_no);
-}
-
-static void DescribeAddressRelativeToGlobal(uptr addr, uptr size,
- const __asan_global &g) {
- InternalScopedString str(4096);
- Decorator d;
- str.append("%s", d.Location());
- if (addr < g.beg) {
- str.append("%p is located %zd bytes to the left", (void *)addr,
- g.beg - addr);
- } else if (addr + size > g.beg + g.size) {
- if (addr < g.beg + g.size)
- addr = g.beg + g.size;
- str.append("%p is located %zd bytes to the right", (void *)addr,
- addr - (g.beg + g.size));
- } else {
- // Can it happen?
- str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
- }
- str.append(" of global variable '%s' defined in '",
- MaybeDemangleGlobalName(g.name));
- PrintGlobalLocation(&str, g);
- str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
- str.append("%s", d.EndLocation());
- PrintGlobalNameIfASCII(&str, g);
- Printf("%s", str.data());
-}
-
-static bool DescribeAddressIfGlobal(uptr addr, uptr size,
- const char *bug_type) {
- // Assume address is close to at most four globals.
- const int kMaxGlobalsInReport = 4;
- __asan_global globals[kMaxGlobalsInReport];
- u32 reg_sites[kMaxGlobalsInReport];
- int globals_num =
- GetGlobalsForAddress(addr, globals, reg_sites, ARRAY_SIZE(globals));
- if (globals_num == 0)
- return false;
- for (int i = 0; i < globals_num; i++) {
- DescribeAddressRelativeToGlobal(addr, size, globals[i]);
- if (0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
- reg_sites[i]) {
- Printf(" registered at:\n");
- StackDepotGet(reg_sites[i]).Print();
- }
- }
- return true;
-}
-
-bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr, bool print) {
- if (AddrIsInMem(addr))
- return false;
- const char *area_type = nullptr;
- if (AddrIsInShadowGap(addr)) area_type = "shadow gap";
- else if (AddrIsInHighShadow(addr)) area_type = "high shadow";
- else if (AddrIsInLowShadow(addr)) area_type = "low shadow";
- if (area_type != nullptr) {
- if (print) {
- Printf("Address %p is located in the %s area.\n", addr, area_type);
- } else {
- CHECK(descr);
- descr->region_kind = area_type;
- }
- return true;
- }
- CHECK(0 && "Address is not in memory and not in shadow?");
- return false;
-}
-
-// Return " (thread_name) " or an empty string if the name is empty.
-const char *ThreadNameWithParenthesis(AsanThreadContext *t, char buff[],
- uptr buff_len) {
- const char *name = t->name;
- if (name[0] == '\0') return "";
- buff[0] = 0;
- internal_strncat(buff, " (", 3);
- internal_strncat(buff, name, buff_len - 4);
- internal_strncat(buff, ")", 2);
- return buff;
-}
-
-const char *ThreadNameWithParenthesis(u32 tid, char buff[],
- uptr buff_len) {
- if (tid == kInvalidTid) return "";
- asanThreadRegistry().CheckLocked();
- AsanThreadContext *t = GetThreadContextByTidLocked(tid);
- return ThreadNameWithParenthesis(t, buff, buff_len);
-}
-
-static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
- uptr access_size, uptr prev_var_end,
- uptr next_var_beg) {
- uptr var_end = var.beg + var.size;
- uptr addr_end = addr + access_size;
- const char *pos_descr = nullptr;
- // If the variable [var.beg, var_end) is the nearest variable to the
- // current memory access, indicate it in the log.
- if (addr >= var.beg) {
- if (addr_end <= var_end)
- pos_descr = "is inside"; // May happen if this is a use-after-return.
- else if (addr < var_end)
- pos_descr = "partially overflows";
- else if (addr_end <= next_var_beg &&
- next_var_beg - addr_end >= addr - var_end)
- pos_descr = "overflows";
- } else {
- if (addr_end > var.beg)
- pos_descr = "partially underflows";
- else if (addr >= prev_var_end &&
- addr - prev_var_end >= var.beg - addr_end)
- pos_descr = "underflows";
- }
- InternalScopedString str(1024);
- str.append(" [%zd, %zd)", var.beg, var_end);
- // Render variable name.
- str.append(" '");
- for (uptr i = 0; i < var.name_len; ++i) {
- str.append("%c", var.name_pos[i]);
- }
- str.append("'");
- if (pos_descr) {
- Decorator d;
- // FIXME: we may want to also print the size of the access here,
- // but in case of accesses generated by memset it may be confusing.
- str.append("%s <== Memory access at offset %zd %s this variable%s\n",
- d.Location(), addr, pos_descr, d.EndLocation());
- } else {
- str.append("\n");
- }
- Printf("%s", str.data());
-}
-
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars) {
CHECK(frame_descr);
@@ -437,195 +107,17 @@ bool ParseFrameDescription(const char *frame_descr,
return true;
}
-bool DescribeAddressIfStack(uptr addr, uptr access_size) {
- AsanThread *t = FindThreadByStackAddress(addr);
- if (!t) return false;
-
- Decorator d;
- char tname[128];
- Printf("%s", d.Location());
- Printf("Address %p is located in stack of thread T%d%s", addr, t->tid(),
- ThreadNameWithParenthesis(t->tid(), tname, sizeof(tname)));
-
- // Try to fetch precise stack frame for this access.
- AsanThread::StackFrameAccess access;
- if (!t->GetStackFrameAccessByAddr(addr, &access)) {
- Printf("%s\n", d.EndLocation());
- return true;
- }
- Printf(" at offset %zu in frame%s\n", access.offset, d.EndLocation());
-
- // Now we print the frame where the alloca has happened.
- // We print this frame as a stack trace with one element.
- // The symbolizer may print more than one frame if inlining was involved.
- // The frame numbers may be different than those in the stack trace printed
- // previously. That's unfortunate, but I have no better solution,
- // especially given that the alloca may be from entirely different place
- // (e.g. use-after-scope, or different thread's stack).
-#if defined(__powerpc64__) && defined(__BIG_ENDIAN__)
- // On PowerPC64 ELFv1, the address of a function actually points to a
- // three-doubleword data structure with the first field containing
- // the address of the function's code.
- access.frame_pc = *reinterpret_cast<uptr *>(access.frame_pc);
-#endif
- access.frame_pc += 16;
- Printf("%s", d.EndLocation());
- StackTrace alloca_stack(&access.frame_pc, 1);
- alloca_stack.Print();
-
- InternalMmapVector<StackVarDescr> vars(16);
- if (!ParseFrameDescription(access.frame_descr, &vars)) {
- Printf("AddressSanitizer can't parse the stack frame "
- "descriptor: |%s|\n", access.frame_descr);
- // 'addr' is a stack address, so return true even if we can't parse frame
- return true;
- }
- uptr n_objects = vars.size();
- // Report the number of stack objects.
- Printf(" This frame has %zu object(s):\n", n_objects);
-
- // Report all objects in this frame.
- for (uptr i = 0; i < n_objects; i++) {
- uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
- uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
- PrintAccessAndVarIntersection(vars[i], access.offset, access_size,
- prev_var_end, next_var_beg);
- }
- Printf("HINT: this may be a false positive if your program uses "
- "some custom stack unwind mechanism or swapcontext\n");
- if (SANITIZER_WINDOWS)
- Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
- else
- Printf(" (longjmp and C++ exceptions *are* supported)\n");
-
- DescribeThread(t);
- return true;
-}
-
-static void DescribeAccessToHeapChunk(AsanChunkView chunk, uptr addr,
- uptr access_size) {
- sptr offset;
- Decorator d;
- InternalScopedString str(4096);
- str.append("%s", d.Location());
- if (chunk.AddrIsAtLeft(addr, access_size, &offset)) {
- str.append("%p is located %zd bytes to the left of", (void *)addr, offset);
- } else if (chunk.AddrIsAtRight(addr, access_size, &offset)) {
- if (offset < 0) {
- addr -= offset;
- offset = 0;
- }
- str.append("%p is located %zd bytes to the right of", (void *)addr, offset);
- } else if (chunk.AddrIsInside(addr, access_size, &offset)) {
- str.append("%p is located %zd bytes inside of", (void*)addr, offset);
- } else {
- str.append("%p is located somewhere around (this is AddressSanitizer bug!)",
- (void *)addr);
- }
- str.append(" %zu-byte region [%p,%p)\n", chunk.UsedSize(),
- (void *)(chunk.Beg()), (void *)(chunk.End()));
- str.append("%s", d.EndLocation());
- Printf("%s", str.data());
-}
-
-void DescribeHeapAddress(uptr addr, uptr access_size) {
- AsanChunkView chunk = FindHeapChunkByAddress(addr);
- if (!chunk.IsValid()) {
- Printf("AddressSanitizer can not describe address in more detail "
- "(wild memory access suspected).\n");
- return;
- }
- DescribeAccessToHeapChunk(chunk, addr, access_size);
- CHECK(chunk.AllocTid() != kInvalidTid);
- asanThreadRegistry().CheckLocked();
- AsanThreadContext *alloc_thread =
- GetThreadContextByTidLocked(chunk.AllocTid());
- StackTrace alloc_stack = chunk.GetAllocStack();
- char tname[128];
- Decorator d;
- AsanThreadContext *free_thread = nullptr;
- if (chunk.FreeTid() != kInvalidTid) {
- free_thread = GetThreadContextByTidLocked(chunk.FreeTid());
- Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
- free_thread->tid,
- ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
- d.EndAllocation());
- StackTrace free_stack = chunk.GetFreeStack();
- free_stack.Print();
- Printf("%spreviously allocated by thread T%d%s here:%s\n",
- d.Allocation(), alloc_thread->tid,
- ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
- } else {
- Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
- alloc_thread->tid,
- ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
- d.EndAllocation());
- }
- alloc_stack.Print();
- DescribeThread(GetCurrentThread());
- if (free_thread)
- DescribeThread(free_thread);
- DescribeThread(alloc_thread);
-}
-
-static void DescribeAddress(uptr addr, uptr access_size, const char *bug_type) {
- // Check if this is shadow or shadow gap.
- if (DescribeAddressIfShadow(addr))
- return;
- CHECK(AddrIsInMem(addr));
- if (DescribeAddressIfGlobal(addr, access_size, bug_type))
- return;
- if (DescribeAddressIfStack(addr, access_size))
- return;
- // Assume it is a heap address.
- DescribeHeapAddress(addr, access_size);
-}
-
-// ------------------- Thread description -------------------- {{{1
-
-void DescribeThread(AsanThreadContext *context) {
- CHECK(context);
- asanThreadRegistry().CheckLocked();
- // No need to announce the main thread.
- if (context->tid == 0 || context->announced) {
- return;
- }
- context->announced = true;
- char tname[128];
- InternalScopedString str(1024);
- str.append("Thread T%d%s", context->tid,
- ThreadNameWithParenthesis(context->tid, tname, sizeof(tname)));
- if (context->parent_tid == kInvalidTid) {
- str.append(" created by unknown thread\n");
- Printf("%s", str.data());
- return;
- }
- str.append(
- " created by T%d%s here:\n", context->parent_tid,
- ThreadNameWithParenthesis(context->parent_tid, tname, sizeof(tname)));
- Printf("%s", str.data());
- StackDepotGet(context->stack_id).Print();
- // Recursively described parent thread if needed.
- if (flags()->print_full_thread_history) {
- AsanThreadContext *parent_context =
- GetThreadContextByTidLocked(context->parent_tid);
- DescribeThread(parent_context);
- }
-}
-
// -------------------- Different kinds of reports ----------------- {{{1
// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
class ScopedInErrorReport {
public:
- explicit ScopedInErrorReport(ReportData *report = nullptr,
- bool fatal = false) {
+ explicit ScopedInErrorReport(bool fatal = false) {
halt_on_error_ = fatal || flags()->halt_on_error;
if (lock_.TryLock()) {
- StartReporting(report);
+ StartReporting();
return;
}
@@ -667,10 +159,13 @@ class ScopedInErrorReport {
lock_.Lock();
}
- StartReporting(report);
+ StartReporting();
}
~ScopedInErrorReport() {
+ ASAN_ON_ERROR();
+ if (current_error_.IsValid()) current_error_.Print();
+
// Make sure the current thread is announced.
DescribeThread(GetCurrentThread());
// We may want to grab this lock again when printing stats.
@@ -678,9 +173,30 @@ class ScopedInErrorReport {
// Print memory stats.
if (flags()->print_stats)
__asan_print_accumulated_stats();
+
+ if (common_flags()->print_cmdline)
+ PrintCmdline();
+
+  // Copy the message buffer so that we can start logging without holding a
+  // lock that gets acquired during printing.
+ InternalScopedBuffer<char> buffer_copy(kErrorMessageBufferSize);
+ {
+ BlockingMutexLock l(&error_message_buf_mutex);
+ internal_memcpy(buffer_copy.data(),
+ error_message_buffer, kErrorMessageBufferSize);
+ }
+
+ LogFullErrorReport(buffer_copy.data());
+
if (error_report_callback) {
- error_report_callback(error_message_buffer);
+ error_report_callback(buffer_copy.data());
}
+
+ // In halt_on_error = false mode, reset the current error object (before
+ // unlocking).
+ if (!halt_on_error_)
+ internal_memset(&current_error_, 0, sizeof(current_error_));
+
CommonSanitizerReportMutex.Unlock();
reporting_thread_tid_ = kInvalidTid;
lock_.Unlock();
@@ -690,11 +206,18 @@ class ScopedInErrorReport {
}
}
+ void ReportError(const ErrorDescription &description) {
+ // Can only report one error per ScopedInErrorReport.
+ CHECK_EQ(current_error_.kind, kErrorKindInvalid);
+ current_error_ = description;
+ }
+
+ static ErrorDescription &CurrentError() {
+ return current_error_;
+ }
+
private:
- void StartReporting(ReportData *report) {
- if (report) report_data = *report;
- report_happened = true;
- ASAN_ON_ERROR();
+ void StartReporting() {
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
@@ -708,154 +231,69 @@ class ScopedInErrorReport {
static StaticSpinMutex lock_;
static u32 reporting_thread_tid_;
+ // Error currently being reported. This enables the destructor to interact
+ // with the debugger and point it to an error description.
+ static ErrorDescription current_error_;
bool halt_on_error_;
};
StaticSpinMutex ScopedInErrorReport::lock_;
-u32 ScopedInErrorReport::reporting_thread_tid_;
+u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid;
+ErrorDescription ScopedInErrorReport::current_error_;
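Each Report* function below now follows the same shape: construct an ErrorXxx
description object and hand it to the scope guard, whose destructor prints it.
A minimal sketch of that flow, reusing the types exactly as they appear in the
real ReportDoubleFree further down:

void ReportSomeError(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report(/*fatal*/ false);   // take the report lock
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);   // stash the description; nothing printed yet
}  // ~ScopedInErrorReport prints current_error_, logs it, runs callbacks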
void ReportStackOverflow(const SignalContext &sig) {
- ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: stack-overflow on address %p"
- " (pc %p bp %p sp %p T%d)\n",
- (void *)sig.addr, (void *)sig.pc, (void *)sig.bp, (void *)sig.sp,
- GetCurrentTidOrInvalid());
- Printf("%s", d.EndWarning());
- GET_STACK_TRACE_SIGNAL(sig);
- stack.Print();
- ReportErrorSummary("stack-overflow", &stack);
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorStackOverflow error(GetCurrentTidOrInvalid(), sig);
+ in_report.ReportError(error);
}
-void ReportDeadlySignal(const char *description, const SignalContext &sig) {
- ScopedInErrorReport in_report(/*report*/nullptr, /*fatal*/true);
- Decorator d;
- Printf("%s", d.Warning());
- Report(
- "ERROR: AddressSanitizer: %s on unknown address %p"
- " (pc %p bp %p sp %p T%d)\n",
- description, (void *)sig.addr, (void *)sig.pc, (void *)sig.bp,
- (void *)sig.sp, GetCurrentTidOrInvalid());
- if (sig.pc < GetPageSizeCached()) {
- Report("Hint: pc points to the zero page.\n");
- }
- Printf("%s", d.EndWarning());
- GET_STACK_TRACE_SIGNAL(sig);
- stack.Print();
- MaybeDumpInstructionBytes(sig.pc);
- Printf("AddressSanitizer can not provide additional info.\n");
- ReportErrorSummary(description, &stack);
+void ReportDeadlySignal(int signo, const SignalContext &sig) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig, signo);
+ in_report.ReportError(error);
}
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: attempting double-free on %p in "
- "thread T%d%s:\n",
- addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("double-free", &stack);
+ ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
}
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: new-delete-type-mismatch on %p in "
- "thread T%d%s:\n",
- addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
- Printf(" size of the allocated type: %zd bytes;\n"
- " size of the deallocated type: %zd bytes.\n",
- asan_mz_size(reinterpret_cast<void*>(addr)), delete_size);
- CHECK_GT(free_stack->size, 0);
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("new-delete-type-mismatch", &stack);
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+ ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ delete_size);
+ in_report.ReportError(error);
}
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- char tname[128];
- u32 curr_tid = GetCurrentTidOrInvalid();
- Report("ERROR: AddressSanitizer: attempting free on address "
- "which was not malloc()-ed: %p in thread T%d%s\n", addr,
- curr_tid, ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-free", &stack);
+ ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
}
void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
AllocType alloc_type,
AllocType dealloc_type) {
- static const char *alloc_names[] =
- {"INVALID", "malloc", "operator new", "operator new []"};
- static const char *dealloc_names[] =
- {"INVALID", "free", "operator delete", "operator delete []"};
- CHECK_NE(alloc_type, dealloc_type);
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: alloc-dealloc-mismatch (%s vs %s) on %p\n",
- alloc_names[alloc_type], dealloc_names[dealloc_type], addr);
- Printf("%s", d.EndWarning());
- CHECK_GT(free_stack->size, 0);
- GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
- stack.Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("alloc-dealloc-mismatch", &stack);
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+ ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ alloc_type, dealloc_type);
+ in_report.ReportError(error);
}
void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: attempting to call "
- "malloc_usable_size() for pointer which is "
- "not owned: %p\n", addr);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-malloc_usable_size", stack);
+ ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
+ in_report.ReportError(error);
}
void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: attempting to call "
- "__sanitizer_get_allocated_size() for pointer which is "
- "not owned: %p\n", addr);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeHeapAddress(addr, 1);
- ReportErrorSummary("bad-__sanitizer_get_allocated_size", stack);
+ ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
+ addr);
+ in_report.ReportError(error);
}
void ReportStringFunctionMemoryRangesOverlap(const char *function,
@@ -863,94 +301,43 @@ void ReportStringFunctionMemoryRangesOverlap(const char *function,
const char *offset2, uptr length2,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- char bug_type[100];
- internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s: "
- "memory ranges [%p,%p) and [%p, %p) overlap\n", \
- bug_type, offset1, offset1 + length1, offset2, offset2 + length2);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeAddress((uptr)offset1, length1, bug_type);
- DescribeAddress((uptr)offset2, length2, bug_type);
- ReportErrorSummary(bug_type, stack);
+ ErrorStringFunctionMemoryRangesOverlap error(
+ GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
+ length2, function);
+ in_report.ReportError(error);
}
void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Decorator d;
- const char *bug_type = "negative-size-param";
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s: (size=%zd)\n", bug_type, size);
- Printf("%s", d.EndWarning());
- stack->Print();
- DescribeAddress(offset, size, bug_type);
- ReportErrorSummary(bug_type, stack);
+ ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
+ size);
+ in_report.ReportError(error);
}
void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
uptr old_mid, uptr new_mid,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
- Report("ERROR: AddressSanitizer: bad parameters to "
- "__sanitizer_annotate_contiguous_container:\n"
- " beg : %p\n"
- " end : %p\n"
- " old_mid : %p\n"
- " new_mid : %p\n",
- beg, end, old_mid, new_mid);
- uptr granularity = SHADOW_GRANULARITY;
- if (!IsAligned(beg, granularity))
- Report("ERROR: beg is not aligned by %d\n", granularity);
- stack->Print();
- ReportErrorSummary("bad-__sanitizer_annotate_contiguous_container", stack);
+ ErrorBadParamsToAnnotateContiguousContainer error(
+ GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
+ in_report.ReportError(error);
}
void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2) {
ScopedInErrorReport in_report;
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: odr-violation (%p):\n", g1->beg);
- Printf("%s", d.EndWarning());
- InternalScopedString g1_loc(256), g2_loc(256);
- PrintGlobalLocation(&g1_loc, *g1);
- PrintGlobalLocation(&g2_loc, *g2);
- Printf(" [1] size=%zd '%s' %s\n", g1->size,
- MaybeDemangleGlobalName(g1->name), g1_loc.data());
- Printf(" [2] size=%zd '%s' %s\n", g2->size,
- MaybeDemangleGlobalName(g2->name), g2_loc.data());
- if (stack_id1 && stack_id2) {
- Printf("These globals were registered at these points:\n");
- Printf(" [1]:\n");
- StackDepotGet(stack_id1).Print();
- Printf(" [2]:\n");
- StackDepotGet(stack_id2).Print();
- }
- Report("HINT: if you don't care about these errors you may set "
- "ASAN_OPTIONS=detect_odr_violation=0\n");
- InternalScopedString error_msg(256);
- error_msg.append("odr-violation: global '%s' at %s",
- MaybeDemangleGlobalName(g1->name), g1_loc.data());
- ReportErrorSummary(error_msg.data());
+ ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
+ stack_id2);
+ in_report.ReportError(error);
}
// ----------------------- CheckForInvalidPointerPair ----------- {{{1
-static NOINLINE void
-ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp, uptr a1, uptr a2) {
+static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
+ uptr a1, uptr a2) {
ScopedInErrorReport in_report;
- const char *bug_type = "invalid-pointer-pair";
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: invalid-pointer-pair: %p %p\n", a1, a2);
- Printf("%s", d.EndWarning());
- GET_STACK_TRACE_FATAL(pc, bp);
- stack.Print();
- DescribeAddress(a1, 1, bug_type);
- DescribeAddress(a2, 1, bug_type);
- ReportErrorSummary(bug_type, &stack);
+ ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
+ in_report.ReportError(error);
}
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
@@ -959,26 +346,15 @@ static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
uptr a2 = reinterpret_cast<uptr>(p2);
AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
- bool valid1 = chunk1.IsValid();
- bool valid2 = chunk2.IsValid();
- if ((valid1 != valid2) || (valid1 && valid2 && !chunk1.Eq(chunk2))) {
- GET_CALLER_PC_BP_SP; \
+ bool valid1 = chunk1.IsAllocated();
+ bool valid2 = chunk2.IsAllocated();
+ if (!valid1 || !valid2 || !chunk1.Eq(chunk2)) {
+ GET_CALLER_PC_BP_SP;
return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
}
}
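For context, this check fires on comparisons between pointers that do not
belong to the same allocation. A hedged illustration of user code that would
reach ReportInvalidPointerPair, assuming pointer-comparison instrumentation is
enabled at compile time (the snippet is illustrative only):

char *p = new char[16];
char *q = new char[16];
if (p < q) { /* ... */ }  // p and q point into different heap chunks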
// ----------------------- Mac-specific reports ----------------- {{{1
-void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
- BufferedStackTrace *stack) {
- // Just print a warning here.
- Printf("free_common(%p) -- attempting to free unallocated memory.\n"
- "AddressSanitizer is ignoring this error on Mac OS now.\n",
- addr);
- PrintZoneForPointer(addr, zone_ptr, zone_name);
- stack->Print();
- DescribeHeapAddress(addr, 1);
-}
-
void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
BufferedStackTrace *stack) {
ScopedInErrorReport in_report;
@@ -987,22 +363,26 @@ void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
addr);
PrintZoneForPointer(addr, zone_ptr, zone_name);
stack->Print();
- DescribeHeapAddress(addr, 1);
+ DescribeAddressIfHeap(addr);
}
-void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
- BufferedStackTrace *stack) {
- ScopedInErrorReport in_report;
- Printf("cf_realloc(%p) -- attempting to realloc unallocated memory.\n"
- "This is an unrecoverable problem, exiting now.\n",
- addr);
- PrintZoneForPointer(addr, zone_ptr, zone_name);
- stack->Print();
- DescribeHeapAddress(addr, 1);
+// -------------- SuppressErrorReport -------------- {{{1
+// Avoid duplicate error reports in ASan recovery mode.
+static bool SuppressErrorReport(uptr pc) {
+ if (!common_flags()->suppress_equal_pcs) return false;
+ for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
+ uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
+ if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
+ pc, memory_order_relaxed))
+ return false;
+ if (cmp == pc) return true;
+ }
+ Die();
}
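The suppression pool above is a fixed array of atomics: the first report from
a given PC claims an empty slot via compare-and-swap, and later reports from
the same PC are dropped. A standalone sketch of the same idea in portable C++
(the pool size is hypothetical; the real runtime calls Die() on overflow):

#include <atomic>
#include <cstdint>

constexpr unsigned kPoolSize = 16;  // stands in for kAsanBuggyPcPoolSize
static std::atomic<std::uintptr_t> g_buggy_pc_pool[kPoolSize];

bool AlreadyReported(std::uintptr_t pc) {
  for (auto &slot : g_buggy_pc_pool) {
    std::uintptr_t expected = 0;
    // Claim the first empty slot; winning the CAS means this PC is new.
    if (slot.compare_exchange_strong(expected, pc, std::memory_order_relaxed))
      return false;
    if (expected == pc) return true;  // this PC was reported before
  }
  return true;  // pool exhausted; upstream Die()s here instead
}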
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal) {
+ if (!fatal && SuppressErrorReport(pc)) return;
ENABLE_FRAME_POINTER;
// Optimization experiments.
@@ -1014,87 +394,10 @@ void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
// The reaction to a non-zero value of exp is to be defined.
(void)exp;
- // Determine the error type.
- const char *bug_descr = "unknown-crash";
- if (AddrIsInMem(addr)) {
- u8 *shadow_addr = (u8*)MemToShadow(addr);
- // If we are accessing 16 bytes, look at the second shadow byte.
- if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY)
- shadow_addr++;
- // If we are in the partial right redzone, look at the next shadow byte.
- if (*shadow_addr > 0 && *shadow_addr < 128)
- shadow_addr++;
- switch (*shadow_addr) {
- case kAsanHeapLeftRedzoneMagic:
- case kAsanHeapRightRedzoneMagic:
- case kAsanArrayCookieMagic:
- bug_descr = "heap-buffer-overflow";
- break;
- case kAsanHeapFreeMagic:
- bug_descr = "heap-use-after-free";
- break;
- case kAsanStackLeftRedzoneMagic:
- bug_descr = "stack-buffer-underflow";
- break;
- case kAsanInitializationOrderMagic:
- bug_descr = "initialization-order-fiasco";
- break;
- case kAsanStackMidRedzoneMagic:
- case kAsanStackRightRedzoneMagic:
- case kAsanStackPartialRedzoneMagic:
- bug_descr = "stack-buffer-overflow";
- break;
- case kAsanStackAfterReturnMagic:
- bug_descr = "stack-use-after-return";
- break;
- case kAsanUserPoisonedMemoryMagic:
- bug_descr = "use-after-poison";
- break;
- case kAsanContiguousContainerOOBMagic:
- bug_descr = "container-overflow";
- break;
- case kAsanStackUseAfterScopeMagic:
- bug_descr = "stack-use-after-scope";
- break;
- case kAsanGlobalRedzoneMagic:
- bug_descr = "global-buffer-overflow";
- break;
- case kAsanIntraObjectRedzone:
- bug_descr = "intra-object-overflow";
- break;
- case kAsanAllocaLeftMagic:
- case kAsanAllocaRightMagic:
- bug_descr = "dynamic-stack-buffer-overflow";
- break;
- }
- }
-
- ReportData report = { pc, sp, bp, addr, (bool)is_write, access_size,
- bug_descr };
- ScopedInErrorReport in_report(&report, fatal);
-
- Decorator d;
- Printf("%s", d.Warning());
- Report("ERROR: AddressSanitizer: %s on address "
- "%p at pc %p bp %p sp %p\n",
- bug_descr, (void*)addr, pc, bp, sp);
- Printf("%s", d.EndWarning());
-
- u32 curr_tid = GetCurrentTidOrInvalid();
- char tname[128];
- Printf("%s%s of size %zu at %p thread T%d%s%s\n",
- d.Access(),
- access_size ? (is_write ? "WRITE" : "READ") : "ACCESS",
- access_size, (void*)addr, curr_tid,
- ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)),
- d.EndAccess());
-
- GET_STACK_TRACE_FATAL(pc, bp);
- stack.Print();
-
- DescribeAddress(addr, access_size, bug_descr);
- ReportErrorSummary(bug_descr, &stack);
- PrintShadowMemoryForAddress(addr);
+ ScopedInErrorReport in_report(fatal);
+ ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
+ access_size);
+ in_report.ReportError(error);
}
} // namespace __asan
@@ -1110,52 +413,62 @@ void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
}
void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
+ BlockingMutexLock l(&error_message_buf_mutex);
error_report_callback = callback;
- if (callback) {
- error_message_buffer_size = 1 << 16;
- error_message_buffer =
- (char*)MmapOrDie(error_message_buffer_size, __func__);
- error_message_buffer_pos = 0;
- }
}
void __asan_describe_address(uptr addr) {
// Thread registry must be locked while we're describing an address.
asanThreadRegistry().Lock();
- DescribeAddress(addr, 1, "");
+ PrintAddressDescription(addr, 1, "");
asanThreadRegistry().Unlock();
}
int __asan_report_present() {
- return report_happened ? 1 : 0;
+ return ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric;
}
uptr __asan_get_report_pc() {
- return report_data.pc;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.pc;
+ return 0;
}
uptr __asan_get_report_bp() {
- return report_data.bp;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bp;
+ return 0;
}
uptr __asan_get_report_sp() {
- return report_data.sp;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.sp;
+ return 0;
}
uptr __asan_get_report_address() {
- return report_data.addr;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError()
+ .Generic.addr_description.Address();
+ return 0;
}
int __asan_get_report_access_type() {
- return report_data.is_write ? 1 : 0;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.is_write;
+ return 0;
}
uptr __asan_get_report_access_size() {
- return report_data.access_size;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.access_size;
+ return 0;
}
const char *__asan_get_report_description() {
- return report_data.description;
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bug_descr;
+ return nullptr;
}
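Taken together, these accessors are the debugger-facing view of the last
(generic) error. A minimal consumer sketch, calling the functions exactly as
defined above:

void DumpLastReport() {
  if (!__asan_report_present()) return;  // only generic errors are exposed
  Printf("%s: %s of size %zu at %p (pc %p)\n",
         __asan_get_report_description(),
         __asan_get_report_access_type() ? "WRITE" : "READ",
         __asan_get_report_access_size(),
         (void *)__asan_get_report_address(),
         (void *)__asan_get_report_pc());
}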
extern "C" {
diff --git a/libsanitizer/asan/asan_report.h b/libsanitizer/asan/asan_report.h
index f2a18155232..111b8400153 100644
--- a/libsanitizer/asan/asan_report.h
+++ b/libsanitizer/asan/asan_report.h
@@ -23,34 +23,28 @@ struct StackVarDescr {
uptr name_len;
};
-struct AddressDescription {
- char *name;
- uptr name_size;
- uptr region_address;
- uptr region_size;
- const char *region_kind;
-};
-
// Returns the number of globals close to the provided address and copies
// them to "globals" array.
int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
int max_globals);
-bool GetInfoForAddressIfGlobal(uptr addr, AddressDescription *descr);
+
+const char *MaybeDemangleGlobalName(const char *name);
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after = "\n");
+
// The following functions prints address description depending
// on the memory type (shadow/heap/stack/global).
-void DescribeHeapAddress(uptr addr, uptr access_size);
-bool DescribeAddressIfShadow(uptr addr, AddressDescription *descr = nullptr,
- bool print = true);
bool ParseFrameDescription(const char *frame_descr,
InternalMmapVector<StackVarDescr> *vars);
-bool DescribeAddressIfStack(uptr addr, uptr access_size);
-void DescribeThread(AsanThreadContext *context);
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
-void ReportDeadlySignal(const char *description, const SignalContext &sig);
+void ReportDeadlySignal(int signo, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
@@ -75,8 +69,6 @@ void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
const __asan_global *g2, u32 stack_id2);
// Mac-specific errors and warnings.
-void WarnMacFreeUnallocated(uptr addr, uptr zone_ptr, const char *zone_name,
- BufferedStackTrace *stack);
void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
const char *zone_name,
BufferedStackTrace *stack);
diff --git a/libsanitizer/asan/asan_rtl.cc b/libsanitizer/asan/asan_rtl.cc
index 50b02453a2b..38009d2905f 100644
--- a/libsanitizer/asan/asan_rtl.cc
+++ b/libsanitizer/asan/asan_rtl.cc
@@ -30,6 +30,7 @@
#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"
+uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
int __asan_option_detect_stack_use_after_return; // Global interface symbol.
uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
@@ -84,8 +85,8 @@ void ShowStatsAndAbort() {
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
- CHECK_EQ((beg % GetPageSizeCached()), 0);
- CHECK_EQ(((end + 1) % GetPageSizeCached()), 0);
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size, name);
@@ -261,6 +262,7 @@ static NOINLINE void force_interface_symbols() {
volatile int fake_condition = 0; // prevent dead condition elimination.
// __asan_report_* functions are noreturn, so we need a switch to prevent
// the compiler from removing any of them.
+ // clang-format off
switch (fake_condition) {
case 1: __asan_report_load1(0); break;
case 2: __asan_report_load2(0); break;
@@ -300,7 +302,14 @@ static NOINLINE void force_interface_symbols() {
case 37: __asan_unpoison_stack_memory(0, 0); break;
case 38: __asan_region_is_poisoned(0, 0); break;
case 39: __asan_describe_address(0); break;
+ case 40: __asan_set_shadow_00(0, 0); break;
+ case 41: __asan_set_shadow_f1(0, 0); break;
+ case 42: __asan_set_shadow_f2(0, 0); break;
+ case 43: __asan_set_shadow_f3(0, 0); break;
+ case 44: __asan_set_shadow_f5(0, 0); break;
+ case 45: __asan_set_shadow_f8(0, 0); break;
}
+ // clang-format on
}
static void asan_atexit() {
@@ -318,26 +327,39 @@ static void InitializeHighMemEnd() {
kHighMemEnd = GetMaxVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
- kHighMemEnd |= SHADOW_GRANULARITY * GetPageSizeCached() - 1;
+ kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
#endif // !ASAN_FIXED_MAPPING
- CHECK_EQ((kHighMemBeg % GetPageSizeCached()), 0);
+ CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
static void ProtectGap(uptr addr, uptr size) {
- if (!flags()->protect_shadow_gap)
+ if (!flags()->protect_shadow_gap) {
+    // The shadow gap is unprotected, so there is a chance that someone
+    // is actually using this memory, which means it needs a shadow...
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf("protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
+ GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
return;
- void *res = MmapNoAccess(addr, size, "shadow gap");
+ }
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
- uptr step = GetPageSizeCached();
+ uptr step = GetMmapGranularity();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
- void *res = MmapNoAccess(addr, size, "shadow gap");
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
@@ -413,10 +435,13 @@ static void AsanInitInternal() {
AsanCheckIncompatibleRT();
AsanCheckDynamicRTPrereqs();
+ AvoidCVE_2016_2143();
SetCanPoisonMemory(flags()->poison_heap);
SetMallocContextSize(common_flags()->malloc_context_size);
+ InitializePlatformExceptionHandlers();
+
InitializeHighMemEnd();
// Make sure we are not statically linked.
@@ -429,7 +454,6 @@ static void AsanInitInternal() {
__sanitizer_set_report_path(common_flags()->log_path);
- // Enable UAR detection, if required.
__asan_option_detect_stack_use_after_return =
flags()->detect_stack_use_after_return;
@@ -448,7 +472,30 @@ static void AsanInitInternal() {
ReplaceSystemMalloc();
+ // Set the shadow memory address to uninitialized.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+
uptr shadow_start = kLowShadowBeg;
+  // Detect whether a dynamic shadow address must be used, and find an
+  // available location when necessary. When a dynamic address is used, the
+  // macro |kLowShadowBeg| expands to |__asan_shadow_memory_dynamic_address|,
+  // which starts out as |kDefaultShadowSentinel|.
+ if (shadow_start == kDefaultShadowSentinel) {
+ __asan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, kLowShadowBeg);
+
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ }
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __asan_shadow_memory_dynamic_address = shadow_start;
+
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
@@ -537,12 +584,12 @@ static void AsanInitInternal() {
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
-#if CAN_SANITIZE_LEAKS
- __lsan::InitCommonLsan();
- if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
- Atexit(__lsan::DoLeakCheck);
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::InitCommonLsan();
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ Atexit(__lsan::DoLeakCheck);
+ }
}
-#endif // CAN_SANITIZE_LEAKS
#if CAN_SANITIZE_UB
__ubsan::InitAsPlugin();
@@ -550,6 +597,15 @@ static void AsanInitInternal() {
InitializeSuppressions();
+ if (CAN_SANITIZE_LEAKS) {
+ // LateInitialize() calls dlsym, which can allocate an error string buffer
+ // in the TLS. Let's ignore the allocation to avoid reporting a leak.
+ __lsan::ScopedInterceptorDisabler disabler;
+ Symbolizer::LateInitialize();
+ } else {
+ Symbolizer::LateInitialize();
+ }
+
VReport(1, "AddressSanitizer Init done\n");
}
@@ -579,6 +635,9 @@ static AsanInitializer asan_initializer;
using namespace __asan; // NOLINT
void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
int local_stack;
AsanThread *curr_thread = GetCurrentThread();
uptr PageSize = GetPageSizeCached();
@@ -603,7 +662,7 @@ void NOINLINE __asan_handle_no_return() {
"stack top: %p; bottom %p; size: %p (%zd)\n"
"False positive error reports may follow\n"
"For details see "
- "http://code.google.com/p/address-sanitizer/issues/detail?id=189\n",
+ "https://github.com/google/sanitizers/issues/189\n",
top, bottom, top - bottom, top - bottom);
return;
}
diff --git a/libsanitizer/asan/asan_scariness_score.h b/libsanitizer/asan/asan_scariness_score.h
new file mode 100644
index 00000000000..d72fce69d64
--- /dev/null
+++ b/libsanitizer/asan/asan_scariness_score.h
@@ -0,0 +1,72 @@
+//===-- asan_scariness_score.h ----------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Compute the level of scariness of the error message.
+// Don't expect any deep science here, just a set of heuristics that suggest
+// that e.g. 1-byte-read-global-buffer-overflow is less scary than
+// 8-byte-write-stack-use-after-return.
+//
+// Every error report has one or more features, such as memory access size,
+// type (read or write), type of accessed memory (e.g. freed heap, or a global
+// redzone), etc. Every such feature has an int score and a string description.
+// The overall score is the sum of all feature scores and the description
+// is a concatenation of feature descriptions.
+// Examples:
+// 17 (4-byte-read-heap-buffer-overflow)
+// 65 (multi-byte-write-stack-use-after-return)
+// 10 (null-deref)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_SCARINESS_SCORE_H
+#define ASAN_SCARINESS_SCORE_H
+
+#include "asan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+namespace __asan {
+struct ScarinessScoreBase {
+ void Clear() {
+ descr[0] = 0;
+ score = 0;
+ }
+ void Scare(int add_to_score, const char *reason) {
+ if (descr[0])
+ internal_strlcat(descr, "-", sizeof(descr));
+ internal_strlcat(descr, reason, sizeof(descr));
+ score += add_to_score;
+  }
+ int GetScore() const { return score; }
+ const char *GetDescription() const { return descr; }
+ void Print() {
+ if (score && flags()->print_scariness)
+ Printf("SCARINESS: %d (%s)\n", score, descr);
+ }
+ static void PrintSimple(int score, const char *descr) {
+ ScarinessScoreBase SSB;
+ SSB.Clear();
+ SSB.Scare(score, descr);
+ SSB.Print();
+ }
+
+ private:
+ int score;
+ char descr[1024];
+};
+
+struct ScarinessScore : ScarinessScoreBase {
+ ScarinessScore() {
+ Clear();
+ }
+};
+
+} // namespace __asan
+
+#endif // ASAN_SCARINESS_SCORE_H
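A hedged usage sketch for the header above; the real call sites live in the
new error classes, but the accumulation works like this (output appears only
when the print_scariness flag is set):

ScarinessScore ss;                    // Clear()ed by the constructor
ss.Scare(4, "8-byte");                // e.g. access size
ss.Scare(20, "write");                // e.g. access type
ss.Scare(30, "heap-use-after-free");  // e.g. memory kind
ss.Print();  // SCARINESS: 54 (8-byte-write-heap-use-after-free)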
diff --git a/libsanitizer/asan/asan_stack.h b/libsanitizer/asan/asan_stack.h
index 30dc5921303..2b8738156ea 100644
--- a/libsanitizer/asan/asan_stack.h
+++ b/libsanitizer/asan/asan_stack.h
@@ -46,7 +46,10 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
- stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom, fast);
+ if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
+ stack->Unwind(max_depth, pc, bp, context, stack_top, stack_bottom,
+ fast);
+ }
} else if (!t && !fast) {
/* If GetCurrentThread() has failed, try to do slow unwind anyways. */
stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
diff --git a/libsanitizer/asan/asan_suppressions.cc b/libsanitizer/asan/asan_suppressions.cc
index c94fff0500a..1dc9d474240 100644
--- a/libsanitizer/asan/asan_suppressions.cc
+++ b/libsanitizer/asan/asan_suppressions.cc
@@ -87,6 +87,7 @@ bool IsStackTraceSuppressed(const StackTrace *stack) {
if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
+ CHECK(frames);
for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
const char *function_name = cur->info.function;
if (!function_name) {
diff --git a/libsanitizer/asan/asan_thread.cc b/libsanitizer/asan/asan_thread.cc
index 92f968bdee4..818e1261400 100644
--- a/libsanitizer/asan/asan_thread.cc
+++ b/libsanitizer/asan/asan_thread.cc
@@ -118,6 +118,77 @@ void AsanThread::Destroy() {
DTLS_Destroy();
}
+void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
+ uptr size) {
+ if (atomic_load(&stack_switching_, memory_order_relaxed)) {
+ Report("ERROR: starting fiber switch while in fiber switch\n");
+ Die();
+ }
+
+ next_stack_bottom_ = bottom;
+ next_stack_top_ = bottom + size;
+ atomic_store(&stack_switching_, 1, memory_order_release);
+
+ FakeStack *current_fake_stack = fake_stack_;
+ if (fake_stack_save)
+ *fake_stack_save = fake_stack_;
+ fake_stack_ = nullptr;
+ SetTLSFakeStack(nullptr);
+  // If fake_stack_save is null, the fiber is going to die, so destroy its
+  // fake stack.
+ if (!fake_stack_save && current_fake_stack)
+ current_fake_stack->Destroy(this->tid());
+}
+
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
+ uptr *bottom_old,
+ uptr *size_old) {
+ if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
+ Report("ERROR: finishing a fiber switch that has not started\n");
+ Die();
+ }
+
+ if (fake_stack_save) {
+ SetTLSFakeStack(fake_stack_save);
+ fake_stack_ = fake_stack_save;
+ }
+
+ if (bottom_old)
+ *bottom_old = stack_bottom_;
+ if (size_old)
+ *size_old = stack_top_ - stack_bottom_;
+ stack_bottom_ = next_stack_bottom_;
+ stack_top_ = next_stack_top_;
+ atomic_store(&stack_switching_, 0, memory_order_release);
+ next_stack_top_ = 0;
+ next_stack_bottom_ = 0;
+}
+
+inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
+ if (!atomic_load(&stack_switching_, memory_order_acquire))
+ return StackBounds{stack_bottom_, stack_top_}; // NOLINT
+ char local;
+ const uptr cur_stack = (uptr)&local;
+  // Note: we need to check the next stack first, because FinishSwitchFiber
+  // may be in the process of overwriting stack_top_/stack_bottom_. But in
+  // that case we are already on the next stack.
+ if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
+ return StackBounds{next_stack_bottom_, next_stack_top_}; // NOLINT
+ return StackBounds{stack_bottom_, stack_top_}; // NOLINT
+}
+
+uptr AsanThread::stack_top() {
+ return GetStackBounds().top;
+}
+
+uptr AsanThread::stack_bottom() {
+ return GetStackBounds().bottom;
+}
+
+uptr AsanThread::stack_size() {
+ const auto bounds = GetStackBounds();
+ return bounds.top - bounds.bottom;
+}
+
// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known; the procedure also has to be async-signal safe.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
@@ -148,6 +219,8 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
}
void AsanThread::Init() {
+ next_stack_top_ = next_stack_bottom_ = 0;
+ atomic_store(&stack_switching_, false, memory_order_release);
fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
@@ -193,10 +266,12 @@ thread_return_t AsanThread::ThreadStart(
void AsanThread::SetThreadStackAndTls() {
uptr tls_size = 0;
- GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size_, &tls_begin_,
- &tls_size);
- stack_top_ = stack_bottom_ + stack_size_;
+ uptr stack_size = 0;
+ GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
+ const_cast<uptr *>(&stack_size), &tls_begin_, &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
tls_end_ = tls_begin_ + tls_size;
+ dtls_ = DTLS_Get();
int local;
CHECK(AddrIsInStack((uptr)&local));
@@ -247,6 +322,11 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}
+bool AsanThread::AddrIsInStack(uptr addr) {
+ const auto bounds = GetStackBounds();
+ return addr >= bounds.bottom && addr < bounds.top;
+}
+
static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
void *addr) {
AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
@@ -269,7 +349,7 @@ AsanThread *GetCurrentThread() {
// limits, so only do this magic on Android, and only if the found thread
// is the main thread.
AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
- if (ThreadStackContainsAddress(tctx, &context)) {
+ if (tctx && ThreadStackContainsAddress(tctx, &context)) {
SetCurrentThread(tctx->thread);
return tctx->thread;
}
@@ -320,8 +400,8 @@ __asan::AsanThread *GetAsanThreadByOsIDLocked(uptr os_id) {
// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
- uptr *tls_begin, uptr *tls_end,
- uptr *cache_begin, uptr *cache_end) {
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
__asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
if (!t) return false;
*stack_begin = t->stack_bottom();
@@ -331,6 +411,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
// ASan doesn't keep allocator caches in TLS, so these are unused.
*cache_begin = 0;
*cache_end = 0;
+ *dtls = t->dtls();
return true;
}
@@ -353,3 +434,33 @@ void EnsureMainThreadIDIsCorrect() {
__asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
+ uptr size) {
+ AsanThread *t = GetCurrentThread();
+ if (!t) {
+ VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
+ return;
+ }
+ t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_finish_switch_fiber(void* fakestack,
+ const void **bottom_old,
+ uptr *size_old) {
+ AsanThread *t = GetCurrentThread();
+ if (!t) {
+ VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
+ return;
+ }
+ t->FinishSwitchFiber((FakeStack*)fakestack,
+ (uptr*)bottom_old,
+ (uptr*)size_old);
+}
+}
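A minimal usage sketch for the new fiber interface, assuming a ucontext-based
coroutine switch and the public declarations from
<sanitizer/common_interface_defs.h>; every fiber that switches away must
bracket the jump with start/finish, matching the three-argument signatures
defined above:

#include <sanitizer/common_interface_defs.h>
#include <ucontext.h>

void SwitchFiber(ucontext_t *from, ucontext_t *to,
                 const void *to_stack_bottom, size_t to_stack_size) {
  void *fake_stack = nullptr;
  // Save this fiber's fake stack and announce the destination stack bounds.
  __sanitizer_start_switch_fiber(&fake_stack, to_stack_bottom, to_stack_size);
  swapcontext(from, to);
  // Control returned to this fiber: restore its fake stack and bounds.
  __sanitizer_finish_switch_fiber(fake_stack, nullptr, nullptr);
}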
diff --git a/libsanitizer/asan/asan_thread.h b/libsanitizer/asan/asan_thread.h
index 27a3367d8e7..c51a58ad0bb 100644
--- a/libsanitizer/asan/asan_thread.h
+++ b/libsanitizer/asan/asan_thread.h
@@ -21,6 +21,10 @@
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
+namespace __sanitizer {
+struct DTLS;
+} // namespace __sanitizer
+
namespace __asan {
const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
@@ -60,11 +64,12 @@ class AsanThread {
thread_return_t ThreadStart(uptr os_id,
atomic_uintptr_t *signal_thread_is_registered);
- uptr stack_top() { return stack_top_; }
- uptr stack_bottom() { return stack_bottom_; }
- uptr stack_size() { return stack_size_; }
+ uptr stack_top();
+ uptr stack_bottom();
+ uptr stack_size();
uptr tls_begin() { return tls_begin_; }
uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
u32 tid() { return context_->tid; }
AsanThreadContext *context() { return context_; }
void set_context(AsanThreadContext *context) { context_ = context; }
@@ -76,9 +81,7 @@ class AsanThread {
};
bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
- bool AddrIsInStack(uptr addr) {
- return addr >= stack_bottom_ && addr < stack_top_;
- }
+ bool AddrIsInStack(uptr addr);
void DeleteFakeStack(int tid) {
if (!fake_stack_) return;
@@ -88,13 +91,20 @@ class AsanThread {
t->Destroy(tid);
}
+ void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
+ void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
+ uptr *size_old);
+
bool has_fake_stack() {
- return (reinterpret_cast<uptr>(fake_stack_) > 1);
+ return !atomic_load(&stack_switching_, memory_order_relaxed) &&
+ (reinterpret_cast<uptr>(fake_stack_) > 1);
}
FakeStack *fake_stack() {
if (!__asan_option_detect_stack_use_after_return)
return nullptr;
+ if (atomic_load(&stack_switching_, memory_order_relaxed))
+ return nullptr;
if (!has_fake_stack())
return AsyncSignalSafeLazyInitFakeStack();
return fake_stack_;
@@ -120,16 +130,27 @@ class AsanThread {
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
+ struct StackBounds {
+ uptr bottom;
+ uptr top;
+ };
+ StackBounds GetStackBounds() const;
+
AsanThreadContext *context_;
thread_callback_t start_routine_;
void *arg_;
+
uptr stack_top_;
uptr stack_bottom_;
- // stack_size_ == stack_top_ - stack_bottom_;
- // It needs to be set in a async-signal-safe manner.
- uptr stack_size_;
+  // These variables are used when the thread is about to switch stacks.
+ uptr next_stack_top_;
+ uptr next_stack_bottom_;
+  // True if a stack switch is in progress.
+ atomic_uint8_t stack_switching_;
+
uptr tls_begin_;
uptr tls_end_;
+ DTLS *dtls_;
FakeStack *fake_stack_;
AsanThreadLocalMallocStorage malloc_storage_;
diff --git a/libsanitizer/asan/asan_win.cc b/libsanitizer/asan/asan_win.cc
index 6c12523498a..efd82bfc3b0 100644
--- a/libsanitizer/asan/asan_win.cc
+++ b/libsanitizer/asan/asan_win.cc
@@ -22,6 +22,7 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
+#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
@@ -34,7 +35,13 @@ int __asan_should_detect_stack_use_after_return() {
return __asan_option_detect_stack_use_after_return;
}
-// -------------------- A workaround for the abscence of weak symbols ----- {{{
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_shadow_memory_dynamic_address() {
+ __asan_init();
+ return __asan_shadow_memory_dynamic_address;
+}
+
+// -------------------- A workaround for the absence of weak symbols ----- {{{
// We don't have a direct equivalent of weak symbols when using MSVC, but we can
// use the /alternatename directive to tell the linker to default a specific
// symbol to a specific value, which works nicely for allocator hooks and
@@ -44,21 +51,49 @@ void __sanitizer_default_free_hook(void *ptr) { }
const char* __asan_default_default_options() { return ""; }
const char* __asan_default_default_suppressions() { return ""; }
void __asan_default_on_error() {}
+// 64-bit MSVC does not prepend an underscore to symbols.
+#ifdef _WIN64
+#pragma comment(linker, "/alternatename:__sanitizer_malloc_hook=__sanitizer_default_malloc_hook") // NOLINT
+#pragma comment(linker, "/alternatename:__sanitizer_free_hook=__sanitizer_default_free_hook") // NOLINT
+#pragma comment(linker, "/alternatename:__asan_default_options=__asan_default_default_options") // NOLINT
+#pragma comment(linker, "/alternatename:__asan_default_suppressions=__asan_default_default_suppressions") // NOLINT
+#pragma comment(linker, "/alternatename:__asan_on_error=__asan_default_on_error") // NOLINT
+#else
#pragma comment(linker, "/alternatename:___sanitizer_malloc_hook=___sanitizer_default_malloc_hook") // NOLINT
#pragma comment(linker, "/alternatename:___sanitizer_free_hook=___sanitizer_default_free_hook") // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_options=___asan_default_default_options") // NOLINT
#pragma comment(linker, "/alternatename:___asan_default_suppressions=___asan_default_default_suppressions") // NOLINT
#pragma comment(linker, "/alternatename:___asan_on_error=___asan_default_on_error") // NOLINT
+#endif
// }}}
} // extern "C"
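For reference, the /alternatename defaults above mean user code can override
any of these symbols simply by defining them; a hedged example (the flag
string is illustrative, using flags that appear elsewhere in this patch):

extern "C" const char *__asan_default_options() {
  return "halt_on_error=0:print_scariness=1";  // illustrative flag values
}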
-// ---------------------- Windows-specific inteceptors ---------------- {{{
+// ---------------------- Windows-specific interceptors ---------------- {{{
+INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
+ CHECK(REAL(RtlRaiseException));
+ // This is a noreturn function, unless it's one of the exceptions raised to
+ // communicate with the debugger, such as the one from OutputDebugString.
+ if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
+ __asan_handle_no_return();
+ REAL(RtlRaiseException)(ExceptionRecord);
+}
+
INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
CHECK(REAL(RaiseException));
__asan_handle_no_return();
REAL(RaiseException)(a, b, c, d);
}
+#ifdef _WIN64
+
+INTERCEPTOR_WINAPI(int, __C_specific_handler, void *a, void *b, void *c, void *d) { // NOLINT
+ CHECK(REAL(__C_specific_handler));
+ __asan_handle_no_return();
+ return REAL(__C_specific_handler)(a, b, c, d);
+}
+
+#else
+
INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
CHECK(REAL(_except_handler3));
__asan_handle_no_return();
@@ -74,6 +109,7 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
+#endif
static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
AsanThread *t = (AsanThread*)arg;
@@ -99,52 +135,33 @@ INTERCEPTOR_WINAPI(DWORD, CreateThread,
asan_thread_start, t, thr_flags, tid);
}
-namespace {
-BlockingMutex mu_for_thread_tracking(LINKER_INITIALIZED);
-
-void EnsureWorkerThreadRegistered() {
- // FIXME: GetCurrentThread relies on TSD, which might not play well with
- // system thread pools. We might want to use something like reference
- // counting to zero out GetCurrentThread() underlying storage when the last
- // work item finishes? Or can we disable reclaiming of threads in the pool?
- BlockingMutexLock l(&mu_for_thread_tracking);
- if (__asan::GetCurrentThread())
- return;
-
- AsanThread *t = AsanThread::Create(
- /* start_routine */ nullptr, /* arg */ nullptr,
- /* parent_tid */ -1, /* stack */ nullptr, /* detached */ true);
- t->Init();
- asanThreadRegistry().StartThread(t->tid(), 0, 0);
- SetCurrentThread(t);
-}
-} // namespace
-
-INTERCEPTOR_WINAPI(DWORD, NtWaitForWorkViaWorkerFactory, DWORD a, DWORD b) {
- // NtWaitForWorkViaWorkerFactory is called from system worker pool threads to
- // query work scheduled by BindIoCompletionCallback, QueueUserWorkItem, etc.
- // System worker pool threads are created at arbitraty point in time and
- // without using CreateThread, so we wrap NtWaitForWorkViaWorkerFactory
- // instead and don't register a specific parent_tid/stack.
- EnsureWorkerThreadRegistered();
- return REAL(NtWaitForWorkViaWorkerFactory)(a, b);
-}
-
// }}}
namespace __asan {
void InitializePlatformInterceptors() {
ASAN_INTERCEPT_FUNC(CreateThread);
- ASAN_INTERCEPT_FUNC(RaiseException);
+
+#ifdef _WIN64
+ ASAN_INTERCEPT_FUNC(__C_specific_handler);
+#else
ASAN_INTERCEPT_FUNC(_except_handler3);
ASAN_INTERCEPT_FUNC(_except_handler4);
+#endif
+
+ // Try to intercept kernel32!RaiseException, and if that fails, intercept
+ // ntdll!RtlRaiseException instead.
+ if (!::__interception::OverrideFunction("RaiseException",
+ (uptr)WRAP(RaiseException),
+ (uptr *)&REAL(RaiseException))) {
+ CHECK(::__interception::OverrideFunction("RtlRaiseException",
+ (uptr)WRAP(RtlRaiseException),
+ (uptr *)&REAL(RtlRaiseException)));
+ }
+}
- // NtWaitForWorkViaWorkerFactory is always linked dynamically.
- CHECK(::__interception::OverrideFunction(
- "NtWaitForWorkViaWorkerFactory",
- (uptr)WRAP(NtWaitForWorkViaWorkerFactory),
- (uptr *)&REAL(NtWaitForWorkViaWorkerFactory)));
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
}
// ---------------------- TSD ---------------- {{{
@@ -173,14 +190,6 @@ void PlatformTSDDtor(void *tsd) {
// }}}
// ---------------------- Various stuff ---------------- {{{
-void DisableReexec() {
- // No need to re-exec on Windows.
-}
-
-void MaybeReexec() {
- // No need to re-exec on Windows.
-}
-
void *AsanDoesNotSupportStaticLinkage() {
#if defined(_DEBUG)
#error Please build the runtime with a non-debug CRT: /MD or /MT
@@ -200,20 +209,95 @@ void AsanOnDeadlySignal(int, void *siginfo, void *context) {
UNIMPLEMENTED();
}
+#if SANITIZER_WINDOWS64
+// Exception handler for dealing with shadow memory.
+static LONG CALLBACK
+ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
+ uptr page_size = GetPageSizeCached();
+ // Only handle access violations.
+ if (exception_pointers->ExceptionRecord->ExceptionCode !=
+ EXCEPTION_ACCESS_VIOLATION) {
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Only handle access violations that land within the shadow memory.
+ uptr addr =
+ (uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]);
+
+ // Check valid shadow range.
+ if (!AddrIsInShadow(addr)) return EXCEPTION_CONTINUE_SEARCH;
+
+ // This is an access violation while trying to read from the shadow. Commit
+ // the relevant page and let execution continue.
+
+ // Determine the address of the page that is being accessed.
+ uptr page = RoundDownTo(addr, page_size);
+
+ // Query the existing page.
+ MEMORY_BASIC_INFORMATION mem_info = {};
+ if (::VirtualQuery((LPVOID)page, &mem_info, sizeof(mem_info)) == 0)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ // Commit the page.
+ uptr result =
+ (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
+ if (result != page) return EXCEPTION_CONTINUE_SEARCH;
+
+ // The page mapping succeeded, so continue execution as usual.
+ return EXCEPTION_CONTINUE_EXECUTION;
+}
+
+#endif
+
+void InitializePlatformExceptionHandlers() {
+#if SANITIZER_WINDOWS64
+  // On Win64, we map shadow memory on demand via an access violation handler.
+ // Install our exception handler.
+ CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler));
+#endif
+}
+
static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
+// Check, based on flags, whether we should report this exception.
+static bool ShouldReportDeadlyException(unsigned code) {
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_BREAKPOINT:
+ case EXCEPTION_ILLEGAL_INSTRUCTION: {
+ return common_flags()->handle_sigill;
+ }
+ }
+ return false;
+}
+
+// Return the textual name for this exception.
+const char *DescribeSignalOrException(int signo) {
+ unsigned code = signo;
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "access-violation";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "in-page-error";
+ case EXCEPTION_BREAKPOINT:
+ return "breakpoint";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "illegal-instruction";
+ }
+ return nullptr;
+}
+
static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
CONTEXT *context = info->ContextRecord;
- if (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ||
- exception_record->ExceptionCode == EXCEPTION_IN_PAGE_ERROR) {
- const char *description =
- (exception_record->ExceptionCode == EXCEPTION_ACCESS_VIOLATION)
- ? "access-violation"
- : "in-page-error";
+ if (ShouldReportDeadlyException(exception_record->ExceptionCode)) {
SignalContext sig = SignalContext::Create(exception_record, context);
- ReportDeadlySignal(description, sig);
+ ReportDeadlySignal(exception_record->ExceptionCode, sig);
}
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
@@ -248,10 +332,16 @@ int __asan_set_seh_filter() {
}
#if !ASAN_DYNAMIC
-// Put a pointer to __asan_set_seh_filter at the end of the global list
-// of C initializers, after the default EH is set by the CRT.
-#pragma section(".CRT$XIZ", long, read) // NOLINT
-__declspec(allocate(".CRT$XIZ"))
+// The CRT runs initializers in this order:
+// - C initializers, from XIA to XIZ
+// - C++ initializers, from XCA to XCZ
+// Prior to 2015, the CRT set the unhandled exception filter at priority XIY,
+// near the end of C initialization. Starting in 2015, it was moved to the
+// beginning of C++ initialization. We set our priority to XCAB to run
+// immediately after the CRT runs. This way, our exception filter is called
+// first and we can delegate to their filter if appropriate.
+#pragma section(".CRT$XCAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XCAB"))
int (*__intercept_seh)() = __asan_set_seh_filter;
#endif
// }}}
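
As a hedged illustration of the CRT-section mechanism described in the comment above (not part of the patch): a function pointer placed in a .CRT$X?? section is called by the CRT during startup, and XCAB runs at the very beginning of C++ initialization. RunEarly and run_early_ are hypothetical names; MSVC only.

    // crt_section_demo.cc -- minimal sketch of the CRT initializer-section trick.
    #include <cstdio>

    static int RunEarly() {
      std::fputs("runs right after the CRT's XCA entries\n", stderr);
      return 0;
    }

    // Declare the section, then allocate our pointer in it. The CRT walks
    // the XCA..XCZ pointer table at the start of C++ initialization.
    #pragma section(".CRT$XCAB", long, read)  // NOLINT
    __declspec(allocate(".CRT$XCAB")) int (*run_early_)() = RunEarly;

    int main() {}
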
diff --git a/libsanitizer/asan/asan_win_dll_thunk.cc b/libsanitizer/asan/asan_win_dll_thunk.cc
index 691aaf36fd0..f7c9a37bf79 100644
--- a/libsanitizer/asan/asan_win_dll_thunk.cc
+++ b/libsanitizer/asan/asan_win_dll_thunk.cc
@@ -10,16 +10,16 @@
// This file defines a family of thunks that should be statically linked into
// the DLLs that have ASan instrumentation in order to delegate the calls to the
// shared runtime that lives in the main binary.
-// See https://code.google.com/p/address-sanitizer/issues/detail?id=209 for the
-// details.
+// See https://github.com/google/sanitizers/issues/209 for the details.
//===----------------------------------------------------------------------===//
-// Only compile this code when buidling asan_dll_thunk.lib
+// Only compile this code when building asan_dll_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DLL_THUNK
#include "asan_init_version.h"
#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
// ---------- Function interception helper functions and macros ----------- {{{1
extern "C" {
@@ -28,6 +28,8 @@ void *__stdcall GetProcAddress(void *module, const char *proc_name);
void abort();
}
+using namespace __sanitizer;
+
static uptr getRealProcAddressOrDie(const char *name) {
uptr ret =
__interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
@@ -196,9 +198,11 @@ static void InterceptHooks();
// Don't use the INTERFACE_FUNCTION machinery for this function as we actually
// want to call it in the __asan_init interceptor.
WRAP_W_V(__asan_should_detect_stack_use_after_return)
+WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
extern "C" {
int __asan_option_detect_stack_use_after_return;
+ uptr __asan_shadow_memory_dynamic_address;
// Manually wrap __asan_init as we need to initialize
// __asan_option_detect_stack_use_after_return afterwards.
@@ -212,7 +216,8 @@ extern "C" {
fn();
__asan_option_detect_stack_use_after_return =
(__asan_should_detect_stack_use_after_return() != 0);
-
+ __asan_shadow_memory_dynamic_address =
+ (uptr)__asan_get_shadow_memory_dynamic_address();
InterceptHooks();
}
}
@@ -255,6 +260,13 @@ INTERFACE_FUNCTION(__asan_memcpy);
INTERFACE_FUNCTION(__asan_memset);
INTERFACE_FUNCTION(__asan_memmove);
+INTERFACE_FUNCTION(__asan_set_shadow_00);
+INTERFACE_FUNCTION(__asan_set_shadow_f1);
+INTERFACE_FUNCTION(__asan_set_shadow_f2);
+INTERFACE_FUNCTION(__asan_set_shadow_f3);
+INTERFACE_FUNCTION(__asan_set_shadow_f5);
+INTERFACE_FUNCTION(__asan_set_shadow_f8);
+
INTERFACE_FUNCTION(__asan_alloca_poison);
INTERFACE_FUNCTION(__asan_allocas_unpoison);
@@ -309,8 +321,6 @@ INTERFACE_FUNCTION(__sanitizer_cov_init)
INTERFACE_FUNCTION(__sanitizer_cov_module_init)
INTERFACE_FUNCTION(__sanitizer_cov_trace_basic_block)
INTERFACE_FUNCTION(__sanitizer_cov_trace_func_enter)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_cmp)
-INTERFACE_FUNCTION(__sanitizer_cov_trace_switch)
INTERFACE_FUNCTION(__sanitizer_cov_with_check)
INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
INTERFACE_FUNCTION(__sanitizer_get_coverage_guards)
@@ -324,6 +334,8 @@ INTERFACE_FUNCTION(__sanitizer_get_total_unique_coverage)
INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
INTERFACE_FUNCTION(__sanitizer_maybe_open_cov_file)
INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
INTERFACE_FUNCTION(__sanitizer_ptr_sub)
INTERFACE_FUNCTION(__sanitizer_report_error_summary)
@@ -333,6 +345,7 @@ INTERFACE_FUNCTION(__sanitizer_update_counter_bitset_and_clear_counters)
INTERFACE_FUNCTION(__sanitizer_sandbox_on_notify)
INTERFACE_FUNCTION(__sanitizer_set_death_callback)
INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_set_report_fd)
INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
@@ -340,23 +353,31 @@ INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
// TODO(timurrrr): Add more interface functions on the as-needed basis.
// ----------------- Memory allocation functions ---------------------
WRAP_V_W(free)
+WRAP_V_W(_free_base)
WRAP_V_WW(_free_dbg)
WRAP_W_W(malloc)
+WRAP_W_W(_malloc_base)
WRAP_W_WWWW(_malloc_dbg)
WRAP_W_WW(calloc)
+WRAP_W_WW(_calloc_base)
WRAP_W_WWWWW(_calloc_dbg)
WRAP_W_WWW(_calloc_impl)
WRAP_W_WW(realloc)
+WRAP_W_WW(_realloc_base)
WRAP_W_WWW(_realloc_dbg)
WRAP_W_WWW(_recalloc)
+WRAP_W_WWW(_recalloc_base)
WRAP_W_W(_msize)
WRAP_W_W(_expand)
@@ -369,6 +390,10 @@ WRAP_W_W(_expand_dbg)
INTERCEPT_LIBRARY_FUNCTION(atoi);
INTERCEPT_LIBRARY_FUNCTION(atol);
+
+#ifdef _WIN64
+INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
+#else
INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
// _except_handler4 checks -GS cookie which is different for each module, so we
@@ -377,10 +402,13 @@ INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
__asan_handle_no_return();
return REAL(_except_handler4)(a, b, c, d);
}
+#endif
INTERCEPT_LIBRARY_FUNCTION(frexp);
INTERCEPT_LIBRARY_FUNCTION(longjmp);
+#if SANITIZER_INTERCEPT_MEMCHR
INTERCEPT_LIBRARY_FUNCTION(memchr);
+#endif
INTERCEPT_LIBRARY_FUNCTION(memcmp);
INTERCEPT_LIBRARY_FUNCTION(memcpy);
INTERCEPT_LIBRARY_FUNCTION(memmove);
@@ -390,12 +418,14 @@ INTERCEPT_LIBRARY_FUNCTION(strchr);
INTERCEPT_LIBRARY_FUNCTION(strcmp);
INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
INTERCEPT_LIBRARY_FUNCTION(strcspn);
+INTERCEPT_LIBRARY_FUNCTION(strdup);
INTERCEPT_LIBRARY_FUNCTION(strlen);
INTERCEPT_LIBRARY_FUNCTION(strncat);
INTERCEPT_LIBRARY_FUNCTION(strncmp);
INTERCEPT_LIBRARY_FUNCTION(strncpy);
INTERCEPT_LIBRARY_FUNCTION(strnlen);
INTERCEPT_LIBRARY_FUNCTION(strpbrk);
+INTERCEPT_LIBRARY_FUNCTION(strrchr);
INTERCEPT_LIBRARY_FUNCTION(strspn);
INTERCEPT_LIBRARY_FUNCTION(strstr);
INTERCEPT_LIBRARY_FUNCTION(strtol);
@@ -405,7 +435,9 @@ INTERCEPT_LIBRARY_FUNCTION(wcslen);
// is defined.
void InterceptHooks() {
INTERCEPT_HOOKS();
+#ifndef _WIN64
INTERCEPT_FUNCTION(_except_handler4);
+#endif
}
// We want to call __asan_init before C/C++ initializers/constructors are
diff --git a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
index f7358539a1c..8989159cc52 100644
--- a/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
+++ b/libsanitizer/asan/asan_win_dynamic_runtime_thunk.cc
@@ -1,4 +1,4 @@
-//===-- asan_win_uar_thunk.cc ---------------------------------------------===//
+//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
@@ -14,11 +14,11 @@
// This includes:
// - forwarding the detect_stack_use_after_return runtime option
// - working around deficiencies of the MD runtime
-// - installing a custom SEH handlerx
+// - installing a custom SEH handler
//
//===----------------------------------------------------------------------===//
-// Only compile this code when buidling asan_dynamic_runtime_thunk.lib
+// Only compile this code when building asan_dynamic_runtime_thunk.lib
// Using #ifdef rather than relying on Makefiles etc.
// simplifies the build procedure.
#ifdef ASAN_DYNAMIC_RUNTIME_THUNK
@@ -27,7 +27,7 @@
// First, declare CRT sections we'll be using in this file
#pragma section(".CRT$XID", long, read) // NOLINT
-#pragma section(".CRT$XIZ", long, read) // NOLINT
+#pragma section(".CRT$XCAB", long, read) // NOLINT
#pragma section(".CRT$XTW", long, read) // NOLINT
#pragma section(".CRT$XTY", long, read) // NOLINT
@@ -40,12 +40,16 @@
// attribute adds __imp_ prefix to the symbol name of a variable.
// Since in general we don't know if a given TU is going to be used
// with a MT or MD runtime and we don't want to use ugly __imp_ names on Windows
-// just to work around this issue, let's clone the a variable that is
-// constant after initialization anyways.
+// just to work around this issue, let's clone the variable that is constant
+// after initialization anyways.
extern "C" {
__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
int __asan_option_detect_stack_use_after_return =
__asan_should_detect_stack_use_after_return();
+
+__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
+void* __asan_shadow_memory_dynamic_address =
+ __asan_get_shadow_memory_dynamic_address();
}
////////////////////////////////////////////////////////////////////////////////
@@ -57,6 +61,7 @@ int __asan_option_detect_stack_use_after_return =
// using atexit() that calls a small subset of C terminators
// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
// are there.
+extern "C" int __cdecl atexit(void (__cdecl *f)(void));
extern "C" void __cdecl _initterm(void *a, void *b);
namespace {
@@ -70,6 +75,7 @@ void UnregisterGlobals() {
int ScheduleUnregisterGlobals() {
return atexit(UnregisterGlobals);
}
+} // namespace
// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
// atexit() is initialized (.CRT$XIC). As this is executed before C++
@@ -78,8 +84,6 @@ int ScheduleUnregisterGlobals() {
__declspec(allocate(".CRT$XID"))
int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
-} // namespace
-
////////////////////////////////////////////////////////////////////////////////
// ASan SEH handling.
// We need to set the ASan-specific SEH handler at the end of CRT initialization
@@ -90,7 +94,8 @@ static int SetSEHFilter() { return __asan_set_seh_filter(); }
// Unfortunately, putting a pointer to __asan_set_seh_filter into
// __asan_intercept_seh gets optimized out, so we have to use an extra function.
-__declspec(allocate(".CRT$XIZ")) int (*__asan_seh_interceptor)() = SetSEHFilter;
+__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
+ SetSEHFilter;
}
#endif // ASAN_DYNAMIC_RUNTIME_THUNK
diff --git a/libsanitizer/asan/libtool-version b/libsanitizer/asan/libtool-version
index 7e838a5740b..0f14ee34185 100644
--- a/libsanitizer/asan/libtool-version
+++ b/libsanitizer/asan/libtool-version
@@ -3,4 +3,4 @@
# a separate file so that version updates don't involve re-running
# automake.
# CURRENT:REVISION:AGE
-3:0:0
+4:0:0
diff --git a/libsanitizer/builtins/assembly.h b/libsanitizer/builtins/assembly.h
new file mode 100644
index 00000000000..5e36b5a5edf
--- /dev/null
+++ b/libsanitizer/builtins/assembly.h
@@ -0,0 +1,169 @@
+/* ===-- assembly.h - compiler-rt assembler support macros -----------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is dual licensed under the MIT and the University of Illinois Open
+ * Source Licenses. See LICENSE.TXT for details.
+ *
+ * ===----------------------------------------------------------------------===
+ *
+ * This file defines macros for use in compiler-rt assembler source.
+ * This file is not part of the interface of this library.
+ *
+ * ===----------------------------------------------------------------------===
+ */
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+#define SEPARATOR @
+#else
+#define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+#define HIDDEN(name) .private_extern name
+#define LOCAL_LABEL(name) L_##name
+// tell linker it can break up file at label boundaries
+#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
+#define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#elif defined(__ELF__)
+
+#define HIDDEN(name) .hidden name
+#define LOCAL_LABEL(name) .L_##name
+#define FILE_LEVEL_DIRECTIVE
+#if defined(__arm__)
+#define SYMBOL_IS_FUNC(name) .type name,%function
+#else
+#define SYMBOL_IS_FUNC(name) .type name,@function
+#endif
+#define CONST_SECTION .section .rodata
+
+#if defined(__GNU__) || defined(__ANDROID__) || defined(__FreeBSD__)
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
+
+#else // !__APPLE__ && !__ELF__
+
+#define HIDDEN(name)
+#define LOCAL_LABEL(name) .L ## name
+#define FILE_LEVEL_DIRECTIVE
+#define SYMBOL_IS_FUNC(name) \
+ .def name SEPARATOR \
+ .scl 2 SEPARATOR \
+ .type 32 SEPARATOR \
+ .endef
+#define CONST_SECTION .section .rdata,"rd"
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#endif
+
+#if defined(__arm__)
+#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
+#define ARM_HAS_BX
+#endif
+#if !defined(__ARM_FEATURE_CLZ) && \
+ (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
+#define __ARM_FEATURE_CLZ
+#endif
+
+#ifdef ARM_HAS_BX
+#define JMP(r) bx r
+#define JMPc(r, c) bx##c r
+#else
+#define JMP(r) mov pc, r
+#define JMPc(r, c) mov##c pc, r
+#endif
+
+// pop {pc} can't switch Thumb mode on ARMv4T
+#if __ARM_ARCH >= 5
+#define POP_PC() pop {pc}
+#else
+#define POP_PC() \
+ pop {ip}; \
+ JMP(ip)
+#endif
+
+#if __ARM_ARCH_ISA_THUMB == 2
+#define IT(cond) it cond
+#define ITT(cond) itt cond
+#else
+#define IT(cond)
+#define ITT(cond)
+#endif
+
+#if __ARM_ARCH_ISA_THUMB == 2
+#define WIDE(op) op.w
+#else
+#define WIDE(op) op
+#endif
+#endif
+
+#define GLUE2(a, b) a##b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+#define DECLARE_SYMBOL_VISIBILITY(name) \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#else
+#define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name) \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
+ .thumb_func SEPARATOR \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ .globl name SEPARATOR \
+ SYMBOL_IS_FUNC(name) SEPARATOR \
+ HIDDEN(name) SEPARATOR \
+ name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+ .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+
+#if defined(__ARM_EABI__)
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
+ DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#ifdef __ELF__
+#define END_COMPILERRT_FUNCTION(name) \
+ .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#else
+#define END_COMPILERRT_FUNCTION(name)
+#endif
+
+#endif /* COMPILERRT_ASSEMBLY_H */
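
A hedged usage sketch for the macros in this new header. The file name and the function __add1 are hypothetical; shown as x86-64 AT&T assembly, the kind of source this header is written for:

    // add1.S -- hypothetical compiler-rt style assembly file using assembly.h.
    // Implements "return x + 1" for the first integer argument.
    #include "../builtins/assembly.h"

            .text
    DEFINE_COMPILERRT_FUNCTION(__add1)      // emits .globl, .type, and the label
            leaq    1(%rdi), %rax
            ret
    END_COMPILERRT_FUNCTION(__add1)         // emits .size on ELF targets

    NO_EXEC_STACK_DIRECTIVE                 // mark the stack non-executable on ELF
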
diff --git a/libsanitizer/configure b/libsanitizer/configure
index 97218bb3c41..b6c715b09f7 100755
--- a/libsanitizer/configure
+++ b/libsanitizer/configure
@@ -604,6 +604,7 @@ ac_subst_vars='am__EXEEXT_FALSE
am__EXEEXT_TRUE
LTLIBOBJS
LIBOBJS
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS
TSAN_TARGET_DEPENDENT_OBJECTS
LIBBACKTRACE_SUPPORTED_FALSE
LIBBACKTRACE_SUPPORTED_TRUE
@@ -12027,7 +12028,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12030 "configure"
+#line 12031 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -12133,7 +12134,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<_LT_EOF
-#line 12136 "configure"
+#line 12137 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@@ -16498,6 +16499,7 @@ fi
+
cat >confcache <<\_ACEOF
# This file is a shell script that caches the results of configure
# tests run on this system so they can be shared between configure
diff --git a/libsanitizer/configure.ac b/libsanitizer/configure.ac
index 93192e15971..e9018498074 100644
--- a/libsanitizer/configure.ac
+++ b/libsanitizer/configure.ac
@@ -375,5 +375,6 @@ _EOF
fi
AC_SUBST([TSAN_TARGET_DEPENDENT_OBJECTS])
+AC_SUBST([SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS])
AC_OUTPUT
diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt
index 05ead765e62..8d884f4bf52 100644
--- a/libsanitizer/configure.tgt
+++ b/libsanitizer/configure.tgt
@@ -20,12 +20,14 @@
# Filter out unsupported systems.
TSAN_TARGET_DEPENDENT_OBJECTS=
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=
case "${target}" in
x86_64-*-linux* | i?86-*-linux*)
if test x$ac_cv_sizeof_void_p = x8; then
TSAN_SUPPORTED=yes
LSAN_SUPPORTED=yes
TSAN_TARGET_DEPENDENT_OBJECTS=tsan_rtl_amd64.lo
+ SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS=sanitizer_linux_x86_64.lo
fi
;;
powerpc*-*-linux*)
diff --git a/libsanitizer/include/sanitizer/allocator_interface.h b/libsanitizer/include/sanitizer/allocator_interface.h
index 97a72a258eb..d4801e9ff0c 100644
--- a/libsanitizer/include/sanitizer/allocator_interface.h
+++ b/libsanitizer/include/sanitizer/allocator_interface.h
@@ -57,6 +57,23 @@ extern "C" {
deallocation of "ptr". */
void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
void __sanitizer_free_hook(const volatile void *ptr);
+
+ /* Installs a pair of hooks for malloc/free.
+     Several (currently, 5) hook pairs may be installed; they are executed
+ in the order they were installed and after calling
+ __sanitizer_malloc_hook/__sanitizer_free_hook.
+ Unlike __sanitizer_malloc_hook/__sanitizer_free_hook these hooks can be
+ chained and do not rely on weak symbols working on the platform, but
+ require __sanitizer_install_malloc_and_free_hooks to be called at startup
+ and thus will not be called on malloc/free very early in the process.
+ Returns the number of hooks currently installed or 0 on failure.
+ Not thread-safe, should be called in the main thread before starting
+ other threads.
+ */
+ int __sanitizer_install_malloc_and_free_hooks(
+ void (*malloc_hook)(const volatile void *, size_t),
+ void (*free_hook)(const volatile void *));
+
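A minimal usage sketch for the chained hooks declared above (the hook bodies and names are hypothetical; per the comment, the install call must happen before other threads start):

    // hooks_demo.cc -- sketch of __sanitizer_install_malloc_and_free_hooks.
    #include <sanitizer/allocator_interface.h>
    #include <cstdio>
    #include <cstdlib>

    static void MyMallocHook(const volatile void *ptr, size_t size) {
      std::fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
    }
    static void MyFreeHook(const volatile void *ptr) {
      std::fprintf(stderr, "free %p\n", ptr);
    }

    int main() {
      // Returns the number of installed hook pairs, or 0 on failure.
      if (__sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook) == 0)
        std::fprintf(stderr, "hook installation failed\n");
      void *p = std::malloc(16);
      std::free(p);
    }
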
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/libsanitizer/include/sanitizer/common_interface_defs.h b/libsanitizer/include/sanitizer/common_interface_defs.h
index 35463e08fbb..fd38c55a235 100644
--- a/libsanitizer/include/sanitizer/common_interface_defs.h
+++ b/libsanitizer/include/sanitizer/common_interface_defs.h
@@ -39,6 +39,9 @@ extern "C" {
// Tell the tools to write their reports to "path.<pid>" instead of stderr.
void __sanitizer_set_report_path(const char *path);
+ // Tell the tools to write their reports to the provided file descriptor
+  // (cast to void *).
+ void __sanitizer_set_report_fd(void *fd);
// Notify the tools that the sandbox is going to be turned on. The reserved
// parameter will be used in the future to hold a structure with functions
@@ -112,6 +115,16 @@ extern "C" {
// Print the stack trace leading to this call. Useful for debugging user code.
void __sanitizer_print_stack_trace();
+ // Symbolizes the supplied 'pc' using the format string 'fmt'.
+ // Outputs at most 'out_buf_size' bytes into 'out_buf'.
+ // The format syntax is described in
+ // lib/sanitizer_common/sanitizer_stacktrace_printer.h.
+ void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
+ size_t out_buf_size);
+ // Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
+ void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
+ char *out_buf, size_t out_buf_size);
+
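A hedged usage sketch for __sanitizer_symbolize_pc (format directives follow sanitizer_stacktrace_printer.h, e.g. %p for the pc, %F for the function, %L for the source location; __builtin_return_address is a GCC/Clang builtin):

    // symbolize_demo.cc -- minimal sketch; requires a sanitizer runtime.
    #include <sanitizer/common_interface_defs.h>
    #include <cstdio>

    void WhereWasICalledFrom() {
      char buf[256];
      // Symbolize our caller's pc into "pc in function at file:line" form.
      __sanitizer_symbolize_pc(__builtin_return_address(0),
                               "%p in %F at %L", buf, sizeof(buf));
      std::printf("%s\n", buf);
    }
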
// Sets the callback to be called right before death on error.
// Passing 0 will unset the callback.
void __sanitizer_set_death_callback(void (*callback)(void));
@@ -123,9 +136,50 @@ extern "C" {
// to know what is being passed to libc functions, e.g. memcmp.
// FIXME: implement more hooks.
void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
- const void *s2, size_t n);
+ const void *s2, size_t n, int result);
void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
- const char *s2, size_t n);
+ const char *s2, size_t n, int result);
+ void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
+ const char *s2, size_t n, int result);
+ void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
+ const char *s2, int result);
+ void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
+ const char *s2, int result);
+ void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
+ const char *s2, char *result);
+ void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
+ const char *s2, char *result);
+ void __sanitizer_weak_hook_memmem(void *called_pc,
+ const void *s1, size_t len1,
+ const void *s2, size_t len2, void *result);
+
+ // Prints stack traces for all live heap allocations ordered by total
+ // allocation size until `top_percent` of total live heap is shown.
+ // `top_percent` should be between 1 and 100.
+ // Experimental feature currently available only with asan on Linux/x86_64.
+ void __sanitizer_print_memory_profile(size_t top_percent);
+
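A short usage sketch for the profile hook above (per the comment, only meaningful with asan on Linux/x86_64; the wrapper name is hypothetical):

    #include <sanitizer/common_interface_defs.h>

    void DumpHeapProfile() {
      // Show allocation stacks until 90% of the live heap is accounted for.
      __sanitizer_print_memory_profile(90);
    }
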
+ // Fiber annotation interface.
+ // Before switching to a different stack, one must call
+ // __sanitizer_start_switch_fiber with a pointer to the bottom of the
+ // destination stack and its size. When code starts running on the new stack,
+ // it must call __sanitizer_finish_switch_fiber to finalize the switch.
+ // The start_switch function takes a void** to store the current fake stack if
+ // there is one (it is needed when detect_stack_use_after_return is enabled).
+ // When restoring a stack, this pointer must be given to the finish_switch
+ // function. In most cases, this void* can be stored on the stack just before
+  // switching. When leaving a fiber permanently, null must be passed as the
+  // first argument to the start_switch function so that the fake stack is
+  // destroyed.
+ // If you do not want support for stack use-after-return detection, you can
+ // always pass null to these two functions.
+ // Note that the fake stack mechanism is disabled during fiber switch, so if a
+ // signal callback runs during the switch, it will not benefit from the stack
+ // use-after-return detection.
+ void __sanitizer_start_switch_fiber(void **fake_stack_save,
+ const void *bottom, size_t size);
+ void __sanitizer_finish_switch_fiber(void *fake_stack_save,
+ const void **bottom_old,
+ size_t *size_old);
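A hedged sketch of the fiber protocol described above, using POSIX ucontext fibers (all names are hypothetical; error handling omitted):

    // fiber_demo.cc -- sketch of the start/finish switch-fiber protocol.
    #include <sanitizer/common_interface_defs.h>
    #include <ucontext.h>

    static ucontext_t main_ctx, fiber_ctx;
    static char fiber_stack[1 << 16];

    static void FiberMain() {
      const void *old_bottom; size_t old_size;
      // Finalize the switch that brought us onto fiber_stack; a fresh fiber
      // has no saved fake stack, so pass null as the first argument.
      __sanitizer_finish_switch_fiber(nullptr, &old_bottom, &old_size);
      // ... fiber work ...
      // Leaving the fiber for good: null destroys this fiber's fake stack.
      __sanitizer_start_switch_fiber(nullptr, old_bottom, old_size);
      swapcontext(&fiber_ctx, &main_ctx);
    }

    int main() {
      getcontext(&fiber_ctx);
      fiber_ctx.uc_stack.ss_sp = fiber_stack;
      fiber_ctx.uc_stack.ss_size = sizeof(fiber_stack);
      fiber_ctx.uc_link = nullptr;
      makecontext(&fiber_ctx, FiberMain, 0);

      void *fake_stack_save = nullptr;
      __sanitizer_start_switch_fiber(&fake_stack_save,
                                     fiber_stack, sizeof(fiber_stack));
      swapcontext(&main_ctx, &fiber_ctx);
      // Back on the main stack; restore its fake stack.
      __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);
    }
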
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/libsanitizer/include/sanitizer/coverage_interface.h b/libsanitizer/include/sanitizer/coverage_interface.h
index 37c133aae3c..ffb956c39db 100644
--- a/libsanitizer/include/sanitizer/coverage_interface.h
+++ b/libsanitizer/include/sanitizer/coverage_interface.h
@@ -56,6 +56,7 @@ extern "C" {
// __sanitizer_get_number_of_counters bytes long and 8-aligned.
uintptr_t
__sanitizer_update_counter_bitset_and_clear_counters(uint8_t *bitset);
+
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/libsanitizer/include/sanitizer/esan_interface.h b/libsanitizer/include/sanitizer/esan_interface.h
new file mode 100644
index 00000000000..cd18f34fc00
--- /dev/null
+++ b/libsanitizer/include/sanitizer/esan_interface.h
@@ -0,0 +1,48 @@
+//===-- sanitizer/esan_interface.h ------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of EfficiencySanitizer, a family of performance tuners.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ESAN_INTERFACE_H
+#define SANITIZER_ESAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+// We declare our interface routines as weak to allow the user to avoid
+// ifdefs and instead use this pattern to allow building the same sources
+// with and without our runtime library:
+// if (__esan_report)
+// __esan_report();
+#ifdef _MSC_VER
+/* selectany is as close to weak as we'll get. */
+#define COMPILER_RT_WEAK __declspec(selectany)
+#elif __GNUC__
+#define COMPILER_RT_WEAK __attribute__((weak))
+#else
+#define COMPILER_RT_WEAK
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// This function can be called mid-run (or at the end of a run for
+// a server process that doesn't shut down normally) to request that
+// data for that point in the run be reported from the tool.
+void COMPILER_RT_WEAK __esan_report();
+
+// This function returns the number of samples that the esan tool has collected
+// to this point. This is useful for testing.
+unsigned int COMPILER_RT_WEAK __esan_get_sample_count();
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_ESAN_INTERFACE_H
diff --git a/libsanitizer/include/sanitizer/linux_syscall_hooks.h b/libsanitizer/include/sanitizer/linux_syscall_hooks.h
index 17742b6c1c4..34bb2912406 100644
--- a/libsanitizer/include/sanitizer/linux_syscall_hooks.h
+++ b/libsanitizer/include/sanitizer/linux_syscall_hooks.h
@@ -1833,6 +1833,17 @@
__sanitizer_syscall_pre_impl_vfork()
#define __sanitizer_syscall_post_vfork(res) \
__sanitizer_syscall_post_impl_vfork(res)
+#define __sanitizer_syscall_pre_sigaction(signum, act, oldact) \
+ __sanitizer_syscall_pre_impl_sigaction((long)signum, (long)act, (long)oldact)
+#define __sanitizer_syscall_post_sigaction(res, signum, act, oldact) \
+ __sanitizer_syscall_post_impl_sigaction(res, (long)signum, (long)act, \
+ (long)oldact)
+#define __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, sz) \
+ __sanitizer_syscall_pre_impl_rt_sigaction((long)signum, (long)act, \
+ (long)oldact, (long)sz)
+#define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) \
+ __sanitizer_syscall_post_impl_rt_sigaction(res, (long)signum, (long)act, \
+ (long)oldact, (long)sz)
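A hedged usage sketch for the new sigaction hooks: the macros bracket a raw syscall so the sanitizer runtime can observe it. The wrapper name is hypothetical, and using _NSIG / 8 as the kernel sigset size is an assumption of this example:

    // rt_sigaction_wrapper.c -- sketch of pre/post syscall hook usage.
    #include <sanitizer/linux_syscall_hooks.h>
    #include <signal.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    long my_rt_sigaction(int signum, const void *act, void *oldact) {
      const long kSigsetSize = _NSIG / 8;  // assumed kernel sigset_t size
      __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, kSigsetSize);
      long res = syscall(SYS_rt_sigaction, signum, act, oldact, kSigsetSize);
      __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, kSigsetSize);
      return res;
    }
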
// And now a few syscalls we don't handle yet.
#define __sanitizer_syscall_pre_afs_syscall(...)
@@ -1887,7 +1898,6 @@
#define __sanitizer_syscall_pre_query_module(...)
#define __sanitizer_syscall_pre_readahead(...)
#define __sanitizer_syscall_pre_readdir(...)
-#define __sanitizer_syscall_pre_rt_sigaction(...)
#define __sanitizer_syscall_pre_rt_sigreturn(...)
#define __sanitizer_syscall_pre_rt_sigsuspend(...)
#define __sanitizer_syscall_pre_security(...)
@@ -1901,7 +1911,6 @@
#define __sanitizer_syscall_pre_setreuid32(...)
#define __sanitizer_syscall_pre_set_thread_area(...)
#define __sanitizer_syscall_pre_setuid32(...)
-#define __sanitizer_syscall_pre_sigaction(...)
#define __sanitizer_syscall_pre_sigaltstack(...)
#define __sanitizer_syscall_pre_sigreturn(...)
#define __sanitizer_syscall_pre_sigsuspend(...)
@@ -1969,7 +1978,6 @@
#define __sanitizer_syscall_post_query_module(res, ...)
#define __sanitizer_syscall_post_readahead(res, ...)
#define __sanitizer_syscall_post_readdir(res, ...)
-#define __sanitizer_syscall_post_rt_sigaction(res, ...)
#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
#define __sanitizer_syscall_post_security(res, ...)
@@ -1983,7 +1991,6 @@
#define __sanitizer_syscall_post_setreuid32(res, ...)
#define __sanitizer_syscall_post_set_thread_area(res, ...)
#define __sanitizer_syscall_post_setuid32(res, ...)
-#define __sanitizer_syscall_post_sigaction(res, ...)
#define __sanitizer_syscall_post_sigaltstack(res, ...)
#define __sanitizer_syscall_post_sigreturn(res, ...)
#define __sanitizer_syscall_post_sigsuspend(res, ...)
@@ -3060,7 +3067,13 @@ void __sanitizer_syscall_pre_impl_fork();
void __sanitizer_syscall_post_impl_fork(long res);
void __sanitizer_syscall_pre_impl_vfork();
void __sanitizer_syscall_post_impl_vfork(long res);
-
+void __sanitizer_syscall_pre_impl_sigaction(long signum, long act, long oldact);
+void __sanitizer_syscall_post_impl_sigaction(long res, long signum, long act,
+ long oldact);
+void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act,
+ long oldact, long sz);
+void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
+ long oldact, long sz);
#ifdef __cplusplus
} // extern "C"
#endif
diff --git a/libsanitizer/interception/Makefile.in b/libsanitizer/interception/Makefile.in
index 3dfa7746779..550c8f5c3f9 100644
--- a/libsanitizer/interception/Makefile.in
+++ b/libsanitizer/interception/Makefile.in
@@ -169,6 +169,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
diff --git a/libsanitizer/interception/interception.h b/libsanitizer/interception/interception.h
index f2d48c9332f..0db36ddf28e 100644
--- a/libsanitizer/interception/interception.h
+++ b/libsanitizer/interception/interception.h
@@ -90,8 +90,8 @@ typedef __sanitizer::OFF64_T OFF64_T;
// Just a pair of pointers.
struct interpose_substitution {
- const uptr replacement;
- const uptr original;
+ const __sanitizer::uptr replacement;
+ const __sanitizer::uptr original;
};
// For a function foo() create a global pair of pointers { wrap_foo, foo } in
@@ -156,10 +156,12 @@ const interpose_substitution substitution_##func_name[] \
namespace __interception { \
extern FUNC_TYPE(func) PTR_TO_REAL(func); \
}
+# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
#else // __APPLE__
# define REAL(x) x
# define DECLARE_REAL(ret_type, func, ...) \
extern "C" ret_type func(__VA_ARGS__);
+# define ASSIGN_REAL(x, y)
#endif // __APPLE__
#define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
diff --git a/libsanitizer/interception/interception_win.cc b/libsanitizer/interception/interception_win.cc
index df51fa21ead..fa81162097e 100644
--- a/libsanitizer/interception/interception_win.cc
+++ b/libsanitizer/interception/interception_win.cc
@@ -8,19 +8,178 @@
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Windows-specific interception methods.
+//
+// This file implements several hooking techniques to intercept calls
+// to functions. The hooks are installed dynamically by modifying the
+// assembly code.
+//
+// The hooking techniques make assumptions about the way the code is
+// generated and are safe only under those assumptions.
+//
+// On 64-bit architectures, there is no direct 64-bit jump instruction. To
+// allow arbitrary branching over the whole address space, the notion of a
+// trampoline region is used. A trampoline region is a memory area within a
+// 2 GB boundary where it is safe to add custom assembly code that builds
+// 64-bit jumps.
+//
+// Hooking techniques
+// ==================
+//
+// 1) Detour
+//
+// The Detour hooking technique assumes the presence of a header with
+// padding and an overridable 2-byte nop instruction (mov edi, edi). The
+// nop can safely be replaced by a 2-byte jump without any need to save the
+// original instruction. A jump to the target is encoded in the function
+// header, and the nop is replaced by a short jump to that header.
+//
+// head: 5 x nop head: jmp <hook>
+// func: mov edi, edi --> func: jmp short <head>
+// [...] real: [...]
+//
+// This technique is only implemented on 32-bit architectures.
+// Most of the time, Windows APIs are hookable with the detour technique.
+//
+// 2) Redirect Jump
+//
+// The redirect jump is applicable when the first instruction is a direct
+// jump. The instruction is replaced by a jump to the hook.
+//
+// func: jmp <label> --> func: jmp <hook>
+//
+// On a 64-bit architecture, a trampoline is inserted.
+//
+// func: jmp <label> --> func: jmp <tramp>
+// [...]
+//
+// [trampoline]
+// tramp: jmp QWORD [addr]
+// addr: .bytes <hook>
+//
+// Note: <real> is equivalent to <label>.
+//
+// 3) HotPatch
+//
+// HotPatch hooking assumes the presence of a header with padding and a
+// first instruction of at least 2 bytes.
+//
+// The 2-byte minimum provides just enough space to encode a short jump.
+// The HotPatch technique rewrites only one instruction, to avoid breaking
+// a sequence of instructions containing a branch target.
+//
+// These assumptions are enforced by the MSVC compiler via the /HOTPATCH
+// flag; see https://msdn.microsoft.com/en-us/library/ms173507.aspx.
+// The default padding length is 5 bytes on 32-bit and 6 bytes on 64-bit.
+//
+// head: 5 x nop head: jmp <hook>
+// func: <instr> --> func: jmp short <head>
+// [...] body: [...]
+//
+// [trampoline]
+// real: <instr>
+// jmp <body>
+//
+// On a 64-bit architecture:
+//
+// head: 6 x nop head: jmp QWORD [addr1]
+// func: <instr> --> func: jmp short <head>
+// [...] body: [...]
+//
+// [trampoline]
+// addr1: .bytes <hook>
+// real: <instr>
+// jmp QWORD [addr2]
+// addr2: .bytes <body>
+//
+// 4) Trampoline
+//
+// The Trampoline hooking technique is the most aggressive one. It assumes
+// that there is a sequence of instructions that can be safely replaced by
+// a jump (enough room and no incoming branches).
+//
+// Unfortunately, these assumptions cannot be verified, and code may be
+// broken after hooking.
+//
+// func: <instr> --> func: jmp <hook>
+// <instr>
+// [...] body: [...]
+//
+// [trampoline]
+// real: <instr>
+// <instr>
+// jmp <body>
+//
+// On a 64-bit architecture:
+//
+// func: <instr> --> func: jmp QWORD [addr1]
+// <instr>
+// [...] body: [...]
+//
+// [trampoline]
+// addr1: .bytes <hook>
+// real: <instr>
+// <instr>
+// jmp QWORD [addr2]
+// addr2: .bytes <body>
//===----------------------------------------------------------------------===//
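A hedged sketch of the core patching primitive all four techniques above share: writing a 5-byte E9 rel32 jump over existing code. This mirrors the WriteJumpInstruction helper in the implementation below; the page is assumed to already be writable:

    // Sketch only: the 32-bit relative jump used by the techniques above.
    #include <stdint.h>
    #include <string.h>

    static void PatchRel32Jump(uint8_t *from, uint8_t *target) {
      // rel32 is measured from the end of the 5-byte instruction.
      int32_t rel = (int32_t)(target - (from + 5));
      from[0] = 0xE9;                       // jmp rel32
      memcpy(from + 1, &rel, sizeof(rel));  // little-endian displacement
    }
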
#ifdef _WIN32
#include "interception.h"
+#include "sanitizer_common/sanitizer_platform.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
namespace __interception {
+static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
+static const int kJumpInstructionLength = 5;
+static const int kShortJumpInstructionLength = 2;
+static const int kIndirectJumpInstructionLength = 6;
+static const int kBranchLength =
+ FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
+static const int kDirectBranchLength = kBranchLength + kAddressLength;
+
+static void InterceptionFailed() {
+ // Do we have a good way to abort with an error message here?
+ __debugbreak();
+}
+
+static bool DistanceIsWithin2Gig(uptr from, uptr target) {
+ if (from < target)
+ return target - from <= (uptr)0x7FFFFFFFU;
+ else
+ return from - target <= (uptr)0x80000000U;
+}
+
+static uptr GetMmapGranularity() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwAllocationGranularity;
+}
+
+static uptr RoundUpTo(uptr size, uptr boundary) {
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
// FIXME: internal_str* and internal_mem* functions should be moved from the
// ASan sources into interception/.
+static size_t _strlen(const char *str) {
+ const char* p = str;
+ while (*p != '\0') ++p;
+ return p - str;
+}
+
+static char* _strchr(char* str, char c) {
+ while (*str) {
+ if (*str == c)
+ return str;
+ ++str;
+ }
+ return nullptr;
+}
+
static void _memset(void *p, int value, size_t sz) {
for (size_t i = 0; i < sz; ++i)
((char*)p)[i] = (char)value;
@@ -33,163 +192,641 @@ static void _memcpy(void *dst, void *src, size_t sz) {
dst_c[i] = src_c[i];
}
-static void WriteJumpInstruction(char *jmp_from, char *to) {
- // jmp XXYYZZWW = E9 WW ZZ YY XX, where XXYYZZWW is an offset fromt jmp_from
- // to the next instruction to the destination.
- ptrdiff_t offset = to - jmp_from - 5;
- *jmp_from = '\xE9';
- *(ptrdiff_t*)(jmp_from + 1) = offset;
-}
-
-static char *GetMemoryForTrampoline(size_t size) {
- // Trampolines are allocated from a common pool.
- const int POOL_SIZE = 1024;
- static char *pool = NULL;
- static size_t pool_used = 0;
- if (!pool) {
- pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
- PAGE_EXECUTE_READWRITE);
- // FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
- // interceptors are in place.
- if (!pool)
- return NULL;
- _memset(pool, 0xCC /* int 3 */, POOL_SIZE);
+static bool ChangeMemoryProtection(
+ uptr address, uptr size, DWORD *old_protection) {
+ return ::VirtualProtect((void*)address, size,
+ PAGE_EXECUTE_READWRITE,
+ old_protection) != FALSE;
+}
+
+static bool RestoreMemoryProtection(
+ uptr address, uptr size, DWORD old_protection) {
+ DWORD unused;
+ return ::VirtualProtect((void*)address, size,
+ old_protection,
+ &unused) != FALSE;
+}
+
+static bool IsMemoryPadding(uptr address, uptr size) {
+ u8* function = (u8*)address;
+ for (size_t i = 0; i < size; ++i)
+ if (function[i] != 0x90 && function[i] != 0xCC)
+ return false;
+ return true;
+}
+
+static const u8 kHintNop10Bytes[] = {
+ 0x66, 0x66, 0x0F, 0x1F, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+template<class T>
+static bool FunctionHasPrefix(uptr address, const T &pattern) {
+ u8* function = (u8*)address - sizeof(pattern);
+ for (size_t i = 0; i < sizeof(pattern); ++i)
+ if (function[i] != pattern[i])
+ return false;
+ return true;
+}
+
+static bool FunctionHasPadding(uptr address, uptr size) {
+ if (IsMemoryPadding(address - size, size))
+ return true;
+ if (size <= sizeof(kHintNop10Bytes) &&
+ FunctionHasPrefix(address, kHintNop10Bytes))
+ return true;
+ return false;
+}
+
+static void WritePadding(uptr from, uptr size) {
+ _memset((void*)from, 0xCC, (size_t)size);
+}
+
+static void WriteJumpInstruction(uptr from, uptr target) {
+ if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
+ InterceptionFailed();
+ ptrdiff_t offset = target - from - kJumpInstructionLength;
+ *(u8*)from = 0xE9;
+ *(u32*)(from + 1) = offset;
+}
+
+static void WriteShortJumpInstruction(uptr from, uptr target) {
+ sptr offset = target - from - kShortJumpInstructionLength;
+ if (offset < -128 || offset > 127)
+ InterceptionFailed();
+ *(u8*)from = 0xEB;
+ *(u8*)(from + 1) = (u8)offset;
+}
+
+#if SANITIZER_WINDOWS64
+static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
+ // jmp [rip + <offset>] = FF 25 <offset> where <offset> is a relative
+ // offset.
+  // The offset is the distance from the end of the jump instruction to the
+  // memory location containing the target address. The displacement is still
+  // 32-bit on x64, so indirect_target must be located within a +/- 2 GB range.
+ int offset = indirect_target - from - kIndirectJumpInstructionLength;
+ if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
+ indirect_target)) {
+ InterceptionFailed();
}
+ *(u16*)from = 0x25FF;
+ *(u32*)(from + 2) = offset;
+}
+#endif
+
+static void WriteBranch(
+ uptr from, uptr indirect_target, uptr target) {
+#if SANITIZER_WINDOWS64
+ WriteIndirectJumpInstruction(from, indirect_target);
+ *(u64*)indirect_target = target;
+#else
+ (void)indirect_target;
+ WriteJumpInstruction(from, target);
+#endif
+}
+
+static void WriteDirectBranch(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
+ // Emit an indirect jump through immediately following bytes:
+ // jmp [rip + kBranchLength]
+ // .quad <target>
+ WriteBranch(from, from + kBranchLength, target);
+#else
+ WriteJumpInstruction(from, target);
+#endif
+}
+
+struct TrampolineMemoryRegion {
+ uptr content;
+ uptr allocated_size;
+ uptr max_size;
+};
+
+static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
+static const int kMaxTrampolineRegion = 1024;
+static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
- if (pool_used + size > POOL_SIZE)
- return NULL;
+static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
+#if SANITIZER_WINDOWS64
+ uptr address = image_address;
+ uptr scanned = 0;
+ while (scanned < kTrampolineScanLimitRange) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return nullptr;
- char *ret = pool + pool_used;
- pool_used += size;
- return ret;
+ // Check whether a region can be allocated at |address|.
+ if (info.State == MEM_FREE && info.RegionSize >= granularity) {
+ void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
+ granularity,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ return page;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ scanned += info.RegionSize;
+ }
+ return nullptr;
+#else
+ return ::VirtualAlloc(nullptr,
+ granularity,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+#endif
+}
+
+// Used by unittests to release mapped memory space.
+void TestOnlyReleaseTrampolineRegions() {
+ for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+ TrampolineMemoryRegion *current = &TrampolineRegions[bucket];
+ if (current->content == 0)
+ return;
+ ::VirtualFree((void*)current->content, 0, MEM_RELEASE);
+ current->content = 0;
+ }
+}
+
+static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
+ // Find a region within 2G with enough space to allocate |size| bytes.
+ TrampolineMemoryRegion *region = nullptr;
+ for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+ TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
+ if (current->content == 0) {
+ // No valid region found, allocate a new region.
+ size_t bucket_size = GetMmapGranularity();
+ void *content = AllocateTrampolineRegion(image_address, bucket_size);
+ if (content == nullptr)
+ return 0U;
+
+ current->content = (uptr)content;
+ current->allocated_size = 0;
+ current->max_size = bucket_size;
+ region = current;
+ break;
+ } else if (current->max_size - current->allocated_size > size) {
+#if SANITIZER_WINDOWS64
+      // On 64-bit, the memory space must be allocated within a 2 GB boundary.
+ uptr next_address = current->content + current->allocated_size;
+ if (next_address < image_address ||
+ next_address - image_address >= 0x7FFF0000)
+ continue;
+#endif
+ // The space can be allocated in the current region.
+ region = current;
+ break;
+ }
+ }
+
+ // Failed to find a region.
+ if (region == nullptr)
+ return 0U;
+
+ // Allocate the space in the current region.
+ uptr allocated_space = region->content + region->allocated_size;
+ region->allocated_size += size;
+ WritePadding(allocated_space, size);
+
+ return allocated_space;
}
// Returns 0 on error.
-static size_t RoundUpToInstrBoundary(size_t size, char *code) {
+static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
+ switch (*(u64*)address) {
+ case 0x90909090909006EB: // stub: jmp over 6 x nop.
+ return 8;
+ }
+
+ switch (*(u8*)address) {
+ case 0x90: // 90 : nop
+ return 1;
+
+ case 0x50: // push eax / rax
+ case 0x51: // push ecx / rcx
+ case 0x52: // push edx / rdx
+ case 0x53: // push ebx / rbx
+ case 0x54: // push esp / rsp
+ case 0x55: // push ebp / rbp
+ case 0x56: // push esi / rsi
+ case 0x57: // push edi / rdi
+ case 0x5D: // pop ebp / rbp
+ return 1;
+
+ case 0x6A: // 6A XX = push XX
+ return 2;
+
+ case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
+ case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
+ return 5;
+
+ // Cannot overwrite control-instruction. Return 0 to indicate failure.
+ case 0xE9: // E9 XX XX XX XX : jmp <label>
+ case 0xE8: // E8 XX XX XX XX : call <func>
+ case 0xC3: // C3 : ret
+ case 0xEB: // EB XX : jmp XX (short jump)
+ case 0x70: // 7Y YY : jy XX (short conditional jump)
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F:
+ return 0;
+ }
+
+ switch (*(u16*)(address)) {
+ case 0xFF8B: // 8B FF : mov edi, edi
+ case 0xEC8B: // 8B EC : mov ebp, esp
+ case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xC18B: // 8B C1 : mov eax, ecx
+ case 0xC033: // 33 C0 : xor eax, eax
+ case 0xC933: // 33 C9 : xor ecx, ecx
+ case 0xD233: // 33 D2 : xor edx, edx
+ return 2;
+
+ // Cannot overwrite control-instruction. Return 0 to indicate failure.
+ case 0x25FF: // FF 25 XX XX XX XX : jmp [XXXXXXXX]
+ return 0;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
+ return 7;
+ }
+
+#if SANITIZER_WINDOWS64
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX XX XX XX XX :
+ // movabs eax, dword ptr ds:[XXXXXXXX]
+ return 8;
+ }
+
+ switch (*(u16*)address) {
+ case 0x5040: // push rax
+ case 0x5140: // push rcx
+ case 0x5240: // push rdx
+ case 0x5340: // push rbx
+ case 0x5440: // push rsp
+ case 0x5540: // push rbp
+ case 0x5640: // push rsi
+ case 0x5740: // push rdi
+ case 0x5441: // push r12
+ case 0x5541: // push r13
+ case 0x5641: // push r14
+ case 0x5741: // push r15
+ case 0x9066: // Two-byte NOP
+ return 2;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+    case 0xe58948:    // 48 89 e5 : mov rbp, rsp
+ case 0xc18b48: // 48 8b c1 : mov rax, rcx
+ case 0xc48b48: // 48 8b c4 : mov rax, rsp
+ case 0xd9f748: // 48 f7 d9 : neg rcx
+ case 0xd12b48: // 48 2b d1 : sub rdx, rcx
+ case 0x07c1f6: // f6 c1 07 : test cl, 0x7
+ case 0xc98548: // 48 85 C9 : test rcx, rcx
+ case 0xc0854d: // 4d 85 c0 : test r8, r8
+ case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
+ case 0xc03345: // 45 33 c0 : xor r8d, r8d
+ case 0xdb3345: // 45 33 DB : xor r11d, r11d
+ case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
+ case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
+ case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
+ case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
+ case 0xca2b48: // 48 2b ca : sub rcx, rdx
+ case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
+    case 0xc00b4d:    // 4d 0b c0 : or r8, r8
+ case 0xd18b48: // 48 8b d1 : mov rdx, rcx
+ case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
+ case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
+ return 3;
+
+ case 0xec8348: // 48 83 ec XX : sub rsp, XX
+ case 0xf88349: // 49 83 f8 XX : cmp r8, XX
+ case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
+ return 4;
+
+ case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
+ return 7;
+
+ case 0x058b48: // 48 8b 05 XX XX XX XX :
+ // mov rax, QWORD PTR [rip + XXXXXXXX]
+ case 0x25ff48: // 48 ff 25 XX XX XX XX :
+ // rex.W jmp QWORD PTR [rip + XXXXXXXX]
+
+ // Instructions having offset relative to 'rip' need offset adjustment.
+ if (rel_offset)
+ *rel_offset = 3;
+ return 7;
+
+ case 0x2444c7: // C7 44 24 XX YY YY YY YY
+ // mov dword ptr [rsp + XX], YYYYYYYY
+ return 8;
+ }
+
+ switch (*(u32*)(address)) {
+ case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
+ case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
+ case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
+ case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
+ return 5;
+ }
+
+#else
+
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
+ return 5;
+ }
+ switch (*(u16*)address) {
+ case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
+ case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
+ case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX]
+ case 0xEC83: // 83 EC XX : sub esp, XX
+ case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX]
+ return 3;
+ case 0xC1F7: // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX
+ case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
+ return 6;
+ case 0x3D83: // 83 3D XX YY ZZ WW TT : cmp TT, WWZZYYXX
+ return 7;
+ case 0x7D83: // 83 7D XX YY : cmp dword ptr [ebp + XX], YY
+ return 4;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+    case 0x24448A:  // 8A 44 24 XX : mov al, byte ptr [esp + XX]
+ case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
+ case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
+ case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
+ case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
+ case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
+ return 4;
+ }
+
+ switch (*(u32*)address) {
+ case 0x2444B60F: // 0F B6 44 24 XX : movzx eax, byte ptr [esp + XX]
+ return 5;
+ }
+#endif
+
+ // Unknown instruction!
+ // FIXME: Unknown instruction failures might happen when we add a new
+ // interceptor or a new compiler version. In either case, they should result
+ // in visible and readable error messages. However, merely calling abort()
+ // leads to an infinite recursion in CheckFailed.
+ InterceptionFailed();
+ return 0;
+}
+
+// Returns 0 on error.
+static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
size_t cursor = 0;
while (cursor < size) {
- switch (code[cursor]) {
- case '\x51': // push ecx
- case '\x52': // push edx
- case '\x53': // push ebx
- case '\x54': // push esp
- case '\x55': // push ebp
- case '\x56': // push esi
- case '\x57': // push edi
- case '\x5D': // pop ebp
- cursor++;
- continue;
- case '\x6A': // 6A XX = push XX
- cursor += 2;
- continue;
- case '\xE9': // E9 XX YY ZZ WW = jmp WWZZYYXX
- case '\xB8': // B8 XX YY ZZ WW = mov eax, WWZZYYXX
- cursor += 5;
- continue;
- }
- switch (*(unsigned short*)(code + cursor)) { // NOLINT
- case 0xFF8B: // 8B FF = mov edi, edi
- case 0xEC8B: // 8B EC = mov ebp, esp
- case 0xC033: // 33 C0 = xor eax, eax
- cursor += 2;
- continue;
- case 0x458B: // 8B 45 XX = mov eax, dword ptr [ebp+XXh]
- case 0x5D8B: // 8B 5D XX = mov ebx, dword ptr [ebp+XXh]
- case 0xEC83: // 83 EC XX = sub esp, XX
- case 0x75FF: // FF 75 XX = push dword ptr [ebp+XXh]
- cursor += 3;
- continue;
- case 0xC1F7: // F7 C1 XX YY ZZ WW = test ecx, WWZZYYXX
- case 0x25FF: // FF 25 XX YY ZZ WW = jmp dword ptr ds:[WWZZYYXX]
- cursor += 6;
- continue;
- case 0x3D83: // 83 3D XX YY ZZ WW TT = cmp TT, WWZZYYXX
- cursor += 7;
- continue;
- }
- switch (0x00FFFFFF & *(unsigned int*)(code + cursor)) {
- case 0x24448A: // 8A 44 24 XX = mov eal, dword ptr [esp+XXh]
- case 0x24448B: // 8B 44 24 XX = mov eax, dword ptr [esp+XXh]
- case 0x244C8B: // 8B 4C 24 XX = mov ecx, dword ptr [esp+XXh]
- case 0x24548B: // 8B 54 24 XX = mov edx, dword ptr [esp+XXh]
- case 0x24748B: // 8B 74 24 XX = mov esi, dword ptr [esp+XXh]
- case 0x247C8B: // 8B 7C 24 XX = mov edi, dword ptr [esp+XXh]
- cursor += 4;
- continue;
+ size_t instruction_size = GetInstructionSize(address + cursor);
+ if (!instruction_size)
+ return 0;
+ cursor += instruction_size;
+ }
+ return cursor;
+}
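
A worked example of the rounding, using the sizes the decoder reports: a
5-byte jump patch over the common i386 prologue below cannot stop at byte 5,
which would split the third instruction, so the loop returns 6.

    cursor = 0:  55        push ebp      -> +1 -> cursor = 1
    cursor = 1:  8b ec     mov ebp, esp  -> +2 -> cursor = 3
    cursor = 3:  83 ec 28  sub esp, 28h  -> +3 -> cursor = 6  (>= 5, stop)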
+
+static bool CopyInstructions(uptr to, uptr from, size_t size) {
+ size_t cursor = 0;
+ while (cursor != size) {
+ size_t rel_offset = 0;
+ size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
+ _memcpy((void*)(to + cursor), (void*)(from + cursor),
+ (size_t)instruction_size);
+ if (rel_offset) {
+ uptr delta = to - from;
+ uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
+#if SANITIZER_WINDOWS64
+ if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
+ return false;
+#endif
+ *(u32*)(to + cursor + rel_offset) = relocated_offset;
}
+ cursor += instruction_size;
+ }
+ return true;
+}
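
A worked example of the displacement fix-up above, with made-up addresses:
suppose the 7-byte `rex.W jmp QWORD PTR [rip + 0x200]` (48 ff 25 00 02 00 00,
rel_offset = 3) is copied from `from` = 0x7ff61000 to `to` = 0x7ff69000.

    delta       = to - from         = 0x8000
    old target  = from + 7 + 0x200  = 0x7ff61207
    new disp    = 0x200 - 0x8000    = 0xffff8200  (i.e. -0x7e00)
    new target  = to + 7 - 0x7e00   = 0x7ff61207  (unchanged)

The SANITIZER_WINDOWS64 guard rejects copies whose adjusted displacement
would leave the signed 32-bit range, since a rel32 can only reach about
+/- 2 GB from the instruction.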
- // Unknown instruction!
- // FIXME: Unknown instruction failures might happen when we add a new
- // interceptor or a new compiler version. In either case, they should result
- // in visible and readable error messages. However, merely calling abort()
- // leads to an infinite recursion in CheckFailed.
- // Do we have a good way to abort with an error message here?
- __debugbreak();
- return 0;
+
+#if !SANITIZER_WINDOWS64
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ const int kDetourHeaderLen = 5;
+ const u16 kDetourInstruction = 0xFF8B;
+
+ uptr header = (uptr)old_func - kDetourHeaderLen;
+ uptr patch_length = kDetourHeaderLen + kShortJumpInstructionLength;
+
+ // Validate that the function is hookable.
+ if (*(u16*)old_func != kDetourInstruction ||
+ !IsMemoryPadding(header, kDetourHeaderLen))
+ return false;
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(header, patch_length, &protection))
+ return false;
+
+ // Write a relative jump to the redirected function.
+ WriteJumpInstruction(header, new_func);
+
+ // Write the short jump to the function prefix.
+ WriteShortJumpInstruction(old_func, header);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(header, patch_length, protection))
+ return false;
+
+ if (orig_old_func)
+ *orig_old_func = old_func + kShortJumpInstructionLength;
+
+ return true;
+}
+#endif
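
The detour relies on the hot-patch prologue MSVC emits under /hotpatch: a
2-byte `mov edi, edi` (bytes 8B FF, matched above as the u16 0xFF8B) preceded
by at least five bytes of padding. An illustrative before/after byte layout
(the padding values are typical, not guaranteed):

    before                            after
    cc cc cc cc cc   padding          e9 xx xx xx xx   jmp new_func
    8b ff            mov edi, edi     eb f9            jmp short -7 (header)
    ...body...                        ...body... (untouched)

Everything from old_func + 2 onward is still original code, which is why
*orig_old_func is simply old_func + kShortJumpInstructionLength.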
+
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ // Check whether the first instruction is a relative jump.
+ if (*(u8*)old_func != 0xE9)
+ return false;
+
+ if (orig_old_func) {
+ uptr relative_offset = *(u32*)(old_func + 1);
+ uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
+ *orig_old_func = absolute_target;
}
- return cursor;
+#if SANITIZER_WINDOWS64
+ // If needed, get memory space for a trampoline jump.
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, kDirectBranchLength);
+ if (!trampoline)
+ return false;
+ WriteDirectBranch(trampoline, new_func);
+#endif
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(old_func, kJumpInstructionLength, &protection))
+ return false;
+
+ // Write a relative jump to the redirected function.
+ WriteJumpInstruction(old_func, FIRST_32_SECOND_64(new_func, trampoline));
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(old_func, kJumpInstructionLength, protection))
+ return false;
+
+ return true;
}
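
When the function already begins with an E9 jump (an incremental-linking
thunk, for instance, or an earlier hook), the existing rel32 is decoded to
recover the real entry point before the jump is rewritten. With made-up
numbers:

    old_func = 0x1000:  e9 fb 00 00 00   jmp +0xfb
    *orig_old_func = 0x1000 + 0xfb + 5 = 0x1100

where the 5 is kJumpInstructionLength, the length of the jump itself.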
-bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func) {
-#ifdef _WIN64
-#error OverrideFunction is not yet supported on x64
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ const int kHotPatchHeaderLen = kBranchLength;
+
+ uptr header = (uptr)old_func - kHotPatchHeaderLen;
+ uptr patch_length = kHotPatchHeaderLen + kShortJumpInstructionLength;
+
+ // Validate that the function is hot patchable.
+ size_t instruction_size = GetInstructionSize(old_func);
+ if (instruction_size < kShortJumpInstructionLength ||
+ !FunctionHasPadding(old_func, kHotPatchHeaderLen))
+ return false;
+
+ if (orig_old_func) {
+ // Put the needed instructions into the trampoline bytes.
+ uptr trampoline_length = instruction_size + kDirectBranchLength;
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
+ if (!trampoline)
+ return false;
+ if (!CopyInstructions(trampoline, old_func, instruction_size))
+ return false;
+ WriteDirectBranch(trampoline + instruction_size,
+ old_func + instruction_size);
+ *orig_old_func = trampoline;
+ }
+
+ // If needed, get memory space for indirect address.
+ uptr indirect_address = 0;
+#if SANITIZER_WINDOWS64
+ indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
+ if (!indirect_address)
+ return false;
#endif
- // Function overriding works basically like this:
- // We write "jmp <new_func>" (5 bytes) at the beginning of the 'old_func'
- // to override it.
- // We might want to be able to execute the original 'old_func' from the
- // wrapper, in this case we need to keep the leading 5+ bytes ('head')
- // of the original code somewhere with a "jmp <old_func+head>".
- // We call these 'head'+5 bytes of instructions a "trampoline".
- char *old_bytes = (char *)old_func;
-
- // We'll need at least 5 bytes for a 'jmp'.
- size_t head = 5;
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(header, patch_length, &protection))
+ return false;
+
+ // Write jumps to the redirected function.
+ WriteBranch(header, indirect_address, new_func);
+ WriteShortJumpInstruction(old_func, header);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(header, patch_length, protection))
+ return false;
+
+ return true;
+}
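
The trampoline built in the orig_old_func branch has a fixed shape: the
displaced leading instruction(s), followed by a direct branch back into the
unmodified body. An illustrative layout (lengths depend on the prologue):

    trampoline + 0:                <copied prologue, instruction_size bytes>
    trampoline + instruction_size: <branch to old_func + instruction_size>

Calling through *orig_old_func therefore runs the original prologue and
falls straight into the body the patch never touched.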
+
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+
+ size_t instructions_length = kBranchLength;
+ size_t padding_length = 0;
+ uptr indirect_address = 0;
+
if (orig_old_func) {
// Find out the number of bytes of the instructions we need to copy
- // to the trampoline and store it in 'head'.
- head = RoundUpToInstrBoundary(head, old_bytes);
- if (!head)
+ // to the trampoline.
+ instructions_length = RoundUpToInstrBoundary(kBranchLength, old_func);
+ if (!instructions_length)
return false;
// Put the needed instructions into the trampoline bytes.
- char *trampoline = GetMemoryForTrampoline(head + 5);
+ uptr trampoline_length = instructions_length + kDirectBranchLength;
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
if (!trampoline)
return false;
- _memcpy(trampoline, old_bytes, head);
- WriteJumpInstruction(trampoline + head, old_bytes + head);
- *orig_old_func = (uptr)trampoline;
+ if (!CopyInstructions(trampoline, old_func, instructions_length))
+ return false;
+ WriteDirectBranch(trampoline + instructions_length,
+ old_func + instructions_length);
+ *orig_old_func = trampoline;
}
- // Now put the "jmp <new_func>" instruction at the original code location.
- // We should preserve the EXECUTE flag as some of our own code might be
- // located in the same page (sic!). FIXME: might consider putting the
- // __interception code into a separate section or something?
- DWORD old_prot, unused_prot;
- if (!VirtualProtect((void *)old_bytes, head, PAGE_EXECUTE_READWRITE,
- &old_prot))
+#if SANITIZER_WINDOWS64
+ // Check if the targeted address can be encoded in the function padding.
+ // Otherwise, allocate it in the trampoline region.
+ if (IsMemoryPadding(old_func - kAddressLength, kAddressLength)) {
+ indirect_address = old_func - kAddressLength;
+ padding_length = kAddressLength;
+ } else {
+ indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
+ if (!indirect_address)
+ return false;
+ }
+#endif
+
+ // Change memory protection to writable.
+ uptr patch_address = old_func - padding_length;
+ uptr patch_length = instructions_length + padding_length;
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(patch_address, patch_length, &protection))
return false;
- WriteJumpInstruction(old_bytes, (char *)new_func);
- _memset(old_bytes + 5, 0xCC /* int 3 */, head - 5);
+ // Patch the original function.
+ WriteBranch(old_func, indirect_address, new_func);
- // Restore the original permissions.
- if (!VirtualProtect((void *)old_bytes, head, old_prot, &unused_prot))
- return false; // not clear if this failure bothers us.
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(patch_address, patch_length, protection))
+ return false;
return true;
}
+bool OverrideFunction(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+#if !SANITIZER_WINDOWS64
+ if (OverrideFunctionWithDetour(old_func, new_func, orig_old_func))
+ return true;
+#endif
+ if (OverrideFunctionWithRedirectJump(old_func, new_func, orig_old_func))
+ return true;
+ if (OverrideFunctionWithHotPatch(old_func, new_func, orig_old_func))
+ return true;
+ if (OverrideFunctionWithTrampoline(old_func, new_func, orig_old_func))
+ return true;
+ return false;
+}
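
A hedged usage sketch of this fallback chain, not taken from the patch: the
wrapper and pointer names are hypothetical, and it assumes interception_win.h
has been included. Whichever strategy succeeds leaves a callable copy of the
original in real_memset.

    #include <cstring>

    static void *(*real_memset)(void *, int, size_t);  // hypothetical name

    static void *my_memset(void *p, int v, size_t n) {  // hypothetical wrapper
      // ...shadow-memory checks would go here...
      return real_memset(p, v, n);
    }

    static bool HookMemset() {
      return __interception::OverrideFunction(
          (__interception::uptr)&memset, (__interception::uptr)&my_memset,
          (__interception::uptr *)&real_memset);
    }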
+
static void **InterestingDLLsAvailable() {
- const char *InterestingDLLs[] = {
- "kernel32.dll",
- "msvcr110.dll", // VS2012
- "msvcr120.dll", // VS2013
- // NTDLL should go last as it exports some functions that we should override
- // in the CRT [presumably only used internally].
- "ntdll.dll", NULL
- };
+ static const char *InterestingDLLs[] = {
+ "kernel32.dll",
+ "msvcr110.dll", // VS2012
+ "msvcr120.dll", // VS2013
+ "vcruntime140.dll", // VS2015
+ "ucrtbase.dll", // Universal CRT
+ // NTDLL should go last as it exports some functions that we should
+ // override in the CRT [presumably only used internally].
+ "ntdll.dll", NULL};
static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
if (!result[0]) {
for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
@@ -244,6 +881,32 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
if (!strcmp(func_name, name)) {
DWORD index = ordinals[i];
RVAPtr<char> func(module, functions[index]);
+
+ // Handle forwarded functions.
+ DWORD offset = functions[index];
+ if (offset >= export_directory->VirtualAddress &&
+ offset < export_directory->VirtualAddress + export_directory->Size) {
+      // An entry for a forwarded function is a string of the form
+      // "<module>.<function_name>" stored in the export directory
+      // itself.
+ char function_name[256];
+      size_t function_name_length = _strlen(func);
+      if (function_name_length >= sizeof(function_name) - 1)
+        InterceptionFailed();
+
+      _memcpy(function_name, func, function_name_length);
+      function_name[function_name_length] = '\0';
+ char* separator = _strchr(function_name, '.');
+ if (!separator)
+ InterceptionFailed();
+ *separator = '\0';
+
+ void* redirected_module = GetModuleHandleA(function_name);
+ if (!redirected_module)
+ InterceptionFailed();
+ return InternalGetProcAddress(redirected_module, separator + 1);
+ }
+
return (uptr)(char *)func;
}
}
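
For example, on recent Windows releases kernel32.dll exports HeapAlloc as a
forwarder, so the export entry is a string rather than code, and the lookup
above splits it at the dot and recurses into the named module:

    export directory entry:  "NTDLL.RtlAllocateHeap"
                              ^^^^^ module, via GetModuleHandleA
                                    ^^^^^^^^^^^^^^^ resolved recursively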
@@ -251,19 +914,83 @@ uptr InternalGetProcAddress(void *module, const char *func_name) {
return 0;
}
-static bool GetFunctionAddressInDLLs(const char *func_name, uptr *func_addr) {
- *func_addr = 0;
+bool OverrideFunction(
+ const char *func_name, uptr new_func, uptr *orig_old_func) {
+ bool hooked = false;
void **DLLs = InterestingDLLsAvailable();
- for (size_t i = 0; *func_addr == 0 && DLLs[i]; ++i)
- *func_addr = InternalGetProcAddress(DLLs[i], func_name);
- return (*func_addr != 0);
+ for (size_t i = 0; DLLs[i]; ++i) {
+ uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
+ if (func_addr &&
+ OverrideFunction(func_addr, new_func, orig_old_func)) {
+ hooked = true;
+ }
+ }
+ return hooked;
}
-bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func) {
- uptr orig_func;
- if (!GetFunctionAddressInDLLs(name, &orig_func))
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func) {
+ HMODULE module = GetModuleHandleA(module_to_patch);
+ if (!module)
+ return false;
+
+ // Check that the module header is full and present.
+ RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
+ RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
+ if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
+ headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
+ headers->FileHeader.SizeOfOptionalHeader <
+ sizeof(IMAGE_OPTIONAL_HEADER)) {
return false;
- return OverrideFunction(orig_func, new_func, orig_old_func);
+ }
+
+ IMAGE_DATA_DIRECTORY *import_directory =
+ &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
+
+ // Iterate the list of imported DLLs. FirstThunk will be null for the last
+ // entry.
+ RVAPtr<IMAGE_IMPORT_DESCRIPTOR> imports(module,
+ import_directory->VirtualAddress);
+ for (; imports->FirstThunk != 0; ++imports) {
+ RVAPtr<const char> modname(module, imports->Name);
+ if (_stricmp(&*modname, imported_module) == 0)
+ break;
+ }
+ if (imports->FirstThunk == 0)
+ return false;
+
+ // We have two parallel arrays: the import address table (IAT) and the table
+ // of names. They start out containing the same data, but the loader rewrites
+ // the IAT to hold imported addresses and leaves the name table in
+ // OriginalFirstThunk alone.
+ RVAPtr<IMAGE_THUNK_DATA> name_table(module, imports->OriginalFirstThunk);
+ RVAPtr<IMAGE_THUNK_DATA> iat(module, imports->FirstThunk);
+ for (; name_table->u1.Ordinal != 0; ++name_table, ++iat) {
+ if (!IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+ RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(
+ module, name_table->u1.ForwarderString);
+ const char *funcname = &import_by_name->Name[0];
+ if (strcmp(funcname, function_name) == 0)
+ break;
+ }
+ }
+ if (name_table->u1.Ordinal == 0)
+ return false;
+
+ // Now we have the correct IAT entry. Do the swap. We have to make the page
+ // read/write first.
+ if (orig_old_func)
+ *orig_old_func = iat->u1.AddressOfData;
+ DWORD old_prot, unused_prot;
+ if (!VirtualProtect(&iat->u1.AddressOfData, 4, PAGE_EXECUTE_READWRITE,
+ &old_prot))
+ return false;
+ iat->u1.AddressOfData = new_function;
+ if (!VirtualProtect(&iat->u1.AddressOfData, 4, old_prot, &unused_prot))
+ return false; // Not clear if this failure bothers us.
+ return true;
}
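
An illustrative call, with a hypothetical wrapper name: this patches only
ucrtbase.dll's import-address-table slot for kernel32's HeapAlloc, so other
modules importing the same function keep the unhooked address.

    uptr real_heap_alloc = 0;
    OverrideImportedFunction("ucrtbase.dll", "kernel32.dll", "HeapAlloc",
                             (uptr)&asan_heap_alloc,  // hypothetical wrapper
                             &real_heap_alloc);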
} // namespace __interception
diff --git a/libsanitizer/interception/interception_win.h b/libsanitizer/interception/interception_win.h
index 6388209dcc1..f092d18d28f 100644
--- a/libsanitizer/interception/interception_win.h
+++ b/libsanitizer/interception/interception_win.h
@@ -32,6 +32,31 @@ bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
uptr InternalGetProcAddress(void *module, const char *func_name);
+// Overrides a function only when it is called from a specific DLL. For example,
+// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
+// affecting other third party libraries.
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func);
+
+#if !SANITIZER_WINDOWS64
+// Exposed for unittests
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+#endif
+
+// Exposed for unittests
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+
+// Exposed for unittests
+void TestOnlyReleaseTrampolineRegions();
+
} // namespace __interception
#if defined(INTERCEPTION_DYNAMIC_CRT)
@@ -48,5 +73,10 @@ uptr InternalGetProcAddress(void *module, const char *func_name);
#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
+#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
+ ::__interception::OverrideImportedFunction( \
+ user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+
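
A typical use of the macro, with illustrative module names; it expands to the
OverrideImportedFunction call declared above, wiring WRAP(func) into the
named module's import table and saving the original in REAL(func):

    INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", "kernel32.dll", HeapAlloc);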
#endif // INTERCEPTION_WIN_H
#endif // _WIN32
diff --git a/libsanitizer/libbacktrace/Makefile.in b/libsanitizer/libbacktrace/Makefile.in
index ff37a81e4ec..3a618ecb838 100644
--- a/libsanitizer/libbacktrace/Makefile.in
+++ b/libsanitizer/libbacktrace/Makefile.in
@@ -211,6 +211,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
diff --git a/libsanitizer/lsan/Makefile.in b/libsanitizer/lsan/Makefile.in
index b02c3736a49..a0c8ae9b7c6 100644
--- a/libsanitizer/lsan/Makefile.in
+++ b/libsanitizer/lsan/Makefile.in
@@ -210,6 +210,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
diff --git a/libsanitizer/lsan/lsan.cc b/libsanitizer/lsan/lsan.cc
index 6e7429c95a5..2ded5544c71 100644
--- a/libsanitizer/lsan/lsan.cc
+++ b/libsanitizer/lsan/lsan.cc
@@ -41,6 +41,7 @@ static void InitializeFlags() {
cf.CopyFrom(*common_flags());
cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
cf.malloc_context_size = 30;
+ cf.intercept_tls_get_addr = true;
cf.detect_leaks = true;
cf.exitcode = 23;
OverrideCommonFlags(cf);
@@ -69,6 +70,7 @@ extern "C" void __lsan_init() {
lsan_init_is_running = true;
SanitizerToolName = "LeakSanitizer";
CacheBinaryName();
+ AvoidCVE_2016_2143();
InitializeFlags();
InitCommonLsan();
InitializeAllocator();
diff --git a/libsanitizer/lsan/lsan.h b/libsanitizer/lsan/lsan.h
index ee2fc02cf08..6d2d427b27d 100644
--- a/libsanitizer/lsan/lsan.h
+++ b/libsanitizer/lsan/lsan.h
@@ -22,8 +22,11 @@
stack_top = t->stack_end(); \
stack_bottom = t->stack_begin(); \
} \
- stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
- /* context */ 0, stack_top, stack_bottom, fast); \
+ if (!SANITIZER_MIPS || \
+ IsValidFrame(GET_CURRENT_FRAME(), stack_top, stack_bottom)) { \
+ stack.Unwind(max_size, StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ /* context */ 0, stack_top, stack_bottom, fast); \
+ } \
}
#define GET_STACK_TRACE_FATAL \
diff --git a/libsanitizer/lsan/lsan_allocator.cc b/libsanitizer/lsan/lsan_allocator.cc
index 22b5f7e1a4a..0d2fceac71b 100644
--- a/libsanitizer/lsan/lsan_allocator.cc
+++ b/libsanitizer/lsan/lsan_allocator.cc
@@ -41,10 +41,17 @@ typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
PrimaryAllocator;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
- sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x600000000000ULL;
+ static const uptr kSpaceSize = 0x40000000000ULL; // 4T.
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<> SecondaryAllocator;
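
The template-argument list of SizeClassAllocator64 is thus replaced by a
single parameter struct: the same three values now travel as static members
of AP64, and the struct form lets new knobs (MapUnmapCallback, kFlags)
default sensibly. A sketch of the equivalence, using only what this hunk
shows:

    old: SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
                              sizeof(ChunkMetadata), DefaultSizeClassMap>
    new: SizeClassAllocator64<AP64>  // kSpaceBeg, kSpaceSize, etc. inside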
@@ -97,11 +104,13 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
memset(p, 0, size);
RegisterAllocation(stack, p, size);
if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
+ RunMallocHooks(p, size);
return p;
}
void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
+ RunFreeHooks(p);
RegisterDeallocation(p);
allocator.Deallocate(&cache, p);
}
diff --git a/libsanitizer/lsan/lsan_common.cc b/libsanitizer/lsan/lsan_common.cc
index 6d674c5e437..41024e11873 100644
--- a/libsanitizer/lsan/lsan_common.cc
+++ b/libsanitizer/lsan/lsan_common.cc
@@ -21,6 +21,7 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#if CAN_SANITIZE_LEAKS
namespace __lsan {
@@ -29,8 +30,17 @@ namespace __lsan {
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);
+__attribute__((tls_model("initial-exec")))
THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
+void EnableInThisThread() {
+ if (!disable_counter && common_flags()->detect_leaks) {
+ Report("Unmatched call to __lsan_enable().\n");
+ Die();
+ }
+ disable_counter--;
+}
Flags lsan_flags;
@@ -183,9 +193,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
LOG_THREADS("Processing thread %d.\n", os_id);
uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
+ DTLS *dtls;
bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
&tls_begin, &tls_end,
- &cache_begin, &cache_end);
+ &cache_begin, &cache_end, &dtls);
if (!thread_found) {
// If a thread can't be found in the thread registry, it's probably in the
// process of destruction. Log this event and move on.
@@ -209,9 +220,18 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
if (sp < stack_begin || sp >= stack_end) {
// SP is outside the recorded stack range (e.g. the thread is running a
- // signal handler on alternate stack). Again, consider the entire stack
- // range to be reachable.
+ // signal handler on alternate stack, or swapcontext was used).
+ // Again, consider the entire stack range to be reachable.
LOG_THREADS("WARNING: stack pointer not in stack range.\n");
+ uptr page_size = GetPageSizeCached();
+ int skipped = 0;
+ while (stack_begin < stack_end &&
+ !IsAccessibleMemoryRange(stack_begin, 1)) {
+ skipped++;
+ stack_begin += page_size;
+ }
+ LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
+ skipped, stack_begin, stack_end);
} else {
// Shrink the stack range to ignore out-of-scope values.
stack_begin = sp;
@@ -236,6 +256,17 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
if (tls_end > cache_end)
ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
}
+ if (dtls) {
+ for (uptr j = 0; j < dtls->dtv_size; ++j) {
+ uptr dtls_beg = dtls->dtv[j].beg;
+ uptr dtls_end = dtls_beg + dtls->dtv[j].size;
+ if (dtls_beg < dtls_end) {
+ LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
+ ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
+ kReachable);
+ }
+ }
+ }
}
}
}
@@ -414,6 +445,11 @@ static bool CheckForLeaks() {
if (!param.success) {
Report("LeakSanitizer has encountered a fatal error.\n");
+ Report(
+ "HINT: For debugging, try setting environment variable "
+ "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
Die();
}
param.leak_report.ApplySuppressions();
@@ -615,6 +651,13 @@ uptr LeakReport::UnsuppressedLeakCount() {
}
} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
+namespace __lsan {
+void InitCommonLsan() { }
+void DoLeakCheck() { }
+void DisableInThisThread() { }
+void EnableInThisThread() { }
+}
#endif // CAN_SANITIZE_LEAKS
using namespace __lsan; // NOLINT
@@ -680,18 +723,14 @@ void __lsan_unregister_root_region(const void *begin, uptr size) {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
- __lsan::disable_counter++;
+ __lsan::DisableInThisThread();
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
- if (!__lsan::disable_counter && common_flags()->detect_leaks) {
- Report("Unmatched call to __lsan_enable().\n");
- Die();
- }
- __lsan::disable_counter--;
+ __lsan::EnableInThisThread();
#endif
}
diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h
index b415567e7f3..1091b84f108 100644
--- a/libsanitizer/lsan/lsan_common.h
+++ b/libsanitizer/lsan/lsan_common.h
@@ -29,6 +29,7 @@
namespace __sanitizer {
class FlagParser;
+struct DTLS;
}
namespace __lsan {
@@ -116,6 +117,16 @@ void InitCommonLsan();
void DoLeakCheck();
bool DisabledInThisThread();
+// Used to implement __lsan::ScopedDisabler.
+void DisableInThisThread();
+void EnableInThisThread();
+// Can be used to ignore memory allocated by an intercepted
+// function.
+struct ScopedInterceptorDisabler {
+ ScopedInterceptorDisabler() { DisableInThisThread(); }
+ ~ScopedInterceptorDisabler() { EnableInThisThread(); }
+};
+
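
A usage sketch, with a hypothetical callee: allocations made while the
disabler is alive are excluded from leak reports. The pthread_create
interceptor later in this patch uses exactly this pattern.

    {
      ScopedInterceptorDisabler disabler;
      cache = library_call_that_caches_memory();  // hypothetical callee
    }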
// Special case for "new T[0]" where T is a type with DTOR.
// new T[0] will allocate one word for the array size (0) and store a pointer
// to the end of allocated chunk.
@@ -139,8 +150,8 @@ bool WordIsPoisoned(uptr addr);
void LockThreadRegistry();
void UnlockThreadRegistry();
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
- uptr *tls_begin, uptr *tls_end,
- uptr *cache_begin, uptr *cache_end);
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls);
void ForEachExtraStackRange(uptr os_id, RangeIteratorCallback callback,
void *arg);
// If called from the main thread, updates the main thread's TID in the thread
diff --git a/libsanitizer/lsan/lsan_common_linux.cc b/libsanitizer/lsan/lsan_common_linux.cc
index 0456dce890a..abbb61f07c9 100644
--- a/libsanitizer/lsan/lsan_common_linux.cc
+++ b/libsanitizer/lsan/lsan_common_linux.cc
@@ -24,9 +24,8 @@
namespace __lsan {
static const char kLinkerName[] = "ld";
-// We request 2 modules matching "ld", so we can print a warning if there's more
-// than one match. But only the first one is actually used.
-static char linker_placeholder[2 * sizeof(LoadedModule)] ALIGNED(64);
+
+static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;
static bool IsLinker(const char* full_name) {
@@ -34,20 +33,24 @@ static bool IsLinker(const char* full_name) {
}
void InitializePlatformSpecificModules() {
- internal_memset(linker_placeholder, 0, sizeof(linker_placeholder));
- uptr num_matches = GetListOfModules(
- reinterpret_cast<LoadedModule *>(linker_placeholder), 2, IsLinker);
- if (num_matches == 1) {
- linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
- return;
+ ListOfModules modules;
+ modules.init();
+ for (LoadedModule &module : modules) {
+ if (!IsLinker(module.full_name())) continue;
+ if (linker == nullptr) {
+ linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
+ *linker = module;
+ module = LoadedModule();
+ } else {
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS will not be handled correctly.\n", kLinkerName);
+ linker->clear();
+ linker = nullptr;
+ return;
+ }
}
- if (num_matches == 0)
- VReport(1, "LeakSanitizer: Dynamic linker not found. "
- "TLS will not be handled correctly.\n");
- else if (num_matches > 1)
- VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
- "TLS will not be handled correctly.\n", kLinkerName);
- linker = nullptr;
+  if (linker == nullptr)
+    VReport(1, "LeakSanitizer: Dynamic linker not found. "
+               "TLS will not be handled correctly.\n");
}
static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
@@ -66,7 +69,7 @@ static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
if (begin <= allocator_begin && allocator_begin < end) {
CHECK_LE(allocator_begin, allocator_end);
- CHECK_LT(allocator_end, end);
+ CHECK_LE(allocator_end, end);
if (begin < allocator_begin)
ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
kReachable);
@@ -98,6 +101,7 @@ static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
struct ProcessPlatformAllocParam {
Frontier *frontier;
StackDepotReverseMap *stack_depot_reverse_map;
+ bool skip_linker_allocations;
};
// ForEachChunk callback. Identifies unreachable chunks which must be treated as
@@ -115,7 +119,8 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
// If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
// it as reachable, as we can't properly report its allocation stack anyway.
- if (caller_pc == 0 || linker->containsAddress(caller_pc)) {
+ if (caller_pc == 0 || (param->skip_linker_allocations &&
+ linker->containsAddress(caller_pc))) {
m.set_tag(kReachable);
param->frontier->push_back(chunk);
}
@@ -140,10 +145,12 @@ static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
- if (!flags()->use_tls) return;
- if (!linker) return;
StackDepotReverseMap stack_depot_reverse_map;
- ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
+ ProcessPlatformAllocParam arg;
+ arg.frontier = frontier;
+ arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+ arg.skip_linker_allocations =
+ flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}
diff --git a/libsanitizer/lsan/lsan_flags.inc b/libsanitizer/lsan/lsan_flags.inc
index 73a980e1724..98611257494 100644
--- a/libsanitizer/lsan/lsan_flags.inc
+++ b/libsanitizer/lsan/lsan_flags.inc
@@ -32,6 +32,10 @@ LSAN_FLAG(bool, use_tls, true,
"Root set: include TLS and thread-specific storage")
LSAN_FLAG(bool, use_root_regions, true,
"Root set: include regions added via __lsan_register_root_region().")
+LSAN_FLAG(bool, use_ld_allocations, true,
+          "Root set: mark as reachable all allocations made by the dynamic "
+          "linker. This was the old way to handle dynamic TLS, and will "
+ "be removed soon. Do not use this flag.")
LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
LSAN_FLAG(bool, use_poisoned, false,
diff --git a/libsanitizer/lsan/lsan_interceptors.cc b/libsanitizer/lsan/lsan_interceptors.cc
index 57581e855c4..160ed5979c4 100644
--- a/libsanitizer/lsan/lsan_interceptors.cc
+++ b/libsanitizer/lsan/lsan_interceptors.cc
@@ -18,8 +18,10 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan.h"
#include "lsan_allocator.h"
+#include "lsan_common.h"
#include "lsan_thread.h"
using namespace __lsan;
@@ -102,6 +104,14 @@ INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
return 0;
}
+INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ void *res = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ DTLS_on_libc_memalign(res, size);
+ return res;
+}
+
INTERCEPTOR(void*, valloc, uptr size) {
ENSURE_LSAN_INITED;
GET_STACK_TRACE_MALLOC;
@@ -172,11 +182,6 @@ void operator delete[](void *ptr, std::nothrow_t const &) {
OPERATOR_DELETE_BODY;
}
-// We need this to intercept the __libc_memalign calls that are used to
-// allocate dynamic TLS space in ld-linux.so.
-INTERCEPTOR(void *, __libc_memalign, uptr align, uptr s)
- ALIAS(WRAPPER_NAME(memalign));
-
///// Thread initialization and finalization. /////
static unsigned g_thread_finalize_key;
@@ -235,7 +240,15 @@ INTERCEPTOR(int, pthread_create, void *th, void *attr,
p.callback = callback;
p.param = param;
atomic_store(&p.tid, 0, memory_order_relaxed);
- int res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
+ int res;
+ {
+    // Ignore all allocations made by pthread_create: thread stack/TLS may be
+    // cached by pthread for future reuse even after thread destruction, and
+    // the linked list it is stored in does not even hold valid pointers to
+    // the objects; those are computed by obscure pointer arithmetic.
+ ScopedInterceptorDisabler disabler;
+ res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
+ }
if (res == 0) {
int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th, detached);
CHECK_NE(tid, 0);
diff --git a/libsanitizer/lsan/lsan_thread.cc b/libsanitizer/lsan/lsan_thread.cc
index 1313ea2dce1..af5ad47913f 100644
--- a/libsanitizer/lsan/lsan_thread.cc
+++ b/libsanitizer/lsan/lsan_thread.cc
@@ -15,6 +15,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan_allocator.h"
namespace __lsan {
@@ -33,7 +34,7 @@ static const uptr kMaxThreads = 1 << 13;
static const uptr kThreadQuarantineSize = 64;
void InitializeThreadRegistry() {
- static char thread_registry_placeholder[sizeof(ThreadRegistry)] ALIGNED(64);
+ static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
thread_registry = new(thread_registry_placeholder)
ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
}
@@ -47,18 +48,20 @@ void SetCurrentThread(u32 tid) {
}
ThreadContext::ThreadContext(int tid)
- : ThreadContextBase(tid),
- stack_begin_(0),
- stack_end_(0),
- cache_begin_(0),
- cache_end_(0),
- tls_begin_(0),
- tls_end_(0) {}
+ : ThreadContextBase(tid),
+ stack_begin_(0),
+ stack_end_(0),
+ cache_begin_(0),
+ cache_end_(0),
+ tls_begin_(0),
+ tls_end_(0),
+ dtls_(nullptr) {}
struct OnStartedArgs {
uptr stack_begin, stack_end,
cache_begin, cache_end,
tls_begin, tls_end;
+ DTLS *dtls;
};
void ThreadContext::OnStarted(void *arg) {
@@ -69,10 +72,12 @@ void ThreadContext::OnStarted(void *arg) {
tls_end_ = args->tls_end;
cache_begin_ = args->cache_begin;
cache_end_ = args->cache_end;
+ dtls_ = args->dtls;
}
void ThreadContext::OnFinished() {
AllocatorThreadFinish();
+ DTLS_Destroy();
}
u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
@@ -89,6 +94,7 @@ void ThreadStart(u32 tid, uptr os_id) {
args.stack_end = args.stack_begin + stack_size;
args.tls_end = args.tls_begin + tls_size;
GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+ args.dtls = DTLS_Get();
thread_registry->StartThread(tid, os_id, &args);
}
@@ -129,8 +135,8 @@ void EnsureMainThreadIDIsCorrect() {
///// Interface to the common LSan module. /////
bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
- uptr *tls_begin, uptr *tls_end,
- uptr *cache_begin, uptr *cache_end) {
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
ThreadContext *context = static_cast<ThreadContext *>(
thread_registry->FindThreadContextByOsIDLocked(os_id));
if (!context) return false;
@@ -140,6 +146,7 @@ bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
*tls_end = context->tls_end();
*cache_begin = context->cache_begin();
*cache_end = context->cache_end();
+ *dtls = context->dtls();
return true;
}
diff --git a/libsanitizer/lsan/lsan_thread.h b/libsanitizer/lsan/lsan_thread.h
index 70c3ff92616..dafd8af0a29 100644
--- a/libsanitizer/lsan/lsan_thread.h
+++ b/libsanitizer/lsan/lsan_thread.h
@@ -15,6 +15,10 @@
#include "sanitizer_common/sanitizer_thread_registry.h"
+namespace __sanitizer {
+struct DTLS;
+}
+
namespace __lsan {
class ThreadContext : public ThreadContextBase {
@@ -28,10 +32,13 @@ class ThreadContext : public ThreadContextBase {
uptr tls_end() { return tls_end_; }
uptr cache_begin() { return cache_begin_; }
uptr cache_end() { return cache_end_; }
+ DTLS *dtls() { return dtls_; }
+
private:
uptr stack_begin_, stack_end_,
cache_begin_, cache_end_,
tls_begin_, tls_end_;
+ DTLS *dtls_;
};
void InitializeThreadRegistry();
diff --git a/libsanitizer/merge.sh b/libsanitizer/merge.sh
index 29e444248f1..b3321027733 100755
--- a/libsanitizer/merge.sh
+++ b/libsanitizer/merge.sh
@@ -72,6 +72,10 @@ merge lib/sanitizer_common sanitizer_common
merge lib/interception interception
merge lib/ubsan ubsan
+# Need to merge lib/builtins/assembly.h file:
+mkdir -p builtins
+cp -v upstream/lib/builtins/assembly.h builtins/assembly.h
+
rm -rf upstream
# Update the MERGE file.
diff --git a/libsanitizer/sanitizer_common/Makefile.am b/libsanitizer/sanitizer_common/Makefile.am
index ee7a3f1dd2c..8b6336fd3ad 100644
--- a/libsanitizer/sanitizer_common/Makefile.am
+++ b/libsanitizer/sanitizer_common/Makefile.am
@@ -32,6 +32,7 @@ sanitizer_common_files = \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
+ sanitizer_linux_s390.cc \
sanitizer_mac.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
@@ -55,6 +56,7 @@ sanitizer_common_files = \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
+ sanitizer_termination.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_unwind_linux_libcdep.cc \
@@ -62,6 +64,9 @@ sanitizer_common_files = \
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
+EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S
+libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
+libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
diff --git a/libsanitizer/sanitizer_common/Makefile.in b/libsanitizer/sanitizer_common/Makefile.in
index 4b008ad7ae6..052802e176a 100644
--- a/libsanitizer/sanitizer_common/Makefile.in
+++ b/libsanitizer/sanitizer_common/Makefile.in
@@ -79,7 +79,7 @@ CONFIG_HEADER = $(top_builddir)/config.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
-libsanitizer_common_la_LIBADD =
+am__DEPENDENCIES_1 =
am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_common_libcdep.lo sanitizer_coverage_libcdep.lo \
sanitizer_coverage_mapping_libcdep.lo \
@@ -87,8 +87,8 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_deadlock_detector2.lo sanitizer_flags.lo \
sanitizer_flag_parser.lo sanitizer_libc.lo \
sanitizer_libignore.lo sanitizer_linux.lo \
- sanitizer_linux_libcdep.lo sanitizer_mac.lo \
- sanitizer_persistent_allocator.lo \
+ sanitizer_linux_libcdep.lo sanitizer_linux_s390.lo \
+ sanitizer_mac.lo sanitizer_persistent_allocator.lo \
sanitizer_platform_limits_linux.lo \
sanitizer_platform_limits_posix.lo sanitizer_posix.lo \
sanitizer_posix_libcdep.lo sanitizer_printf.lo \
@@ -102,15 +102,20 @@ am__objects_1 = sanitizer_allocator.lo sanitizer_common.lo \
sanitizer_symbolizer_libbacktrace.lo \
sanitizer_symbolizer_libcdep.lo \
sanitizer_symbolizer_posix_libcdep.lo \
- sanitizer_symbolizer_win.lo sanitizer_thread_registry.lo \
- sanitizer_tls_get_addr.lo sanitizer_unwind_linux_libcdep.lo \
- sanitizer_win.lo
+ sanitizer_symbolizer_win.lo sanitizer_termination.lo \
+ sanitizer_thread_registry.lo sanitizer_tls_get_addr.lo \
+ sanitizer_unwind_linux_libcdep.lo sanitizer_win.lo
am_libsanitizer_common_la_OBJECTS = $(am__objects_1)
libsanitizer_common_la_OBJECTS = $(am_libsanitizer_common_la_OBJECTS)
DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)
depcomp = $(SHELL) $(top_srcdir)/../depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
+CPPASCOMPILE = $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
+LTCPPASCOMPILE = $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS)
CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
@@ -120,7 +125,17 @@ CXXLD = $(CXX)
CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
--mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \
$(LDFLAGS) -o $@
-SOURCES = $(libsanitizer_common_la_SOURCES)
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+ $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
+ $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+CCLD = $(CC)
+LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \
+ --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \
+ $(LDFLAGS) -o $@
+SOURCES = $(libsanitizer_common_la_SOURCES) \
+ $(EXTRA_libsanitizer_common_la_SOURCES)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
@@ -198,6 +213,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
@@ -296,6 +312,7 @@ sanitizer_common_files = \
sanitizer_libignore.cc \
sanitizer_linux.cc \
sanitizer_linux_libcdep.cc \
+ sanitizer_linux_s390.cc \
sanitizer_mac.cc \
sanitizer_persistent_allocator.cc \
sanitizer_platform_limits_linux.cc \
@@ -319,12 +336,16 @@ sanitizer_common_files = \
sanitizer_symbolizer_libcdep.cc \
sanitizer_symbolizer_posix_libcdep.cc \
sanitizer_symbolizer_win.cc \
+ sanitizer_termination.cc \
sanitizer_thread_registry.cc \
sanitizer_tls_get_addr.cc \
sanitizer_unwind_linux_libcdep.cc \
sanitizer_win.cc
libsanitizer_common_la_SOURCES = $(sanitizer_common_files)
+EXTRA_libsanitizer_common_la_SOURCES = sanitizer_linux_mips64.S sanitizer_linux_x86_64.S
+libsanitizer_common_la_LIBADD = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
+libsanitizer_common_la_DEPENDENCIES = $(SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS)
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
@@ -368,7 +389,7 @@ MAKEOVERRIDES =
all: all-am
.SUFFIXES:
-.SUFFIXES: .cc .lo .o .obj
+.SUFFIXES: .S .cc .lo .o .obj
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
@@ -430,6 +451,9 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_libignore.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_libcdep.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_mips64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_s390.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_linux_x86_64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_persistent_allocator.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_platform_limits_linux.Plo@am__quote@
@@ -453,11 +477,33 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_posix_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_symbolizer_win.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_termination.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_thread_registry.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_tls_get_addr.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_unwind_linux_libcdep.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sanitizer_win.Plo@am__quote@
+.S.o:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ $<
+
+.S.obj:
+@am__fastdepCCAS_TRUE@ $(CPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(CPPASCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.S.lo:
+@am__fastdepCCAS_TRUE@ $(LTCPPASCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCCAS_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCCAS_FALSE@ DEPDIR=$(DEPDIR) $(CCASDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCCAS_FALSE@ $(LTCPPASCOMPILE) -c -o $@ $<
+
.cc.o:
@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCXX_TRUE@ $(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.cc b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
index b5e0bab91f1..2755853acbc 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.cc
@@ -11,39 +11,72 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_allocator.h"
+
#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
namespace __sanitizer {
// ThreadSanitizer for Go uses libc malloc/free.
-#if defined(SANITIZER_GO) || defined(SANITIZER_USE_MALLOC)
+#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
# if SANITIZER_LINUX && !SANITIZER_ANDROID
extern "C" void *__libc_malloc(uptr size);
+# if !SANITIZER_GO
+extern "C" void *__libc_memalign(uptr alignment, uptr size);
+# endif
+extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" void __libc_free(void *ptr);
-# define LIBC_MALLOC __libc_malloc
-# define LIBC_FREE __libc_free
# else
# include <stdlib.h>
-# define LIBC_MALLOC malloc
-# define LIBC_FREE free
+# define __libc_malloc malloc
+# if !SANITIZER_GO
+static void *__libc_memalign(uptr alignment, uptr size) {
+ void *p;
+ uptr error = posix_memalign(&p, alignment, size);
+ if (error) return nullptr;
+ return p;
+}
+# endif
+# define __libc_realloc realloc
+# define __libc_free free
# endif
-static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ (void)cache;
+#if !SANITIZER_GO
+ if (alignment == 0)
+ return __libc_malloc(size);
+ else
+ return __libc_memalign(alignment, size);
+#else
+  // Windows does not provide __libc_memalign/posix_memalign. It provides
+  // _aligned_malloc, but blocks allocated with it cannot be passed to free;
+  // they must go to _aligned_free, which the InternalAlloc interface does
+  // not account for. Alignment does not seem to be used anywhere in the
+  // runtime, so just call __libc_malloc for now.
+ DCHECK_EQ(alignment, 0);
+ return __libc_malloc(size);
+#endif
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
(void)cache;
- return LIBC_MALLOC(size);
+ return __libc_realloc(ptr, size);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
(void)cache;
- LIBC_FREE(ptr);
+ __libc_free(ptr);
}
InternalAllocator *internal_allocator() {
return 0;
}
-#else // SANITIZER_GO
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
static atomic_uint8_t internal_allocator_initialized;
@@ -66,13 +99,26 @@ InternalAllocator *internal_allocator() {
return internal_allocator_instance;
}
-static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache) {
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ if (alignment == 0) alignment = 8;
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Allocate(&internal_allocator_cache, size,
+ alignment, false);
+ }
+ return internal_allocator()->Allocate(cache, size, alignment, false);
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
+ uptr alignment = 8;
if (cache == 0) {
SpinMutexLock l(&internal_allocator_cache_mu);
- return internal_allocator()->Allocate(&internal_allocator_cache, size, 8,
- false);
+ return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
+ size, alignment);
}
- return internal_allocator()->Allocate(cache, size, 8, false);
+ return internal_allocator()->Reallocate(cache, ptr, size, alignment);
}
static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
@@ -83,20 +129,42 @@ static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
internal_allocator()->Deallocate(cache, ptr);
}
-#endif // SANITIZER_GO
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
-void *InternalAlloc(uptr size, InternalAllocatorCache *cache) {
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
if (size + sizeof(u64) < size)
return nullptr;
- void *p = RawInternalAlloc(size + sizeof(u64), cache);
+ void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
if (!p)
return nullptr;
((u64*)p)[0] = kBlockMagic;
return (char*)p + sizeof(u64);
}
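
InternalAlloc and its siblings share a small header scheme: a u64 magic is
prepended to every block and the caller receives the raw pointer advanced by
sizeof(u64). An illustrative layout:

    raw pointer               returned pointer
    v                         v
    [ kBlockMagic : 8 bytes ][ user data ... ]

InternalRealloc and InternalFree step back sizeof(u64) and CHECK the magic,
catching frees of pointers that did not come from InternalAlloc.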
+void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
+ if (!addr)
+ return InternalAlloc(size, cache);
+ if (size + sizeof(u64) < size)
+ return nullptr;
+ addr = (char*)addr - sizeof(u64);
+ size = size + sizeof(u64);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
+ void *p = RawInternalRealloc(addr, size, cache);
+ if (!p)
+ return nullptr;
+ return (char*)p + sizeof(u64);
+}
+
+void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
+ if (CallocShouldReturnNullDueToOverflow(count, size))
+ return internal_allocator()->ReturnNullOrDieOnBadRequest();
+ void *p = InternalAlloc(count * size, cache);
+ if (p) internal_memset(p, 0, count * size);
+ return p;
+}
+
void InternalFree(void *addr, InternalAllocatorCache *cache) {
if (!addr)
return;
@@ -138,7 +206,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
return (max / size) < n;
}
-void NORETURN ReportAllocatorCannotReturnNull() {
+static atomic_uint8_t reporting_out_of_memory = {0};
+
+bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
+ if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
Report("%s's allocator is terminating the process instead of returning 0\n",
SanitizerToolName);
Report("If you don't like this behavior set allocator_may_return_null=1\n");
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index d98528c722c..ba50acddbbf 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -18,271 +18,16 @@
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
#include "sanitizer_lfstack.h"
+#include "sanitizer_procmaps.h"
namespace __sanitizer {
-// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull();
-
-// SizeClassMap maps allocation sizes into size classes and back.
-// Class 0 corresponds to size 0.
-// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
-// Next 4 classes: 256 + i * 64 (i = 1 to 4).
-// Next 4 classes: 512 + i * 128 (i = 1 to 4).
-// ...
-// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
-// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
-//
-// This structure of the size class map gives us:
-// - Efficient table-free class-to-size and size-to-class functions.
-// - Difference between two consequent size classes is betweed 14% and 25%
-//
-// This class also gives a hint to a thread-caching allocator about the amount
-// of chunks that need to be cached per-thread:
-// - kMaxNumCached is the maximal number of chunks per size class.
-// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
-//
-// Part of output of SizeClassMap::Print():
-// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
-// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
-// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
-// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
-// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
-// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
-// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
-// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
-//
-// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
-// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
-// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
-// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
-// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
-// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
-// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
-// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
-//
-// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
-// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
-// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
-// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
-//
-// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
-// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
-// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
-// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
-//
-// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
-// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
-// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
-// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
-//
-// ...
-//
-// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
-// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
-// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
-// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
-//
-// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
-
-template <uptr kMaxSizeLog, uptr kMaxNumCachedT, uptr kMaxBytesCachedLog>
-class SizeClassMap {
- static const uptr kMinSizeLog = 4;
- static const uptr kMidSizeLog = kMinSizeLog + 4;
- static const uptr kMinSize = 1 << kMinSizeLog;
- static const uptr kMidSize = 1 << kMidSizeLog;
- static const uptr kMidClass = kMidSize / kMinSize;
- static const uptr S = 2;
- static const uptr M = (1 << S) - 1;
-
- public:
- static const uptr kMaxNumCached = kMaxNumCachedT;
- // We transfer chunks between central and thread-local free lists in batches.
- // For small size classes we allocate batches separately.
- // For large size classes we use one of the chunks to store the batch.
- struct TransferBatch {
- TransferBatch *next;
- uptr count;
- void *batch[kMaxNumCached];
- };
-
- static const uptr kMaxSize = 1UL << kMaxSizeLog;
- static const uptr kNumClasses =
- kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
- COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
- static const uptr kNumClassesRounded =
- kNumClasses == 32 ? 32 :
- kNumClasses <= 64 ? 64 :
- kNumClasses <= 128 ? 128 : 256;
-
- static uptr Size(uptr class_id) {
- if (class_id <= kMidClass)
- return kMinSize * class_id;
- class_id -= kMidClass;
- uptr t = kMidSize << (class_id >> S);
- return t + (t >> S) * (class_id & M);
- }
-
- static uptr ClassID(uptr size) {
- if (size <= kMidSize)
- return (size + kMinSize - 1) >> kMinSizeLog;
- if (size > kMaxSize) return 0;
- uptr l = MostSignificantSetBitIndex(size);
- uptr hbits = (size >> (l - S)) & M;
- uptr lbits = size & ((1 << (l - S)) - 1);
- uptr l1 = l - kMidSizeLog;
- return kMidClass + (l1 << S) + hbits + (lbits > 0);
- }
-
- static uptr MaxCached(uptr class_id) {
- if (class_id == 0) return 0;
- uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
- return Max<uptr>(1, Min(kMaxNumCached, n));
- }
-
- static void Print() {
- uptr prev_s = 0;
- uptr total_cached = 0;
- for (uptr i = 0; i < kNumClasses; i++) {
- uptr s = Size(i);
- if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
- Printf("\n");
- uptr d = s - prev_s;
- uptr p = prev_s ? (d * 100 / prev_s) : 0;
- uptr l = s ? MostSignificantSetBitIndex(s) : 0;
- uptr cached = MaxCached(i) * s;
- Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
- "cached: %zd %zd; id %zd\n",
- i, Size(i), d, p, l, MaxCached(i), cached, ClassID(s));
- total_cached += cached;
- prev_s = s;
- }
- Printf("Total cached: %zd\n", total_cached);
- }
-
- static bool SizeClassRequiresSeparateTransferBatch(uptr class_id) {
- return Size(class_id) < sizeof(TransferBatch) -
- sizeof(uptr) * (kMaxNumCached - MaxCached(class_id));
- }
-
- static void Validate() {
- for (uptr c = 1; c < kNumClasses; c++) {
- // Printf("Validate: c%zd\n", c);
- uptr s = Size(c);
- CHECK_NE(s, 0U);
- CHECK_EQ(ClassID(s), c);
- if (c != kNumClasses - 1)
- CHECK_EQ(ClassID(s + 1), c + 1);
- CHECK_EQ(ClassID(s - 1), c);
- if (c)
- CHECK_GT(Size(c), Size(c-1));
- }
- CHECK_EQ(ClassID(kMaxSize + 1), 0);
-
- for (uptr s = 1; s <= kMaxSize; s++) {
- uptr c = ClassID(s);
- // Printf("s%zd => c%zd\n", s, c);
- CHECK_LT(c, kNumClasses);
- CHECK_GE(Size(c), s);
- if (c > 0)
- CHECK_LT(Size(c-1), s);
- }
- }
-};
+// Returns true if ReportAllocatorCannotReturnNull(true) was called.
+// Can be used to avoid memory-hungry operations.
+bool IsReportingOOM();
-typedef SizeClassMap<17, 128, 16> DefaultSizeClassMap;
-typedef SizeClassMap<17, 64, 14> CompactSizeClassMap;
-template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
-
-// Memory allocator statistics
-enum AllocatorStat {
- AllocatorStatAllocated,
- AllocatorStatMapped,
- AllocatorStatCount
-};
-
-typedef uptr AllocatorStatCounters[AllocatorStatCount];
-
-// Per-thread stats, live in per-thread cache.
-class AllocatorStats {
- public:
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- }
- void InitLinkerInitialized() {}
-
- void Add(AllocatorStat i, uptr v) {
- v += atomic_load(&stats_[i], memory_order_relaxed);
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Sub(AllocatorStat i, uptr v) {
- v = atomic_load(&stats_[i], memory_order_relaxed) - v;
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- void Set(AllocatorStat i, uptr v) {
- atomic_store(&stats_[i], v, memory_order_relaxed);
- }
-
- uptr Get(AllocatorStat i) const {
- return atomic_load(&stats_[i], memory_order_relaxed);
- }
-
- private:
- friend class AllocatorGlobalStats;
- AllocatorStats *next_;
- AllocatorStats *prev_;
- atomic_uintptr_t stats_[AllocatorStatCount];
-};
-
-// Global stats, used for aggregation and querying.
-class AllocatorGlobalStats : public AllocatorStats {
- public:
- void InitLinkerInitialized() {
- next_ = this;
- prev_ = this;
- }
- void Init() {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized();
- }
-
- void Register(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->next_ = next_;
- s->prev_ = this;
- next_->prev_ = s;
- next_ = s;
- }
-
- void Unregister(AllocatorStats *s) {
- SpinMutexLock l(&mu_);
- s->prev_->next_ = s->next_;
- s->next_->prev_ = s->prev_;
- for (int i = 0; i < AllocatorStatCount; i++)
- Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
- }
-
- void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
- SpinMutexLock l(&mu_);
- const AllocatorStats *stats = this;
- for (;;) {
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] += stats->Get(AllocatorStat(i));
- stats = stats->next_;
- if (stats == this)
- break;
- }
- // All stats must be non-negative.
- for (int i = 0; i < AllocatorStatCount; i++)
- s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
- }
-
- private:
- mutable SpinMutex mu_;
-};
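The per-thread/global pairing above is an intrusive circular list of counter nodes that is summed on demand; a portable sketch of the same pattern, with std::atomic and std::mutex standing in for the sanitizer primitives:

#include <atomic>
#include <cstdio>
#include <mutex>

// Sketch of the stats pattern above: thread-local counter nodes are linked
// into a circular list headed by the global object; Total() walks the ring.
struct Stats {
  std::atomic<size_t> allocated{0};
  Stats *next = nullptr, *prev = nullptr;
};

struct GlobalStats : Stats {
  std::mutex mu;
  GlobalStats() { next = prev = this; }
  void Register(Stats *s) {
    std::lock_guard<std::mutex> l(mu);
    s->next = next; s->prev = this;
    next->prev = s; next = s;
  }
  size_t Total() {
    std::lock_guard<std::mutex> l(mu);
    size_t sum = 0;
    for (Stats *s = this;;) {
      sum += s->allocated.load(std::memory_order_relaxed);
      s = s->next;
      if (s == this) break;
    }
    return sum;
  }
};

int main() {
  GlobalStats g;
  Stats t1, t2;
  g.Register(&t1); g.Register(&t2);
  t1.allocated += 128; t2.allocated += 64;
  printf("total allocated: %zu\n", g.Total());  // prints 192
}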
+// Prints an error message and kills the program.
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);
// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
@@ -293,1161 +38,18 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
-// SizeClassAllocator64 -- allocator for 64-bit address space.
-//
-// Space: a portion of address space of kSpaceSize bytes starting at
-// a fixed address (kSpaceBeg). Both constants are powers of two and
-// kSpaceBeg is kSpaceSize-aligned.
-// At the beginning the entire space is mprotect-ed, then small parts of it
-// are mapped on demand.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
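The mirrored layout means both addresses are pure offset arithmetic from the region bounds; a small illustrative sketch (addresses and sizes are hypothetical, not taken from this diff):

#include <cstdint>
#include <cstdio>

// Illustrative region math: user chunks grow up from the region start,
// metadata chunks grow down from the region end (mirroring GetMetaData below).
int main() {
  const uint64_t kRegionSize = 1ULL << 32, kMetadataSize = 16;
  const uint64_t region_beg = 0x600000000000ULL;  // hypothetical
  uint64_t chunk_size = 256, chunk_idx = 3;
  uint64_t user = region_beg + chunk_idx * chunk_size;
  uint64_t meta = region_beg + kRegionSize - (1 + chunk_idx) * kMetadataSize;
  printf("user chunk at 0x%llx, its metadata at 0x%llx\n",
         (unsigned long long)user, (unsigned long long)meta);
}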
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator64 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- CHECK_EQ(kSpaceBeg,
- reinterpret_cast<uptr>(MmapNoAccess(kSpaceBeg, kSpaceSize)));
- MapWithCallback(kSpaceEnd, AdditionalSize());
- }
-
- void MapWithCallback(uptr beg, uptr size) {
- CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
- MapUnmapCallback().OnMap(beg, size);
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- Batch *b = region->free_list.Pop();
- if (!b)
- b = PopulateFreeList(stat, c, class_id, region);
- region->n_allocated += b->count;
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- RegionInfo *region = GetRegionInfo(class_id);
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- region->n_freed += b->count;
- }
-
- static bool PointerIsMine(const void *p) {
- return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
- }
-
- static uptr GetSizeClass(const void *p) {
- return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClassesRounded;
- }
-
- void *GetBlockBegin(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- if (!size) return nullptr;
- uptr chunk_idx = GetChunkIdx((uptr)p, size);
- uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
- uptr beg = chunk_idx * size;
- uptr next_beg = beg + size;
- if (class_id >= kNumClasses) return nullptr;
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user >= next_beg)
- return reinterpret_cast<void*>(reg_beg + beg);
- return nullptr;
- }
-
- static uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- void *GetMetaData(const void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
- return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize);
- }
-
- uptr TotalMemoryUsed() {
- uptr res = 0;
- for (uptr i = 0; i < kNumClasses; i++)
- res += GetRegionInfo(i)->allocated_user;
- return res;
- }
-
- // Test-only.
- void TestOnlyUnmap() {
- UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
- }
-
- void PrintStats() {
- uptr total_mapped = 0;
- uptr n_allocated = 0;
- uptr n_freed = 0;
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- total_mapped += region->mapped_user;
- n_allocated += region->n_allocated;
- n_freed += region->n_freed;
- }
- Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
- "remains %zd\n",
- total_mapped >> 20, n_allocated, n_allocated - n_freed);
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- if (region->mapped_user == 0) continue;
- Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
- class_id,
- SizeClassMap::Size(class_id),
- region->mapped_user >> 10,
- region->n_allocated,
- region->n_allocated - region->n_freed);
- }
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetRegionInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = (int)kNumClasses - 1; i >= 0; i--) {
- GetRegionInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
- RegionInfo *region = GetRegionInfo(class_id);
- uptr chunk_size = SizeClassMap::Size(class_id);
- uptr region_beg = kSpaceBeg + class_id * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + region->allocated_user;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- static uptr AdditionalSize() {
- return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
- GetPageSizeCached());
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
- static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
-
- private:
- static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
- static const uptr kSpaceEnd = kSpaceBeg + kSpaceSize;
- COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 14;
- // Call mmap for user memory with at least this size.
- static const uptr kUserMapSize = 1 << 16;
- // Call mmap for metadata memory with at least this size.
- static const uptr kMetaMapSize = 1 << 16;
-
- struct RegionInfo {
- BlockingMutex mutex;
- LFStack<Batch> free_list;
- uptr allocated_user; // Bytes allocated for user memory.
- uptr allocated_meta; // Bytes allocated for metadata.
- uptr mapped_user; // Bytes mapped for user memory.
- uptr mapped_meta; // Bytes mapped for metadata.
- uptr n_allocated, n_freed; // Just stats.
- };
- COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
-
- RegionInfo *GetRegionInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
- return &regions[class_id];
- }
-
- static uptr GetChunkIdx(uptr chunk, uptr size) {
- uptr offset = chunk % kRegionSize;
- // Here we divide by a non-constant. This is costly.
-    // size always fits into 32 bits. If the offset fits too, use 32-bit div.
- if (offset >> (SANITIZER_WORDSIZE / 2))
- return offset / size;
- return (u32)offset / (u32)size;
- }
-
- NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id, RegionInfo *region) {
- BlockingMutexLock l(&region->mutex);
- Batch *b = region->free_list.Pop();
- if (b)
- return b;
- uptr size = SizeClassMap::Size(class_id);
- uptr count = size < kPopulateSize ? SizeClassMap::MaxCached(class_id) : 1;
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + count * size;
- uptr region_beg = kSpaceBeg + kRegionSize * class_id;
- if (end_idx + size > region->mapped_user) {
- // Do the mmap for the user memory.
- uptr map_size = kUserMapSize;
- while (end_idx + size > region->mapped_user + map_size)
- map_size += kUserMapSize;
- CHECK_GE(region->mapped_user + map_size, end_idx);
- MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- region->mapped_user += map_size;
- }
- uptr total_count = (region->mapped_user - beg_idx - size)
- / size / count * count;
- region->allocated_meta += total_count * kMetadataSize;
- if (region->allocated_meta > region->mapped_meta) {
- uptr map_size = kMetaMapSize;
- while (region->allocated_meta > region->mapped_meta + map_size)
- map_size += kMetaMapSize;
- // Do the mmap for the metadata.
- CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
- MapWithCallback(region_beg + kRegionSize -
- region->mapped_meta - map_size, map_size);
- region->mapped_meta += map_size;
- }
- CHECK_LE(region->allocated_meta, region->mapped_meta);
- if (region->mapped_user + region->mapped_meta > kRegionSize) {
- Printf("%s: Out of memory. Dying. ", SanitizerToolName);
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize / 1024 / 1024, size);
- Die();
- }
- for (;;) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)(region_beg + beg_idx);
- b->count = count;
- for (uptr i = 0; i < count; i++)
- b->batch[i] = (void*)(region_beg + beg_idx + i * size);
- region->allocated_user += count * size;
- CHECK_LE(region->allocated_user, region->mapped_user);
- beg_idx += count * size;
- if (beg_idx + count * size + size > region->mapped_user)
- break;
- CHECK_GT(b->count, 0);
- region->free_list.Push(b);
- }
- return b;
- }
-};
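For orientation, this is roughly how a tool instantiates the 64-bit primary (a sketch assuming the surrounding sanitizer_allocator.h context; kAllocatorSpace and kAllocatorSize are illustrative values, not taken from this diff):

// Sketch: a typical 64-bit primary instantiation. The space constants are
// hypothetical; real tools pick shadow-compatible values per platform.
static const uptr kAllocatorSpace = 0x600000000000ULL;
static const uptr kAllocatorSize  =  0x40000000000ULL;  // 4T
typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
                             16 /* metadata */, DefaultSizeClassMap>
    PrimaryAllocator64;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator64> AllocatorCache64;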
-
-// Maps integers in range [0, kSize) to u8 values.
-template<u64 kSize>
-class FlatByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map_, 0, sizeof(map_));
- }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize);
- CHECK_EQ(0U, map_[idx]);
- map_[idx] = val;
- }
- u8 operator[] (uptr idx) {
- CHECK_LT(idx, kSize);
- // FIXME: CHECK may be too expensive here.
- return map_[idx];
- }
- private:
- u8 map_[kSize];
-};
-
-// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
-// It is implemented as a two-dimensional array: array of kSize1 pointers
-// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
-// Each value is initially zero and can be set to something else only once.
-// Setting and getting values from multiple threads is safe w/o extra locking.
-template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
-class TwoLevelByteMap {
- public:
- void TestOnlyInit() {
- internal_memset(map1_, 0, sizeof(map1_));
- mu_.Init();
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kSize1; i++) {
- u8 *p = Get(i);
- if (!p) continue;
- MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
- UnmapOrDie(p, kSize2);
- }
- }
-
- uptr size() const { return kSize1 * kSize2; }
- uptr size1() const { return kSize1; }
- uptr size2() const { return kSize2; }
-
- void set(uptr idx, u8 val) {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = GetOrCreate(idx / kSize2);
- CHECK_EQ(0U, map2[idx % kSize2]);
- map2[idx % kSize2] = val;
- }
-
- u8 operator[] (uptr idx) const {
- CHECK_LT(idx, kSize1 * kSize2);
- u8 *map2 = Get(idx / kSize2);
- if (!map2) return 0;
- return map2[idx % kSize2];
- }
-
- private:
- u8 *Get(uptr idx) const {
- CHECK_LT(idx, kSize1);
- return reinterpret_cast<u8 *>(
- atomic_load(&map1_[idx], memory_order_acquire));
- }
-
- u8 *GetOrCreate(uptr idx) {
- u8 *res = Get(idx);
- if (!res) {
- SpinMutexLock l(&mu_);
- if (!(res = Get(idx))) {
- res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
- MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
- atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
- memory_order_release);
- }
- }
- return res;
- }
-
- atomic_uintptr_t map1_[kSize1];
- StaticSpinMutex mu_;
-};
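Usage is write-once set() plus lock-free reads; a short sketch assuming the same header context (the map dimensions are illustrative):

// Sketch: mapping region ids to size classes, as SizeClassAllocator32
// does below. Leaves are mmaped lazily on the first set() that hits them.
static TwoLevelByteMap<(1 << 12), (1 << 12)> region_classes;
void TwoLevelByteMapExample() {
  region_classes.TestOnlyInit();
  region_classes.set(/*idx=*/42, /*val=*/7);  // each index set at most once
  u8 cls = region_classes[42];                // acquire-load, no lock
  (void)cls;
}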
-
-// SizeClassAllocator32 -- allocator for 32-bit address space.
-// This allocator can theoretically be used on a 64-bit arch, but there it is
-// less efficient than SizeClassAllocator64.
-//
-// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
-// be returned by MmapOrDie().
-//
-// Region:
-// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
-// Since the regions are aligned by kRegionSize, there are exactly
-// kNumPossibleRegions possible regions in the address space and so we keep
-// a ByteMap possible_regions to store the size classes of each Region.
-// 0 size class means the region is not used by the allocator.
-//
-// One Region is used to allocate chunks of a single size class.
-// A Region looks like this:
-// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
-//
-// In order to avoid false sharing, the objects of this class should be
-// cache-line aligned.
-template <const uptr kSpaceBeg, const u64 kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap,
- const uptr kRegionSizeLog,
- class ByteMap,
- class MapUnmapCallback = NoOpMapUnmapCallback>
-class SizeClassAllocator32 {
- public:
- typedef typename SizeClassMap::TransferBatch Batch;
- typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
- SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
- typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
-
- void Init() {
- possible_regions.TestOnlyInit();
- internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- }
-
- void *MapWithCallback(uptr size) {
- size = RoundUpTo(size, GetPageSizeCached());
- void *res = MmapOrDie(size, "SizeClassAllocator32");
- MapUnmapCallback().OnMap((uptr)res, size);
- return res;
- }
-
- void UnmapWithCallback(uptr beg, uptr size) {
- MapUnmapCallback().OnUnmap(beg, size);
- UnmapOrDie(reinterpret_cast<void *>(beg), size);
- }
-
- static bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- void *GetMetaData(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- uptr n = offset / (u32)size; // 32-bit division
- uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
- return reinterpret_cast<void*>(meta);
- }
-
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- if (sci->free_list.empty())
- PopulateFreeList(stat, c, sci, class_id);
- CHECK(!sci->free_list.empty());
- Batch *b = sci->free_list.front();
- sci->free_list.pop_front();
- return b;
- }
-
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
- CHECK_LT(class_id, kNumClasses);
- SizeClassInfo *sci = GetSizeClassInfo(class_id);
- SpinMutexLock l(&sci->mutex);
- CHECK_GT(b->count, 0);
- sci->free_list.push_front(b);
- }
-
- bool PointerIsMine(const void *p) {
- return GetSizeClass(p) != 0;
- }
-
- uptr GetSizeClass(const void *p) {
- return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
- }
-
- void *GetBlockBegin(const void *p) {
- CHECK(PointerIsMine(p));
- uptr mem = reinterpret_cast<uptr>(p);
- uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
- u32 offset = mem - beg;
- u32 n = offset / (u32)size; // 32-bit division
- uptr res = beg + (n * (u32)size);
- return reinterpret_cast<void*>(res);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- uptr TotalMemoryUsed() {
- // No need to lock here.
- uptr res = 0;
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- res += kRegionSize;
- return res;
- }
-
- void TestOnlyUnmap() {
- for (uptr i = 0; i < kNumPossibleRegions; i++)
- if (possible_regions[i])
- UnmapWithCallback((i * kRegionSize), kRegionSize);
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- for (uptr i = 0; i < kNumClasses; i++) {
- GetSizeClassInfo(i)->mutex.Lock();
- }
- }
-
- void ForceUnlock() {
- for (int i = kNumClasses - 1; i >= 0; i--) {
- GetSizeClassInfo(i)->mutex.Unlock();
- }
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr region = 0; region < kNumPossibleRegions; region++)
- if (possible_regions[region]) {
- uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
- uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
- uptr region_beg = region * kRegionSize;
- for (uptr chunk = region_beg;
- chunk < region_beg + max_chunks_in_region * chunk_size;
- chunk += chunk_size) {
- // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
- callback(chunk, arg);
- }
- }
- }
-
- void PrintStats() {
- }
-
- static uptr AdditionalSize() {
- return 0;
- }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses;
-
- private:
- static const uptr kRegionSize = 1 << kRegionSizeLog;
- static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
-
- struct SizeClassInfo {
- SpinMutex mutex;
- IntrusiveList<Batch> free_list;
- char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
- };
- COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
-
- uptr ComputeRegionId(uptr mem) {
- uptr res = mem >> kRegionSizeLog;
- CHECK_LT(res, kNumPossibleRegions);
- return res;
- }
-
- uptr ComputeRegionBeg(uptr mem) {
- return mem & ~(kRegionSize - 1);
- }
-
- uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
- "SizeClassAllocator32"));
- MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMapped, kRegionSize);
- CHECK_EQ(0U, (res & (kRegionSize - 1)));
- possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
- return res;
- }
-
- SizeClassInfo *GetSizeClassInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- return &size_class_info_array[class_id];
- }
-
- void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- SizeClassInfo *sci, uptr class_id) {
- uptr size = SizeClassMap::Size(class_id);
- uptr reg = AllocateRegion(stat, class_id);
- uptr n_chunks = kRegionSize / (size + kMetadataSize);
- uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = nullptr;
- for (uptr i = reg; i < reg + n_chunks * size; i += size) {
- if (!b) {
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)c->Allocate(this, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)i;
- b->count = 0;
- }
- b->batch[b->count++] = (void*)i;
- if (b->count == max_count) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- b = nullptr;
- }
- }
- if (b) {
- CHECK_GT(b->count, 0);
- sci->free_list.push_back(b);
- }
- }
-
- ByteMap possible_regions;
- SizeClassInfo size_class_info_array[kNumClasses];
-};
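And the 32-bit counterpart, again as a hedged sketch in the same header context (the region size and metadata constants are illustrative):

// Sketch: a 32-bit primary over a flat byte map; one byte per possible
// 1M-aligned region records its size class (0 = not ours).
static const uptr kRegionSizeLog32 = 20;
static const u64 kNumRegions32 = (1ULL << 32) >> kRegionSizeLog32;
typedef FlatByteMap<kNumRegions32> ByteMap32;
typedef SizeClassAllocator32<0, 1ULL << 32, 16 /* metadata */,
                             CompactSizeClassMap, kRegionSizeLog32,
                             ByteMap32>
    PrimaryAllocator32;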
-
-// Objects of this type should be used as local caches for SizeClassAllocator64
-// or SizeClassAllocator32. Since the typical use of this class is to have one
-// object per thread in TLS, it has to be POD.
-template<class SizeClassAllocator>
-struct SizeClassAllocatorLocalCache {
- typedef SizeClassAllocator Allocator;
- static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
-
- void Init(AllocatorGlobalStats *s) {
- stats_.Init();
- if (s)
- s->Register(&stats_);
- }
-
- void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
- Drain(allocator);
- if (s)
- s->Unregister(&stats_);
- }
-
- void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- if (UNLIKELY(c->count == 0))
- Refill(allocator, class_id);
- void *res = c->batch[--c->count];
- PREFETCH(c->batch[c->count - 1]);
- return res;
- }
-
- void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
- CHECK_NE(class_id, 0UL);
- CHECK_LT(class_id, kNumClasses);
- // If the first allocator call on a new thread is a deallocation, then
- // max_count will be zero, leading to check failure.
- InitCache();
- stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
- PerClass *c = &per_class_[class_id];
- CHECK_NE(c->max_count, 0UL);
- if (UNLIKELY(c->count == c->max_count))
- Drain(allocator, class_id);
- c->batch[c->count++] = p;
- }
-
- void Drain(SizeClassAllocator *allocator) {
- for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
- PerClass *c = &per_class_[class_id];
- while (c->count > 0)
- Drain(allocator, class_id);
- }
- }
-
- // private:
- typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- typedef typename SizeClassMap::TransferBatch Batch;
- struct PerClass {
- uptr count;
- uptr max_count;
- void *batch[2 * SizeClassMap::kMaxNumCached];
- };
- PerClass per_class_[kNumClasses];
- AllocatorStats stats_;
-
- void InitCache() {
- if (per_class_[1].max_count)
- return;
- for (uptr i = 0; i < kNumClasses; i++) {
- PerClass *c = &per_class_[i];
- c->max_count = 2 * SizeClassMap::MaxCached(i);
- }
- }
-
- NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
- CHECK_GT(b->count, 0);
- for (uptr i = 0; i < b->count; i++)
- c->batch[i] = b->batch[i];
- c->count = b->count;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- Deallocate(allocator, SizeClassMap::ClassID(sizeof(Batch)), b);
- }
-
- NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
- InitCache();
- PerClass *c = &per_class_[class_id];
- Batch *b;
- if (SizeClassMap::SizeClassRequiresSeparateTransferBatch(class_id))
- b = (Batch*)Allocate(allocator, SizeClassMap::ClassID(sizeof(Batch)));
- else
- b = (Batch*)c->batch[0];
- uptr cnt = Min(c->max_count / 2, c->count);
- for (uptr i = 0; i < cnt; i++) {
- b->batch[i] = c->batch[i];
- c->batch[i] = c->batch[i + c->max_count / 2];
- }
- b->count = cnt;
- c->count -= cnt;
- CHECK_GT(b->count, 0);
- allocator->DeallocateBatch(&stats_, class_id, b);
- }
-};
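Putting the cache in front of a primary looks like this (a sketch; PrimaryAllocator64 is the hypothetical instantiation from the sketch above, and the global-stats argument is left out for brevity):

// Sketch of the per-thread fast path: class-id lookup, then cached
// LIFO alloc/free with batch refill/drain handled internally.
void CacheExample(PrimaryAllocator64 *allocator) {
  SizeClassAllocatorLocalCache<PrimaryAllocator64> cache;
  cache.Init(/*global_stats=*/nullptr);
  uptr class_id = allocator->ClassID(96);        // class covering 96 bytes
  void *p = cache.Allocate(allocator, class_id);
  cache.Deallocate(allocator, class_id, p);
  cache.Destroy(allocator, /*global_stats=*/nullptr);
}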
-
-// This class can (de)allocate only large chunks of memory using mmap/unmap.
-// The main purpose of this allocator is to cover large and rare allocation
-// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
-class LargeMmapAllocator {
- public:
- void InitLinkerInitialized(bool may_return_null) {
- page_size_ = GetPageSizeCached();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void Init(bool may_return_null) {
- internal_memset(this, 0, sizeof(*this));
- InitLinkerInitialized(may_return_null);
- }
-
- void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
- CHECK(IsPowerOfTwo(alignment));
- uptr map_size = RoundUpMapSize(size);
- if (alignment > page_size_)
- map_size += alignment;
- // Overflow.
- if (map_size < size)
- return ReturnNullOrDie();
- uptr map_beg = reinterpret_cast<uptr>(
- MmapOrDie(map_size, "LargeMmapAllocator"));
- CHECK(IsAligned(map_beg, page_size_));
- MapUnmapCallback().OnMap(map_beg, map_size);
- uptr map_end = map_beg + map_size;
- uptr res = map_beg + page_size_;
- if (res & (alignment - 1)) // Align.
- res += alignment - (res & (alignment - 1));
- CHECK(IsAligned(res, alignment));
- CHECK(IsAligned(res, page_size_));
- CHECK_GE(res + size, map_beg);
- CHECK_LE(res + size, map_end);
- Header *h = GetHeader(res);
- h->size = size;
- h->map_beg = map_beg;
- h->map_size = map_size;
- uptr size_log = MostSignificantSetBitIndex(map_size);
- CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
- {
- SpinMutexLock l(&mutex_);
- uptr idx = n_chunks_++;
- chunks_sorted_ = false;
- CHECK_LT(idx, kMaxNumChunks);
- h->chunk_idx = idx;
- chunks_[idx] = h;
- stats.n_allocs++;
- stats.currently_allocated += map_size;
- stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
- stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatAllocated, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- }
- return reinterpret_cast<void*>(res);
- }
-
- void *ReturnNullOrDie() {
- if (atomic_load(&may_return_null_, memory_order_acquire))
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- void Deallocate(AllocatorStats *stat, void *p) {
- Header *h = GetHeader(p);
- {
- SpinMutexLock l(&mutex_);
- uptr idx = h->chunk_idx;
- CHECK_EQ(chunks_[idx], h);
- CHECK_LT(idx, n_chunks_);
- chunks_[idx] = chunks_[n_chunks_ - 1];
- chunks_[idx]->chunk_idx = idx;
- n_chunks_--;
- chunks_sorted_ = false;
- stats.n_frees++;
- stats.currently_allocated -= h->map_size;
- stat->Sub(AllocatorStatAllocated, h->map_size);
- stat->Sub(AllocatorStatMapped, h->map_size);
- }
- MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
- UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
- }
-
- uptr TotalMemoryUsed() {
- SpinMutexLock l(&mutex_);
- uptr res = 0;
- for (uptr i = 0; i < n_chunks_; i++) {
- Header *h = chunks_[i];
- CHECK_EQ(h->chunk_idx, i);
- res += RoundUpMapSize(h->size);
- }
- return res;
- }
-
- bool PointerIsMine(const void *p) {
- return GetBlockBegin(p) != nullptr;
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- return RoundUpTo(GetHeader(p)->size, page_size_);
- }
-
-  // At least page_size_/2 metadata bytes are available.
- void *GetMetaData(const void *p) {
- // Too slow: CHECK_EQ(p, GetBlockBegin(p));
- if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
- Printf("%s: bad pointer %p\n", SanitizerToolName, p);
- CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
- }
- return GetHeader(p) + 1;
- }
-
- void *GetBlockBegin(const void *ptr) {
- uptr p = reinterpret_cast<uptr>(ptr);
- SpinMutexLock l(&mutex_);
- uptr nearest_chunk = 0;
- // Cache-friendly linear search.
- for (uptr i = 0; i < n_chunks_; i++) {
- uptr ch = reinterpret_cast<uptr>(chunks_[i]);
-      if (p < ch) continue;  // p is to the left of this chunk, skip it.
- if (p - ch < p - nearest_chunk)
- nearest_chunk = ch;
- }
- if (!nearest_chunk)
- return nullptr;
- Header *h = reinterpret_cast<Header *>(nearest_chunk);
- CHECK_GE(nearest_chunk, h->map_beg);
- CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
- CHECK_LE(nearest_chunk, p);
- if (h->map_beg + h->map_size <= p)
- return nullptr;
- return GetUser(h);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *ptr) {
- mutex_.CheckLocked();
- uptr p = reinterpret_cast<uptr>(ptr);
- uptr n = n_chunks_;
- if (!n) return nullptr;
- if (!chunks_sorted_) {
- // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
- SortArray(reinterpret_cast<uptr*>(chunks_), n);
- for (uptr i = 0; i < n; i++)
- chunks_[i]->chunk_idx = i;
- chunks_sorted_ = true;
- min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
- max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
- chunks_[n - 1]->map_size;
- }
- if (p < min_mmap_ || p >= max_mmap_)
- return nullptr;
- uptr beg = 0, end = n - 1;
- // This loop is a log(n) lower_bound. It does not check for the exact match
- // to avoid expensive cache-thrashing loads.
- while (end - beg >= 2) {
- uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
- if (p < reinterpret_cast<uptr>(chunks_[mid]))
- end = mid - 1; // We are not interested in chunks_[mid].
- else
- beg = mid; // chunks_[mid] may still be what we want.
- }
-
- if (beg < end) {
- CHECK_EQ(beg + 1, end);
- // There are 2 chunks left, choose one.
- if (p >= reinterpret_cast<uptr>(chunks_[end]))
- beg = end;
- }
-
- Header *h = chunks_[beg];
- if (h->map_beg + h->map_size <= p || p < h->map_beg)
- return nullptr;
- return GetUser(h);
- }
-
- void PrintStats() {
- Printf("Stats: LargeMmapAllocator: allocated %zd times, "
- "remains %zd (%zd K) max %zd M; by size logs: ",
- stats.n_allocs, stats.n_allocs - stats.n_frees,
- stats.currently_allocated >> 10, stats.max_allocated >> 20);
- for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
- uptr c = stats.by_size_log[i];
- if (!c) continue;
- Printf("%zd:%zd; ", i, c);
- }
- Printf("\n");
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- mutex_.Lock();
- }
-
- void ForceUnlock() {
- mutex_.Unlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- for (uptr i = 0; i < n_chunks_; i++)
- callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
- }
-
- private:
- static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
- struct Header {
- uptr map_beg;
- uptr map_size;
- uptr size;
- uptr chunk_idx;
- };
-
- Header *GetHeader(uptr p) {
- CHECK(IsAligned(p, page_size_));
- return reinterpret_cast<Header*>(p - page_size_);
- }
- Header *GetHeader(const void *p) {
- return GetHeader(reinterpret_cast<uptr>(p));
- }
-
- void *GetUser(Header *h) {
- CHECK(IsAligned((uptr)h, page_size_));
- return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
- }
-
- uptr RoundUpMapSize(uptr size) {
- return RoundUpTo(size, page_size_) + page_size_;
- }
-
- uptr page_size_;
- Header *chunks_[kMaxNumChunks];
- uptr n_chunks_;
- uptr min_mmap_, max_mmap_;
- bool chunks_sorted_;
- struct Stats {
- uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
- } stats;
- atomic_uint8_t may_return_null_;
- SpinMutex mutex_;
-};
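The secondary's bookkeeping is one page of header in front of each user block, which is what makes GetHeader() a constant-offset lookup; an illustrative sketch of that layout math (the addresses are hypothetical):

#include <cstdint>
#include <cstdio>

// Illustrative layout math for LargeMmapAllocator: one header page
// precedes the user block, so the header is found at p - page_size.
int main() {
  const uint64_t page = 4096;
  uint64_t map_beg = 0x7f0000000000ULL;  // hypothetical mmap result
  uint64_t user = map_beg + page;        // pointer returned to the caller
  uint64_t header = user - page;         // == map_beg
  printf("map 0x%llx header 0x%llx user 0x%llx\n",
         (unsigned long long)map_beg, (unsigned long long)header,
         (unsigned long long)user);
}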
-
-// This class implements a complete memory allocator by using two
-// internal allocators:
-// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
-// When allocating 2^x bytes it should return 2^x aligned chunk.
-// PrimaryAllocator is used via a local AllocatorCache.
-// SecondaryAllocator can allocate anything, but is not efficient.
-template <class PrimaryAllocator, class AllocatorCache,
- class SecondaryAllocator> // NOLINT
-class CombinedAllocator {
- public:
- void InitCommon(bool may_return_null) {
- primary_.Init();
- atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
- }
-
- void InitLinkerInitialized(bool may_return_null) {
- secondary_.InitLinkerInitialized(may_return_null);
- stats_.InitLinkerInitialized();
- InitCommon(may_return_null);
- }
-
- void Init(bool may_return_null) {
- secondary_.Init(may_return_null);
- stats_.Init();
- InitCommon(may_return_null);
- }
-
- void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
- bool cleared = false, bool check_rss_limit = false) {
- // Returning 0 on malloc(0) may break a lot of code.
- if (size == 0)
- size = 1;
- if (size + alignment < size)
- return ReturnNullOrDie();
- if (check_rss_limit && RssLimitIsExceeded())
- return ReturnNullOrDie();
- if (alignment > 8)
- size = RoundUpTo(size, alignment);
- void *res;
- bool from_primary = primary_.CanAllocate(size, alignment);
- if (from_primary)
- res = cache->Allocate(&primary_, primary_.ClassID(size));
- else
- res = secondary_.Allocate(&stats_, size, alignment);
- if (alignment > 8)
- CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
- if (cleared && res && from_primary)
- internal_bzero_aligned16(res, RoundUpTo(size, 16));
- return res;
- }
-
- bool MayReturnNull() const {
- return atomic_load(&may_return_null_, memory_order_acquire);
- }
-
- void *ReturnNullOrDie() {
- if (MayReturnNull())
- return nullptr;
- ReportAllocatorCannotReturnNull();
- }
-
- void SetMayReturnNull(bool may_return_null) {
- secondary_.SetMayReturnNull(may_return_null);
- atomic_store(&may_return_null_, may_return_null, memory_order_release);
- }
-
- bool RssLimitIsExceeded() {
- return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
- }
-
- void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
- atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
- memory_order_release);
- }
-
- void Deallocate(AllocatorCache *cache, void *p) {
- if (!p) return;
- if (primary_.PointerIsMine(p))
- cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
- else
- secondary_.Deallocate(&stats_, p);
- }
-
- void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
- uptr alignment) {
- if (!p)
- return Allocate(cache, new_size, alignment);
- if (!new_size) {
- Deallocate(cache, p);
- return nullptr;
- }
- CHECK(PointerIsMine(p));
- uptr old_size = GetActuallyAllocatedSize(p);
- uptr memcpy_size = Min(new_size, old_size);
- void *new_p = Allocate(cache, new_size, alignment);
- if (new_p)
- internal_memcpy(new_p, p, memcpy_size);
- Deallocate(cache, p);
- return new_p;
- }
-
- bool PointerIsMine(void *p) {
- if (primary_.PointerIsMine(p))
- return true;
- return secondary_.PointerIsMine(p);
- }
-
- bool FromPrimary(void *p) {
- return primary_.PointerIsMine(p);
- }
-
- void *GetMetaData(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetMetaData(p);
- return secondary_.GetMetaData(p);
- }
-
- void *GetBlockBegin(const void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBegin(p);
- }
-
- // This function does the same as GetBlockBegin, but is much faster.
- // Must be called with the allocator locked.
- void *GetBlockBeginFastLocked(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetBlockBegin(p);
- return secondary_.GetBlockBeginFastLocked(p);
- }
-
- uptr GetActuallyAllocatedSize(void *p) {
- if (primary_.PointerIsMine(p))
- return primary_.GetActuallyAllocatedSize(p);
- return secondary_.GetActuallyAllocatedSize(p);
- }
-
- uptr TotalMemoryUsed() {
- return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
- }
-
- void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
-
- void InitCache(AllocatorCache *cache) {
- cache->Init(&stats_);
- }
-
- void DestroyCache(AllocatorCache *cache) {
- cache->Destroy(&primary_, &stats_);
- }
-
- void SwallowCache(AllocatorCache *cache) {
- cache->Drain(&primary_);
- }
-
- void GetStats(AllocatorStatCounters s) const {
- stats_.Get(s);
- }
-
- void PrintStats() {
- primary_.PrintStats();
- secondary_.PrintStats();
- }
-
- // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
- // introspection API.
- void ForceLock() {
- primary_.ForceLock();
- secondary_.ForceLock();
- }
-
- void ForceUnlock() {
- secondary_.ForceUnlock();
- primary_.ForceUnlock();
- }
-
- // Iterate over all existing chunks.
- // The allocator must be locked when calling this function.
- void ForEachChunk(ForEachChunkCallback callback, void *arg) {
- primary_.ForEachChunk(callback, arg);
- secondary_.ForEachChunk(callback, arg);
- }
-
- private:
- PrimaryAllocator primary_;
- SecondaryAllocator secondary_;
- AllocatorGlobalStats stats_;
- atomic_uint8_t may_return_null_;
- atomic_uint8_t rss_limit_is_exceeded_;
-};
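The full stack a tool assembles from these pieces is then a single typedef (a sketch; the type names come from the hypothetical instantiation sketches above):

// Sketch: the complete allocator. Small sizes go through the per-thread
// cache into the primary; anything else falls through to mmap.
typedef CombinedAllocator<PrimaryAllocator64, AllocatorCache64,
                          LargeMmapAllocator<> > Allocator;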
-
// Returns true if calloc(size, n) should return 0 due to overflow in size*n.
bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n);
+#include "sanitizer_allocator_size_class_map.h"
+#include "sanitizer_allocator_stats.h"
+#include "sanitizer_allocator_primary64.h"
+#include "sanitizer_allocator_bytemap.h"
+#include "sanitizer_allocator_primary32.h"
+#include "sanitizer_allocator_local_cache.h"
+#include "sanitizer_allocator_secondary.h"
+#include "sanitizer_allocator_combined.h"
+
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h b/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h
new file mode 100644
index 00000000000..5e768ce9ef9
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_bytemap.h
@@ -0,0 +1,100 @@
+//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Maps integers in range [0, kSize) to u8 values.
+template<u64 kSize>
+class FlatByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2, class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ void TestOnlyInit() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ return map2[idx % kSize2];
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h
new file mode 100644
index 00000000000..4dc9ca7401f
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h
@@ -0,0 +1,209 @@
+//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return 2^x aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator, class AllocatorCache,
+ class SecondaryAllocator> // NOLINT
+class CombinedAllocator {
+ public:
+ void InitCommon(bool may_return_null) {
+ primary_.Init();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void InitLinkerInitialized(bool may_return_null) {
+ secondary_.InitLinkerInitialized(may_return_null);
+ stats_.InitLinkerInitialized();
+ InitCommon(may_return_null);
+ }
+
+ void Init(bool may_return_null) {
+ secondary_.Init(may_return_null);
+ stats_.Init();
+ InitCommon(may_return_null);
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
+ bool cleared = false, bool check_rss_limit = false) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
+ if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ void *res;
+ bool from_primary = primary_.CanAllocate(size, alignment);
+ if (from_primary)
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ if (cleared && res && from_primary)
+ internal_bzero_aligned16(res, RoundUpTo(size, 16));
+ return res;
+ }
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull())
+ return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ secondary_.SetMayReturnNull(may_return_null);
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ bool RssLimitIsExceeded() {
+ return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
+ }
+
+ void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
+ atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
+ memory_order_release);
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return nullptr;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ void ReleaseToOS() { primary_.ReleaseToOS(); }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+ atomic_uint8_t may_return_null_;
+ atomic_uint8_t rss_limit_is_exceeded_;
+};
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h b/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
index fd5bed30c4b..166f8c22049 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_interface.h
@@ -27,10 +27,18 @@ SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
+ void (*malloc_hook)(const void *, uptr),
+ void (*free_hook)(const void *));
+
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_malloc_hook(void *ptr, uptr size);
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
/* OPTIONAL */ void __sanitizer_free_hook(void *ptr);
+
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_print_memory_profile(int top_percent);
} // extern "C"
#endif // SANITIZER_ALLOCATOR_INTERFACE_H
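The new hook-installation entry point can be exercised from user code roughly like this (a sketch; the signatures match the declaration above, with size_t standing in for uptr, and the symbol only resolves when the binary is built with a sanitizer runtime):

#include <cstddef>
#include <cstdio>

extern "C" int __sanitizer_install_malloc_and_free_hooks(
    void (*malloc_hook)(const void *, size_t),
    void (*free_hook)(const void *));

static void MyMallocHook(const void *ptr, size_t size) {
  fprintf(stderr, "alloc %p (%zu bytes)\n", ptr, size);
}
static void MyFreeHook(const void *ptr) {
  fprintf(stderr, "free  %p\n", ptr);
}

int main() {
  __sanitizer_install_malloc_and_free_hooks(MyMallocHook, MyFreeHook);
  char *p = new char[64];  // both hooks fire under, e.g., ASan
  delete[] p;
}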
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
index 99d8516d8cf..6a8da8f3584 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_internal.h
@@ -43,7 +43,12 @@ typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
LargeMmapAllocator<> > InternalAllocator;
-void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr);
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
+ uptr alignment = 0);
+void *InternalRealloc(void *p, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void *InternalCalloc(uptr count, uptr size,
+ InternalAllocatorCache *cache = nullptr);
void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
InternalAllocator *internal_allocator();
@@ -54,8 +59,8 @@ enum InternalAllocEnum {
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
- InternalAllocEnum) {
- return InternalAlloc(size);
+ __sanitizer::InternalAllocEnum) {
+ return __sanitizer::InternalAlloc(size);
}
#endif // SANITIZER_ALLOCATOR_INTERNAL_H
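The widened internal API is used like this inside the runtime (a sketch; sanitizer-internal context assumed):

// Sketch: the extended sanitizer-internal allocation calls declared above.
void InternalApiExample() {
  void *p = InternalAlloc(128, /*cache=*/nullptr, /*alignment=*/16);
  void *q = InternalCalloc(4, 32);   // zero-initialized 4*32 bytes
  q = InternalRealloc(q, 256);       // preserves the first 128 bytes
  InternalFree(p);
  InternalFree(q);
}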
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
new file mode 100644
index 00000000000..40ef0781adb
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -0,0 +1,246 @@
+//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Objects of this type should be used as local caches for SizeClassAllocator64
+// or SizeClassAllocator32. Since the typical use of this class is to have one
+// object per thread in TLS, it has to be POD.
+template<class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache
+ : SizeClassAllocator::AllocatorCache {
+};
+
+// Cache used by SizeClassAllocator64.
+template <class SizeClassAllocator>
+struct SizeClassAllocator64LocalCache {
+ typedef SizeClassAllocator Allocator;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(c, allocator, class_id);
+ CHECK_GT(c->count, 0);
+ CompactPtrT chunk = c->chunks[--c->count];
+ void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+ allocator->GetRegionBeginBySizeClass(class_id), chunk));
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id, c->max_count / 2);
+ CompactPtrT chunk = allocator->PointerToCompactPtr(
+ allocator->GetRegionBeginBySizeClass(class_id),
+ reinterpret_cast<uptr>(p));
+ c->chunks[c->count++] = chunk;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(c, allocator, class_id, c->count);
+ }
+ }
+
+ // private:
+ struct PerClass {
+ u32 count;
+ u32 max_count;
+ CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
+ }
+ }
+
+ NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache();
+ uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
+ allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks);
+ c->count = num_requested_chunks;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+ uptr count) {
+ InitCache();
+ CHECK_GE(c->count, count);
+ uptr first_idx_to_drain = c->count - count;
+ c->count -= count;
+ allocator->ReturnToAllocator(&stats_, class_id,
+ &c->chunks[first_idx_to_drain], count);
+ }
+};
+
+// Cache used by SizeClassAllocator32.
+template <class SizeClassAllocator>
+struct SizeClassAllocator32LocalCache {
+ typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
+ static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0))
+ Refill(allocator, class_id);
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ InitCache();
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
+ PerClass *c = &per_class_[class_id];
+ CHECK_NE(c->max_count, 0UL);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(allocator, class_id);
+ c->batch[c->count++] = p;
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
+ PerClass *c = &per_class_[class_id];
+ while (c->count > 0)
+ Drain(allocator, class_id);
+ }
+ }
+
+ // private:
+ typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ void *batch[2 * TransferBatch::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache() {
+ if (per_class_[1].max_count)
+ return;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ c->max_count = 2 * TransferBatch::MaxCached(i);
+ }
+ }
+
+ // TransferBatch class is declared in SizeClassAllocator.
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we may use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
+ static uptr SizeClassForTransferBatch(uptr class_id) {
+ if (Allocator::ClassIdToSize(class_id) <
+ TransferBatch::AllocationSizeRequiredForNElements(
+ TransferBatch::MaxCached(class_id)))
+ return SizeClassMap::ClassID(sizeof(TransferBatch));
+ return 0;
+ }
+
+ // Returns a TransferBatch suitable for class_id.
+ // For small size classes allocates the batch from the allocator.
+ // For large size classes simply returns b.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ // For small size classes deallocates b to the allocator.
+  // Does nothing for large size classes.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ Deallocate(allocator, batch_class_id, b);
+ }
+
+ NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ CHECK_GT(b->Count(), 0);
+ b->CopyToArray(c->batch);
+ c->count = b->Count();
+ DestroyBatch(class_id, allocator, b);
+ }
+
+ NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
+ InitCache();
+ PerClass *c = &per_class_[class_id];
+ uptr cnt = Min(c->max_count / 2, c->count);
+ uptr first_idx_to_drain = c->count - cnt;
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+ b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
+ &c->batch[first_idx_to_drain], cnt);
+ c->count -= cnt;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
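
Both caches above follow the same two-level scheme: a flat per-thread array serves allocation and deallocation without any locking, and the central allocator is consulted only when the array runs empty (refill) or full (drain half). A minimal, self-contained sketch of that scheme, with hypothetical ThreadCache/CentralFreeList names and std::mutex standing in for the sanitizer mutexes:

#include <cstddef>
#include <mutex>
#include <vector>

// Central free list for one size class (stands in for the per-class state
// kept by SizeClassAllocator64/32).
struct CentralFreeList {
  std::mutex mu;
  std::vector<void *> chunks;  // assumed pre-populated by a region allocator

  void Get(void **out, std::size_t n) {
    std::lock_guard<std::mutex> l(mu);
    for (std::size_t i = 0; i < n; i++) {
      out[i] = chunks.back();
      chunks.pop_back();
    }
  }
  void Put(void **in, std::size_t n) {
    std::lock_guard<std::mutex> l(mu);
    chunks.insert(chunks.end(), in, in + n);
  }
};

// Per-thread cache: the fast path touches only this thread's array; the
// central list is locked only on refill/drain, as in the caches above.
struct ThreadCache {
  static const std::size_t kMax = 64;
  std::size_t count = 0;
  void *batch[kMax];

  void *Allocate(CentralFreeList *central) {
    if (count == 0) {              // slow path: refill half the capacity
      central->Get(batch, kMax / 2);
      count = kMax / 2;
    }
    return batch[--count];
  }
  void Deallocate(CentralFreeList *central, void *p) {
    if (count == kMax) {           // slow path: drain half back
      central->Put(&batch[kMax / 2], kMax / 2);
      count = kMax / 2;
    }
    batch[count++] = p;
  }
};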
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h
new file mode 100644
index 00000000000..989b87a1923
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary32.h
@@ -0,0 +1,302 @@
+//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on a 64-bit arch, but there it is
+// less efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+//   Since the regions are aligned to kRegionSize, there are exactly
+//   kNumPossibleRegions possible regions in the address space, so we keep
+//   a ByteMap possible_regions to store the size class of each Region.
+//   A size class of 0 means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing, the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap,
+ const uptr kRegionSizeLog,
+ class ByteMap,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class SizeClassAllocator32 {
+ public:
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ CHECK_LE(count_, kMaxNumCached);
+ }
+ void CopyToArray(void *to_batch[]) {
+ for (uptr i = 0, n = Count(); i < n; i++)
+ to_batch[i] = batch_[i];
+ }
+
+    // How much memory is needed for a batch containing n elements.
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr class_id) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
+ }
+
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(sizeof(TransferBatch) ==
+ SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
+ SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
+ typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
+
+ void Init() {
+ possible_regions.TestOnlyInit();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+ }
+
+ void *MapWithCallback(uptr size) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ void *res = MmapOrDie(size, "SizeClassAllocator32");
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty())
+ PopulateFreeList(stat, c, sci, class_id);
+ CHECK(!sci->free_list.empty());
+ TransferBatch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_front(b);
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }
+
+ bool PointerIsMine(const void *p) {
+ uptr mem = reinterpret_cast<uptr>(p);
+ if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+ return false;
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ void PrintStats() {
+ }
+
+ static uptr AdditionalSize() {
+ return 0;
+ }
+
+  // No-op here; currently only implemented in the 64-bit allocator.
+  void ReleaseToOS() { }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ IntrusiveList<TransferBatch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) -
+ sizeof(IntrusiveList<TransferBatch>)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array[class_id];
+ }
+
+ void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ uptr size = ClassIdToSize(class_id);
+ uptr reg = AllocateRegion(stat, class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ uptr max_count = TransferBatch::MaxCached(class_id);
+ TransferBatch *b = nullptr;
+ for (uptr i = reg; i < reg + n_chunks * size; i += size) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)i);
+ b->Clear();
+ }
+ b->Add((void*)i);
+ if (b->Count() == max_count) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ if (b) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ }
+ }
+
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
+};
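
To see how the template parameters interact, here is an illustrative 32-bit instantiation. This is a sketch only: the uptr/u64 types, FlatByteMap, and DefaultSizeClassMap come from the neighboring sanitizer headers, and each tool chooses its own parameter values.

// Illustrative parameters: 1 MB regions covering the full 4 GB space.
static const uptr kRegionSizeLog = 20;     // kRegionSize = 2^20 = 1 MB
static const u64 kSpaceSize = 1ULL << 32;  // the whole 32-bit address space
// 2^32 / 2^20 = 4096 possible regions, so the byte map is only 4 KB.
typedef FlatByteMap<(kSpaceSize >> kRegionSizeLog)> ByteMap;
typedef SizeClassAllocator32<0 /*kSpaceBeg*/, kSpaceSize,
                             16 /*kMetadataSize*/, DefaultSizeClassMap,
                             kRegionSizeLog, ByteMap> PrimaryAllocator;
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;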
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
new file mode 100644
index 00000000000..620639afbfd
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_primary64.h
@@ -0,0 +1,503 @@
+//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+// The template parameter Params is a class containing the actual parameters.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
+// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
+// Otherwise SpaceBeg=kSpaceBeg (fixed address).
+// kSpaceSize is a power of two.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+
+// FreeArray is an array of free-d chunks (stored as 4-byte offsets).
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
+
+struct SizeClassAllocator64FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator64 {
+ public:
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const uptr kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks =
+ Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+
+ typedef SizeClassAllocator64<Params> ThisT;
+ typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+
+ // When we know the size class (the region base) we can represent a pointer
+ // as a 4-byte integer (offset from the region start shifted right by 4).
+ typedef u32 CompactPtrT;
+ static const uptr kCompactPtrScale = 4;
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
+ return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
+ }
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
+ return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
+ }
+
+ void Init() {
+ uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+ if (kUsingConstantSpaceBeg) {
+ CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
+ MmapFixedNoAccess(kSpaceBeg, TotalSpaceSize)));
+ } else {
+ NonConstSpaceBeg =
+ reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ MapWithCallback(SpaceEnd(), AdditionalSize());
+ }
+
+ void MapWithCallback(uptr beg, uptr size) {
+ CHECK_EQ(beg, reinterpret_cast<uptr>(MmapFixedOrDie(beg, size)));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ const CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ uptr old_num_chunks = region->num_freed_chunks;
+ uptr new_num_freed_chunks = old_num_chunks + n_chunks;
+ EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
+ for (uptr i = 0; i < n_chunks; i++)
+ free_array[old_num_chunks + i] = chunks[i];
+ region->num_freed_chunks = new_num_freed_chunks;
+ region->n_freed += n_chunks;
+ }
+
+ NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
+ PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks);
+ CHECK_GE(region->num_freed_chunks, n_chunks);
+ }
+ region->num_freed_chunks -= n_chunks;
+ uptr base_idx = region->num_freed_chunks;
+ for (uptr i = 0; i < n_chunks; i++)
+ chunks[i] = free_array[base_idx + i];
+ region->n_allocated += n_chunks;
+ }
+
+ bool PointerIsMine(const void *p) {
+ uptr P = reinterpret_cast<uptr>(p);
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return P / kSpaceSize == kSpaceBeg / kSpaceSize;
+ return P >= SpaceBeg() && P < SpaceEnd();
+ }
+
+ uptr GetRegionBegin(const void *p) {
+ if (kUsingConstantSpaceBeg)
+ return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
+ uptr space_beg = SpaceBeg();
+ return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
+ space_beg;
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
+ return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
+ kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ if (!size) return nullptr;
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = GetRegionBegin(p);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return nullptr;
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallback(SpaceBeg(), kSpaceSize + AdditionalSize());
+ }
+
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
+ uptr stats_size) {
+ for (uptr class_id = 0; class_id < stats_size; class_id++)
+ if (stats[class_id] == start)
+ stats[class_id] = rss;
+ }
+
+ void PrintStats(uptr class_id, uptr rss) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) return;
+ uptr in_use = region->n_allocated - region->n_freed;
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
+ Printf(
+ " %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd "
+ "num_freed_chunks %zd"
+ " avail: %zd rss: %zdK releases: %zd\n",
+ class_id, ClassIdToSize(class_id), region->mapped_user >> 10,
+ region->n_allocated, region->n_freed, in_use,
+ region->num_freed_chunks, avail_chunks, rss >> 10,
+ region->rtoi.num_releases);
+ }
+
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ uptr rss_stats[kNumClasses];
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
+ rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
+ GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ PrintStats(class_id, rss_stats[class_id]);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region->allocated_user;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ void ReleaseToOS() {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ ReleaseToOS(class_id);
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
+  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
+ // elements, but in reality this will not happen. For simplicity we
+ // dedicate 1/8 of the region's virtual space to FreeArray.
+ static const uptr kFreeArraySize = kRegionSize / 8;
+
+ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
+ uptr NonConstSpaceBeg;
+ uptr SpaceBeg() const {
+ return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
+ }
+ uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see CompactPtrT.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+ // Call mmap for free array memory with at least this size.
+ static const uptr kFreeArrayMapSize = 1 << 16;
+ // Granularity of ReleaseToOs (aka madvise).
+ static const uptr kReleaseToOsGranularity = 1 << 12;
+
+ struct ReleaseToOsInfo {
+ uptr n_freed_at_last_release;
+ uptr num_releases;
+ };
+
+ struct RegionInfo {
+ BlockingMutex mutex;
+ uptr num_freed_chunks; // Number of elements in the freearray.
+ uptr mapped_free_array; // Bytes mapped for freearray.
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ uptr n_allocated, n_freed; // Just stats.
+ ReleaseToOsInfo rtoi;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
+
+ u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+ }
+
+ u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+
+ void RandomShuffle(u32 *a, u32 n, u32 *rand_state) {
+ if (n <= 1) return;
+ for (u32 i = n - 1; i > 0; i--)
+ Swap(a[i], a[RandN(rand_state, i + 1)]);
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions =
+ reinterpret_cast<RegionInfo *>(SpaceBeg() + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ uptr GetMetadataEnd(uptr region_beg) {
+ return region_beg + kRegionSize - kFreeArraySize;
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr size) {
+ if (!kUsingConstantSpaceBeg)
+ chunk -= SpaceBeg();
+
+ uptr offset = chunk % kRegionSize;
+    // Here we divide by a non-constant. This is costly.
+    // size always fits into 32 bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
+ }
+
+ CompactPtrT *GetFreeArray(uptr region_beg) {
+ return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
+ kFreeArraySize);
+ }
+
+ void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ uptr num_freed_chunks) {
+ uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
+ if (region->mapped_free_array < needed_space) {
+ CHECK_LE(needed_space, kFreeArraySize);
+ uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
+ region->mapped_free_array;
+ uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
+ MapWithCallback(current_map_end, new_map_size);
+ region->mapped_free_array = new_mapped_free_array;
+ }
+ }
+
+ NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ RegionInfo *region, uptr requested_count) {
+ // region->mutex is held.
+ uptr size = ClassIdToSize(class_id);
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + requested_count * size;
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ if (end_idx > region->mapped_user) {
+ if (!kUsingConstantSpaceBeg && region->mapped_user == 0)
+ region->rand_state = static_cast<u32>(region_beg >> 12); // From ASLR.
+ // Do the mmap for the user memory.
+ uptr map_size = kUserMapSize;
+ while (end_idx > region->mapped_user + map_size)
+ map_size += kUserMapSize;
+ CHECK_GE(region->mapped_user + map_size, end_idx);
+ MapWithCallback(region_beg + region->mapped_user, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ region->mapped_user += map_size;
+ }
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ uptr total_count = (region->mapped_user - beg_idx) / size;
+ uptr num_freed_chunks = region->num_freed_chunks;
+ EnsureFreeArraySpace(region, region_beg, num_freed_chunks + total_count);
+ for (uptr i = 0; i < total_count; i++) {
+ uptr chunk = beg_idx + i * size;
+ free_array[num_freed_chunks + total_count - 1 - i] =
+ PointerToCompactPtr(0, chunk);
+ }
+ if (kRandomShuffleChunks)
+ RandomShuffle(&free_array[num_freed_chunks], total_count,
+ &region->rand_state);
+ region->num_freed_chunks += total_count;
+ region->allocated_user += total_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+
+ region->allocated_meta += total_count * kMetadataSize;
+ if (region->allocated_meta > region->mapped_meta) {
+ uptr map_size = kMetaMapSize;
+ while (region->allocated_meta > region->mapped_meta + map_size)
+ map_size += kMetaMapSize;
+ // Do the mmap for the metadata.
+ CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
+ MapWithCallback(GetMetadataEnd(region_beg) -
+ region->mapped_meta - map_size, map_size);
+ region->mapped_meta += map_size;
+ }
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ if (region->mapped_user + region->mapped_meta >
+ kRegionSize - kFreeArraySize) {
+ Printf("%s: Out of memory. Dying. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ }
+
+ bool MaybeReleaseChunkRange(uptr region_beg, uptr chunk_size,
+ CompactPtrT first, CompactPtrT last) {
+ uptr beg_ptr = CompactPtrToPointer(region_beg, first);
+ uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
+ CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity);
+ beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity);
+ end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity);
+ if (end_ptr == beg_ptr) return false;
+ ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr);
+ return true;
+ }
+
+ // Releases some RAM back to OS.
+ // Algorithm:
+ // * Lock the region.
+ // * Sort the chunks.
+ // * Find ranges fully covered by free-d chunks
+ // * Release them to OS with madvise.
+ //
+ // TODO(kcc): make sure we don't do it too frequently.
+ void ReleaseToOS(uptr class_id) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
+ const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
+ BlockingMutexLock l(&region->mutex);
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < kReleaseToOsGranularity)
+ return; // No chance to release anything.
+    if ((region->n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
+        kReleaseToOsGranularity)
+      return;  // Nothing new to release.
+ SortArray(free_array, n);
+ uptr beg = free_array[0];
+ uptr prev = free_array[0];
+ for (uptr i = 1; i < n; i++) {
+ uptr chunk = free_array[i];
+ CHECK_GT(chunk, prev);
+ if (chunk - prev != scaled_chunk_size) {
+ CHECK_GT(chunk - prev, scaled_chunk_size);
+ if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
+ MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
+ region->rtoi.n_freed_at_last_release = region->n_freed;
+ region->rtoi.num_releases++;
+ }
+ beg = chunk;
+ }
+ prev = chunk;
+ }
+ }
+};
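
A standalone check of the compact-pointer arithmetic used throughout the 64-bit allocator: a 4-byte offset scaled by 16 addresses up to 2^36 bytes per region, which is exactly the bound enforced by the COMPILER_CHECKs on kRegionSize. The helpers are duplicated here for illustration and the addresses are hypothetical.

#include <cassert>
#include <cstdint>

typedef uint64_t uptr;
typedef uint32_t CompactPtrT;
static const uptr kCompactPtrScale = 4;  // chunks are at least 16-byte aligned

static CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
  return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
}
static uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
  return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}

int main() {
  uptr region_beg = 0x600000000000ULL;  // hypothetical region base
  uptr chunk = region_beg + 0x12340;    // a 16-byte-aligned offset
  // The encoding round-trips as long as the offset stays below 2^36.
  assert(CompactPtrToPointer(region_beg,
                             PointerToCompactPtr(region_beg, chunk)) == chunk);
  return 0;
}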
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h
new file mode 100644
index 00000000000..91c2ecc5b26
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_secondary.h
@@ -0,0 +1,271 @@
+//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback>
+class LargeMmapAllocator {
+ public:
+ void InitLinkerInitialized(bool may_return_null) {
+ page_size_ = GetPageSizeCached();
+ atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
+ }
+
+ void Init(bool may_return_null) {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized(may_return_null);
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ // Overflow.
+ if (map_size < size) return ReturnNullOrDieOnBadRequest();
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDie(map_size, "LargeMmapAllocator"));
+ CHECK(IsAligned(map_beg, page_size_));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = n_chunks_++;
+ chunks_sorted_ = false;
+ CHECK_LT(idx, kMaxNumChunks);
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ bool MayReturnNull() const {
+ return atomic_load(&may_return_null_, memory_order_acquire);
+ }
+
+ void *ReturnNullOrDieOnBadRequest() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(false);
+ }
+
+ void *ReturnNullOrDieOnOOM() {
+ if (MayReturnNull()) return nullptr;
+ ReportAllocatorCannotReturnNull(true);
+ }
+
+ void SetMayReturnNull(bool may_return_null) {
+ atomic_store(&may_return_null_, may_return_null, memory_order_release);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[n_chunks_ - 1];
+ chunks_[idx]->chunk_idx = idx;
+ n_chunks_--;
+ chunks_sorted_ = false;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(const void *p) {
+ return GetBlockBegin(p) != nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+  // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(const void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(const void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks_[i]);
+      if (p < ch) continue;  // p is to the left of this chunk; skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return nullptr;
+ Header *h = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size <= p)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return nullptr;
+ if (!chunks_sorted_) {
+ // Do one-time sort. chunks_sorted_ is reset in Allocate/Deallocate.
+ SortArray(reinterpret_cast<uptr*>(chunks_), n);
+ for (uptr i = 0; i < n; i++)
+ chunks_[i]->chunk_idx = i;
+ chunks_sorted_ = true;
+ min_mmap_ = reinterpret_cast<uptr>(chunks_[0]);
+ max_mmap_ = reinterpret_cast<uptr>(chunks_[n - 1]) +
+ chunks_[n - 1]->map_size;
+ }
+ if (p < min_mmap_ || p >= max_mmap_)
+ return nullptr;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks_[mid]))
+ end = mid - 1; // We are not interested in chunks_[mid].
+ else
+ beg = mid; // chunks_[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks_[end]))
+ beg = end;
+ }
+
+ Header *h = chunks_[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return nullptr;
+ return GetUser(h);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr i = 0; i < n_chunks_; i++)
+ callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK(IsAligned(p, page_size_));
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
+
+ void *GetUser(Header *h) {
+ CHECK(IsAligned((uptr)h, page_size_));
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
+
+ uptr page_size_;
+ Header *chunks_[kMaxNumChunks];
+ uptr n_chunks_;
+ uptr min_mmap_, max_mmap_;
+ bool chunks_sorted_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ atomic_uint8_t may_return_null_;
+ SpinMutex mutex_;
+};
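
The secondary allocator's bookkeeping hinges on one layout fact: the Header sits exactly one page below the pointer handed to the user. A minimal POSIX sketch of that layout (page size hard-coded for illustration; alignment handling and the chunks_ registry are omitted):

#include <cstdint>
#include <sys/mman.h>

struct Header { uintptr_t map_beg, map_size, size, chunk_idx; };
static const uintptr_t kPageSize = 4096;  // the real code queries the OS

void *LargeAllocate(uintptr_t size) {
  // Mirrors RoundUpMapSize: round up to a page, plus one page for the header.
  uintptr_t map_size = ((size + kPageSize - 1) & ~(kPageSize - 1)) + kPageSize;
  void *map = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (map == MAP_FAILED) return nullptr;
  uintptr_t user = (uintptr_t)map + kPageSize;  // header page precedes user data
  Header *h = (Header *)(user - kPageSize);     // same math as GetHeader()
  h->map_beg = (uintptr_t)map;
  h->map_size = map_size;
  h->size = size;
  return (void *)user;
}

void LargeDeallocate(void *p) {
  Header *h = (Header *)((uintptr_t)p - kPageSize);
  munmap((void *)h->map_beg, h->map_size);
}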
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h b/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h
new file mode 100644
index 00000000000..4cda021a947
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_size_class_map.h
@@ -0,0 +1,215 @@
+//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 always corresponds to size 0.
+// The other sizes are controlled by the template parameters:
+//   kMinSizeLog: defines class 1 as 2^kMinSizeLog.
+//   kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
+//   kMidSizeLog: the classes starting from 1 increase in steps of
+//                2^kMinSizeLog up to 2^kMidSizeLog.
+// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
+// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
+// look like 0b1xx0..0, where x is either 0 or 1.
+//
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+//
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
+// ...
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - Difference between two consecutive size classes is between 14% and 25%.
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that need to be cached per thread:
+// - kMaxNumCachedHint is a hint for the maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
+//
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
+//
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
+//
+//
+// Another example (kNumBits=2):
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
+// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
+// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
+// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
+// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
+// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
+// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
+// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
+// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
+// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
+// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
+// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
+// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
+// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
+// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
+// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
+// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
+// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
+// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
+// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
+// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
+// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
+// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
+// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
+// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
+// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
+
+template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
+ uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = kNumBits - 1;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+  // kMaxNumCachedHintT is a power of two. It serves as a hint for the size
+  // of TransferBatch; the actual size can be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
+
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
+ COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses <= 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ if (size > kMaxSize) return 0;
+ uptr l = MostSignificantSetBitIndex(size);
+ uptr hbits = (size >> (l - S)) & M;
+ uptr lbits = size & ((1 << (l - S)) - 1);
+ uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCachedHint(uptr class_id) {
+ if (class_id == 0) return 0;
+ uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
+ return Max<uptr>(1, Min(kMaxNumCachedHint, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
+ uptr cached = MaxCachedHint(i) * s;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCachedHint(i), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_NE(s, 0U);
+ CHECK_EQ(ClassID(s), c);
+ if (c != kNumClasses - 1)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(Size(c), Size(c-1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c-1), s);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
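
The Size()/ClassID() arithmetic is easy to verify by hand. For DefaultSizeClassMap (kNumBits=3, so S=2 and M=3; kMinSize=16, kMidSize=256, kMidClass=16), class 17 is the first size after the linear range. A standalone re-derivation, using the GCC/Clang __builtin_clzll where the real code calls MostSignificantSetBitIndex:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t S = 2, M = 3, kMidClass = 16, kMidSize = 256, kMidSizeLog = 8;
  // Size(17): 17 - 16 = 1; t = 256 << (1 >> 2) = 256; 256 + 64 * (1 & 3) = 320.
  uint64_t class_id = 17 - kMidClass;
  uint64_t t = kMidSize << (class_id >> S);
  assert(t + (t >> S) * (class_id & M) == 320);
  // ClassID(300): l = 8; hbits = (300 >> 6) & 3 = 0; lbits = 300 & 63 = 44;
  // 16 + ((8 - 8) << 2) + 0 + (44 > 0) = 17, i.e. 300 rounds up to the
  // 320-byte class, matching the c17 row in the table above.
  uint64_t size = 300;
  uint64_t l = 63 - __builtin_clzll(size);
  uint64_t hbits = (size >> (l - S)) & M;
  uint64_t lbits = size & ((1ULL << (l - S)) - 1);
  assert(kMidClass + ((l - kMidSizeLog) << S) + hbits + (lbits > 0) == 17);
  return 0;
}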
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator_stats.h b/libsanitizer/sanitizer_common/sanitizer_allocator_stats.h
new file mode 100644
index 00000000000..e336b5d365a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator_stats.h
@@ -0,0 +1,103 @@
+//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
+ AllocatorStatCount
+};
+
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void InitLinkerInitialized() {}
+
+ void Add(AllocatorStat i, uptr v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uintptr_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void InitLinkerInitialized() {
+ next_ = this;
+ prev_ = this;
+ }
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
+ }
+
+ private:
+ mutable SpinMutex mu_;
+};
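
Register() and Get() above maintain a circular, intrusively linked list with the global object as its head. A single-threaded sketch of that ring (no atomics or locking, unlike the real class):

#include <cstddef>

struct Stats {
  Stats *next, *prev;
  std::size_t allocated;
};

// Insert s right after the head, as AllocatorGlobalStats::Register does.
void Register(Stats *head, Stats *s) {
  s->next = head->next;
  s->prev = head;
  head->next->prev = s;
  head->next = s;
}

// Walk the full circle, head included, summing every node, as Get() does.
std::size_t Total(const Stats *head) {
  std::size_t sum = 0;
  const Stats *p = head;
  do {
    sum += p->allocated;
    p = p->next;
  } while (p != head);
  return sum;
}

int main() {
  Stats head = {&head, &head, 0};  // an empty ring points at itself
  Stats a = {nullptr, nullptr, 10};
  Stats b = {nullptr, nullptr, 32};
  Register(&head, &a);
  Register(&head, &b);
  return Total(&head) == 42 ? 0 : 1;
}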
diff --git a/libsanitizer/sanitizer_common/sanitizer_asm.h b/libsanitizer/sanitizer_common/sanitizer_asm.h
index 985f59c23c2..737ccbb716d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_asm.h
+++ b/libsanitizer/sanitizer_common/sanitizer_asm.h
@@ -40,3 +40,17 @@
# define CFI_DEF_CFA(reg, n)
# define CFI_RESTORE(reg)
#endif
+
+#if !defined(__APPLE__)
+# define ASM_HIDDEN(symbol) .hidden symbol
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, @function
+# define ASM_SIZE(symbol) .size symbol, .-symbol
+# define ASM_TSAN_SYMBOL(symbol) symbol
+# define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) symbol
+#else
+# define ASM_HIDDEN(symbol)
+# define ASM_TYPE_FUNCTION(symbol)
+# define ASM_SIZE(symbol)
+# define ASM_TSAN_SYMBOL(symbol) _##symbol
+# define ASM_TSAN_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
+#endif
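
These macros absorb two ABI differences: Mach-O has no .hidden/.type/.size directives, and it prepends an underscore to C-level symbol names. A small demonstration of the naming half, stringizing the same macro on both targets (the stringify helpers are local to this example; the TSan assembly files are the intended consumers):

#include <cstdio>

#define SANITIZER_STRINGIFY_(x) #x
#define SANITIZER_STRINGIFY(x) SANITIZER_STRINGIFY_(x)

#if !defined(__APPLE__)
# define ASM_TSAN_SYMBOL(symbol) symbol
#else
# define ASM_TSAN_SYMBOL(symbol) _##symbol
#endif

int main() {
  // Prints "__tsan_read4" on ELF targets and "___tsan_read4" on Darwin.
  puts(SANITIZER_STRINGIFY(ASM_TSAN_SYMBOL(__tsan_read4)));
  return 0;
}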
diff --git a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
index 4ac3b90769f..4ae87142d46 100644
--- a/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_atomic_msvc.h
@@ -31,6 +31,10 @@ extern "C" long _InterlockedExchange( // NOLINT
extern "C" long _InterlockedExchangeAdd( // NOLINT
long volatile * Addend, long Value); // NOLINT
#pragma intrinsic(_InterlockedExchangeAdd)
+extern "C" char _InterlockedCompareExchange8( // NOLINT
+ char volatile *Destination, // NOLINT
+ char Exchange, char Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange8)
extern "C" short _InterlockedCompareExchange16( // NOLINT
short volatile *Destination, // NOLINT
short Exchange, short Comparand); // NOLINT
@@ -169,8 +173,6 @@ INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
}
-#ifndef _WIN64
-
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
u8 *cmp,
u8 xchgv,
@@ -178,6 +180,10 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
(void)mo;
DCHECK(!((uptr)a % sizeof(*a)));
u8 cmpv = *cmp;
+#ifdef _WIN64
+ u8 prev = (u8)_InterlockedCompareExchange8(
+ (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
+#else
u8 prev;
__asm {
mov al, cmpv
@@ -186,14 +192,13 @@ INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
lock cmpxchg [ecx], dl
mov prev, al
}
+#endif
if (prev == cmpv)
return true;
*cmp = prev;
return false;
}
-#endif
-
INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
uptr *cmp,
uptr xchg,
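
The hunk above makes the 8-bit compare-exchange available on x64, where MSVC has no inline assembly, by routing it through the _InterlockedCompareExchange8 intrinsic. Both paths keep the usual strong-CAS contract (on failure *cmp is refreshed with the value actually observed), which is what retry loops rely on. A hypothetical helper written against the sanitizer atomic types:

// Saturating 8-bit increment built on the strong CAS above. On failure the
// CAS writes the observed value back into cur, so no separate reload is needed.
INLINE u8 AtomicIncrementSaturating(volatile atomic_uint8_t *a) {
  u8 cur = atomic_load(a, memory_order_relaxed);
  while (cur != 255 &&
         !atomic_compare_exchange_strong(a, &cur, (u8)(cur + 1),
                                         memory_order_acq_rel)) {
    // Retry with the refreshed value of cur.
  }
  return cur;
}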
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.cc b/libsanitizer/sanitizer_common/sanitizer_common.cc
index 4529e63eba9..b445f613b85 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common.cc
@@ -10,6 +10,7 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_flags.h"
#include "sanitizer_libc.h"
@@ -22,13 +23,7 @@ namespace __sanitizer {
const char *SanitizerToolName = "SanitizerTool";
atomic_uint32_t current_verbosity;
-
-uptr GetPageSizeCached() {
- static uptr PageSize;
- if (!PageSize)
- PageSize = GetPageSize();
- return PageSize;
-}
+uptr PageSizeCached;
StaticSpinMutex report_file_mu;
ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
@@ -103,70 +98,13 @@ uptr stoptheworld_tracer_pid = 0;
// writing to the same log file.
uptr stoptheworld_tracer_ppid = 0;
-static const int kMaxNumOfInternalDieCallbacks = 5;
-static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
-
-bool AddDieCallback(DieCallbackType callback) {
- for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
- if (InternalDieCallbacks[i] == nullptr) {
- InternalDieCallbacks[i] = callback;
- return true;
- }
- }
- return false;
-}
-
-bool RemoveDieCallback(DieCallbackType callback) {
- for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
- if (InternalDieCallbacks[i] == callback) {
- internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
- sizeof(InternalDieCallbacks[0]) *
- (kMaxNumOfInternalDieCallbacks - i - 1));
- InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
- return true;
- }
- }
- return false;
-}
-
-static DieCallbackType UserDieCallback;
-void SetUserDieCallback(DieCallbackType callback) {
- UserDieCallback = callback;
-}
-
-void NORETURN Die() {
- if (UserDieCallback)
- UserDieCallback();
- for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
- if (InternalDieCallbacks[i])
- InternalDieCallbacks[i]();
- }
- if (common_flags()->abort_on_error)
- Abort();
- internal__exit(common_flags()->exitcode);
-}
-
-static CheckFailedCallbackType CheckFailedCallback;
-void SetCheckFailedCallback(CheckFailedCallbackType callback) {
- CheckFailedCallback = callback;
-}
-
-void NORETURN CheckFailed(const char *file, int line, const char *cond,
- u64 v1, u64 v2) {
- if (CheckFailedCallback) {
- CheckFailedCallback(file, line, cond, v1, v2);
- }
- Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
- v1, v2);
- Die();
-}
-
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
- const char *mmap_type, error_t err) {
+ const char *mmap_type, error_t err,
+ bool raw_report) {
static int recursion_count;
- if (recursion_count) {
+ if (raw_report || recursion_count) {
+ // If raw report is requested or we went into recursion, just die.
// The Report() and CHECK calls below may call mmap recursively and fail.
- // If we went into recursion, just die.
RawWrite("ERROR: Failed to mmap\n");
Die();
}
@@ -174,7 +112,7 @@ void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
Report("ERROR: %s failed to "
"%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
SanitizerToolName, mmap_type, size, size, mem_type, err);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DumpProcessMap();
#endif
UNREACHABLE("unable to mmap");
@@ -217,6 +155,7 @@ bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
}
typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
template<class T>
static inline bool CompareLess(const T &a, const T &b) {
@@ -227,25 +166,8 @@ void SortArray(uptr *array, uptr size) {
InternalSort<uptr*, UptrComparisonFunction>(&array, size, CompareLess);
}
-// We want to map a chunk of address space aligned to 'alignment'.
-// We do it by maping a bit more and then unmaping redundant pieces.
-// We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
-// uptr PageSize = GetPageSizeCached();
- CHECK(IsPowerOfTwo(size));
- CHECK(IsPowerOfTwo(alignment));
- uptr map_size = size + alignment;
- uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
- uptr map_end = map_res + map_size;
- uptr res = map_res;
- if (res & (alignment - 1)) // Not aligned.
- res = (map_res + alignment) & ~(alignment - 1);
- uptr end = res + size;
- if (res != map_res)
- UnmapOrDie((void*)map_res, res - map_res);
- if (end != map_end)
- UnmapOrDie((void*)end, map_end - end);
- return (void*)res;
+void SortArray(u32 *array, uptr size) {
+ InternalSort<u32*, U32ComparisonFunction>(&array, size, CompareLess);
}
const char *StripPathPrefix(const char *filepath,
@@ -283,7 +205,7 @@ void ReportErrorSummary(const char *error_message) {
__sanitizer_report_error_summary(buff.data());
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
if (!common_flags()->print_summary)
return;
@@ -295,6 +217,40 @@ void ReportErrorSummary(const char *error_type, const AddressInfo &info) {
}
#endif
+// Removes the ANSI escape sequences from the input string (in-place).
+void RemoveANSIEscapeSequencesFromString(char *str) {
+ if (!str)
+ return;
+
+ // We are going to remove the escape sequences in place.
+ char *s = str;
+ char *z = str;
+ while (*s != '\0') {
+ CHECK_GE(s, z);
+ // Skip over ANSI escape sequences with pointer 's'.
+ if (*s == '\033' && *(s + 1) == '[') {
+ s = internal_strchrnul(s, 'm');
+ if (*s == '\0') {
+ break;
+ }
+ s++;
+ continue;
+ }
+    // 's' now points at a character we want to keep. Copy over the buffer
+    // content if an escape sequence has previously been skipped, and advance
+    // both pointers.
+ if (s != z)
+ *z = *s;
+
+ // If we have not seen an escape sequence, just advance both pointers.
+ z++;
+ s++;
+ }
+
+ // Null terminate the string.
+ *z = '\0';
+}
+
void LoadedModule::set(const char *module_name, uptr base_address) {
clear();
full_name_ = internal_strdup(module_name);
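
The escape-sequence stripper above is a classic two-pointer in-place filter: 's' scans, 'z' writes, and the string can only shrink. A self-contained sketch of the same scheme, simplified to sequences of the form ESC '[' ... 'm':

    #include <cassert>
    #include <cstring>
    static void StripAnsi(char *str) {
      char *s = str;   // read cursor
      char *z = str;   // write cursor, never ahead of s
      while (*s) {
        if (s[0] == '\033' && s[1] == '[') {  // escape sequence starts
          while (*s && *s != 'm') s++;        // skip to the terminating 'm'
          if (*s) s++;
          continue;
        }
        *z++ = *s++;                          // keep ordinary characters
      }
      *z = '\0';
    }
    int main() {
      char buf[] = "\033[1m\033[31mred\033[0m text";
      StripAnsi(buf);
      assert(std::strcmp(buf, "red text") == 0);
      return 0;
    }
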
@@ -318,9 +274,8 @@ void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable) {
}
bool LoadedModule::containsAddress(uptr address) const {
- for (Iterator iter = ranges(); iter.hasNext();) {
- const AddressRange *r = iter.next();
- if (r->beg <= address && address < r->end)
+ for (const AddressRange &r : ranges()) {
+ if (r.beg <= address && address < r.end)
return true;
}
return false;
@@ -387,6 +342,10 @@ bool TemplateMatch(const char *templ, const char *str) {
static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
char *FindPathToBinary(const char *name) {
+ if (FileExists(name)) {
+ return internal_strdup(name);
+ }
+
const char *path = GetEnv("PATH");
if (!path)
return nullptr;
@@ -451,6 +410,53 @@ uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
return name_len;
}
+void PrintCmdline() {
+ char **argv = GetArgv();
+ if (!argv) return;
+ Printf("\nCommand: ");
+ for (uptr i = 0; argv[i]; ++i)
+ Printf("%s ", argv[i]);
+ Printf("\n\n");
+}
+
+// Malloc hooks.
+static const int kMaxMallocFreeHooks = 5;
+struct MallocFreeHook {
+ void (*malloc_hook)(const void *, uptr);
+ void (*free_hook)(const void *);
+};
+
+static MallocFreeHook MFHooks[kMaxMallocFreeHooks];
+
+void RunMallocHooks(const void *ptr, uptr size) {
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ auto hook = MFHooks[i].malloc_hook;
+ if (!hook) return;
+ hook(ptr, size);
+ }
+}
+
+void RunFreeHooks(const void *ptr) {
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ auto hook = MFHooks[i].free_hook;
+ if (!hook) return;
+ hook(ptr);
+ }
+}
+
+static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
+ void (*free_hook)(const void *)) {
+ if (!malloc_hook || !free_hook) return 0;
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ if (MFHooks[i].malloc_hook == nullptr) {
+ MFHooks[i].malloc_hook = malloc_hook;
+ MFHooks[i].free_hook = free_hook;
+ return i + 1;
+ }
+ }
+ return 0;
+}
+
} // namespace __sanitizer
using namespace __sanitizer; // NOLINT
@@ -460,6 +466,11 @@ void __sanitizer_set_report_path(const char *path) {
report_file.SetReportPath(path);
}
+void __sanitizer_set_report_fd(void *fd) {
+ report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
+ report_file.fd_pid = internal_getpid();
+}
+
void __sanitizer_report_error_summary(const char *error_summary) {
Printf("%s\n", error_summary);
}
@@ -468,4 +479,18 @@ SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_death_callback(void (*callback)(void)) {
SetUserDieCallback(callback);
}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
+ uptr),
+ void (*free_hook)(const void *)) {
+ return InstallMallocFreeHooks(malloc_hook, free_hook);
+}
+
+#if !SANITIZER_GO && !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_print_memory_profile(int top_percent) {
+ (void)top_percent;
+}
+#endif
} // extern "C"
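
The new hook table gives tools up to five malloc/free observer pairs, installed first-empty-slot-first and invoked in order until the first empty slot. A usage sketch follows; the extern declaration mirrors the runtime signature added above, while real code would include <sanitizer/allocator_interface.h>, whose spelling of the pointer and size types may differ slightly:

    #include <cstdio>
    #include <cstdlib>

    extern "C" int __sanitizer_install_malloc_and_free_hooks(
        void (*malloc_hook)(const void *, size_t),
        void (*free_hook)(const void *));

    static void MyMallocHook(const void *ptr, size_t size) {
      std::fprintf(stderr, "malloc(%zu) = %p\n", size, ptr);
    }
    static void MyFreeHook(const void *ptr) {
      std::fprintf(stderr, "free(%p)\n", ptr);
    }

    int main() {
      // Nonzero slot id on success; 0 when all five slots are taken
      // or either hook is null.
      int id = __sanitizer_install_malloc_and_free_hooks(MyMallocHook,
                                                         MyFreeHook);
      std::fprintf(stderr, "hook slot: %d\n", id);
      void *p = std::malloc(16);
      std::free(p);
      return 0;
    }
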
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 6fb2dd882aa..3e079ab7ad1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -21,7 +21,7 @@
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"
-#ifdef _MSC_VER
+#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif
@@ -42,11 +42,10 @@ const uptr kWordSizeInBits = 8 * kWordSize;
const uptr kMaxPathLength = 4096;
-// 16K loaded modules should be enough for everyone.
-static const uptr kMaxNumberOfModules = 1 << 14;
-
const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
+static const uptr kErrorMessageBufferSize = 1 << 16;
+
// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;
@@ -62,7 +61,12 @@ INLINE int Verbosity() {
}
uptr GetPageSize();
-uptr GetPageSizeCached();
+extern uptr PageSizeCached;
+INLINE uptr GetPageSizeCached() {
+ if (!PageSizeCached)
+ PageSizeCached = GetPageSize();
+ return PageSizeCached;
+}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
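
Inlining GetPageSizeCached() turns a cross-TU call into a load plus branch; the unsynchronized write is a benign race because every caller stores the same value. The pattern in isolation (POSIX sketch):

    #include <cstdio>
    #include <unistd.h>
    static unsigned long page_size_cached;
    static unsigned long GetPageSizeCachedDemo() {
      if (!page_size_cached)  // benign race: all writers store the same value
        page_size_cached = (unsigned long)sysconf(_SC_PAGESIZE);
      return page_size_cached;
    }
    int main() {
      std::printf("page size: %lu\n", GetPageSizeCachedDemo());
      return 0;
    }
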
@@ -74,22 +78,30 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size);
// Memory management
-void *MmapOrDie(uptr size, const char *mem_type);
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
+INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type, /*raw_report*/ true);
+}
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
-void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
+void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
-// Disallow access to a memory range. Use MmapNoAccess to allocate an
+// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
 // inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
+bool MprotectReadOnly(uptr addr, uptr size);
+
+// Find an available address space.
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
-void FlushUnneededShadowMemory(uptr addr, uptr size);
+void ReleaseMemoryToOS(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
@@ -97,21 +109,21 @@ void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
+void RunMallocHooks(const void *ptr, uptr size);
+void RunFreeHooks(const void *ptr);
// InternalScopedBuffer can be used instead of large stack arrays to
// keep frame size low.
// FIXME: use InternalAlloc instead of MmapOrDie once
// InternalAlloc is made libc-free.
-template<typename T>
+template <typename T>
class InternalScopedBuffer {
public:
explicit InternalScopedBuffer(uptr cnt) {
cnt_ = cnt;
- ptr_ = (T*)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
- }
- ~InternalScopedBuffer() {
- UnmapOrDie(ptr_, cnt_ * sizeof(T));
+ ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
}
+ ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
T &operator[](uptr i) { return ptr_[i]; }
T *data() { return ptr_; }
uptr size() { return cnt_ * sizeof(T); }
@@ -119,9 +131,11 @@ class InternalScopedBuffer {
private:
T *ptr_;
uptr cnt_;
- // Disallow evil constructors.
- InternalScopedBuffer(const InternalScopedBuffer&);
- void operator=(const InternalScopedBuffer&);
+ // Disallow copies and moves.
+ InternalScopedBuffer(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
+ InternalScopedBuffer(InternalScopedBuffer &&) = delete;
+ InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
};
class InternalScopedString : public InternalScopedBuffer<char> {
@@ -160,6 +174,7 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
// IO
void RawWrite(const char *buffer);
bool ColorizeReports();
+void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
@@ -271,10 +286,27 @@ const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
+// Starts a subprocess and returns its pid.
+// If the *_fd parameters are not kInvalidFd, their corresponding input/output
+// streams will be redirected to the file. The files will always be closed
+// in the parent process, even in case of an error.
+// The child process will close all fds after STDERR_FILENO
+// before passing control to the program.
+pid_t StartSubprocess(const char *filename, const char *const argv[],
+ fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
+ fd_t stderr_fd = kInvalidFd);
+// Checks if the specified process is still running.
+bool IsProcessRunning(pid_t pid);
+// Waits for the process to finish and returns its exit code.
+// Returns -1 in case of an error.
+int WaitForProcess(pid_t pid);
u32 GetUid();
void ReExec();
+char **GetArgv();
+void PrintCmdline();
bool StackSizeIsUnlimited();
+uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
@@ -299,6 +331,7 @@ void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
+void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);
// Exit
@@ -307,7 +340,8 @@ void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
- const char *mmap_type, error_t err);
+ const char *mmap_type, error_t err,
+ bool raw_report = false);
// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
@@ -339,9 +373,15 @@ void SetCheckFailedCallback(CheckFailedCallbackType callback);
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
+// Callback to be called when we want to try releasing unused allocator memory
+// back to the OS.
+typedef void (*AllocatorReleaseToOSCallback)();
+// The callback should be registered once at the tool init time.
+void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);
+
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
-bool IsDeadlySignal(int signum);
+bool IsHandledDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
@@ -357,7 +397,7 @@ void ReportErrorSummary(const char *error_message);
// error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
-void ReportErrorSummary(const char *error_type, StackTrace *trace);
+void ReportErrorSummary(const char *error_type, const StackTrace *trace);
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
@@ -414,13 +454,13 @@ INLINE uptr RoundUpToPowerOfTwo(uptr size) {
if (IsPowerOfTwo(size)) return size;
uptr up = MostSignificantSetBitIndex(size);
- CHECK(size < (1ULL << (up + 1)));
- CHECK(size > (1ULL << up));
+ CHECK_LT(size, (1ULL << (up + 1)));
+ CHECK_GT(size, (1ULL << up));
return 1ULL << (up + 1);
}
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
- CHECK(IsPowerOfTwo(boundary));
+ RAW_CHECK(IsPowerOfTwo(boundary));
return (size + boundary - 1) & ~(boundary - 1);
}
@@ -487,7 +527,7 @@ class InternalMmapVectorNoCtor {
uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
Resize(new_capacity);
}
- data_[size_++] = element;
+ internal_memcpy(&data_[size_++], &element, sizeof(T));
}
T &back() {
CHECK_GT(size_, 0);
@@ -513,6 +553,19 @@ class InternalMmapVectorNoCtor {
void clear() { size_ = 0; }
bool empty() const { return size() == 0; }
+ const T *begin() const {
+ return data();
+ }
+ T *begin() {
+ return data();
+ }
+ const T *end() const {
+ return data() + size();
+ }
+ T *end() {
+ return data() + size();
+ }
+
private:
void Resize(uptr new_capacity) {
CHECK_GT(new_capacity, 0);
@@ -619,8 +672,7 @@ class LoadedModule {
: next(nullptr), beg(beg), end(end), executable(executable) {}
};
- typedef IntrusiveList<AddressRange>::ConstIterator Iterator;
- Iterator ranges() const { return Iterator(&ranges_); }
+ const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
private:
char *full_name_; // Owned.
@@ -628,13 +680,33 @@ class LoadedModule {
IntrusiveList<AddressRange> ranges_;
};
-// OS-dependent function that fills array with descriptions of at most
-// "max_modules" currently loaded modules. Returns the number of
-// initialized modules. If filter is nonzero, ignores modules for which
-// filter(full_name) is false.
-typedef bool (*string_predicate_t)(const char *);
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
- string_predicate_t filter);
+// List of LoadedModules. The OS-dependent implementation is responsible
+// for filling in this information.
+class ListOfModules {
+ public:
+ ListOfModules() : modules_(kInitialCapacity) {}
+ ~ListOfModules() { clear(); }
+ void init();
+ const LoadedModule *begin() const { return modules_.begin(); }
+ LoadedModule *begin() { return modules_.begin(); }
+ const LoadedModule *end() const { return modules_.end(); }
+ LoadedModule *end() { return modules_.end(); }
+ uptr size() const { return modules_.size(); }
+ const LoadedModule &operator[](uptr i) const {
+ CHECK_LT(i, modules_.size());
+ return modules_[i];
+ }
+
+ private:
+ void clear() {
+ for (auto &module : modules_) module.clear();
+ modules_.clear();
+ }
+
+ InternalMmapVector<LoadedModule> modules_;
+ // We rarely have more than 16K loaded modules.
+ static const uptr kInitialCapacity = 1 << 14;
+};
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
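
ListOfModules replaces the fixed 16K-entry array API with a growable container, and the begin()/end() pair added to InternalMmapVectorNoCtor is what lets both it and LoadedModule::ranges() above be walked with range-based for. The call-site shape, sketched with standard containers standing in for the runtime types:

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Module { std::string name; unsigned long base; };

    int main() {
      std::vector<Module> modules = {{"libc.so.6", 0x7f00}, {"a.out", 0x4000}};
      for (const Module &m : modules)  // mirrors: for (auto &module : modules_)
        std::printf("%s @ 0x%lx\n", m.name.c_str(), m.base);
      return 0;
    }
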
@@ -646,22 +718,34 @@ enum AndroidApiLevel {
ANDROID_POST_LOLLIPOP = 23
};
+void WriteToSyslog(const char *buffer);
+
+#if SANITIZER_MAC
+void LogFullErrorReport(const char *buffer);
+#else
+INLINE void LogFullErrorReport(const char *buffer) {}
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+void WriteOneLineToSyslog(const char *s);
+void LogMessageOnPrintf(const char *str);
+#else
+INLINE void WriteOneLineToSyslog(const char *s) {}
+INLINE void LogMessageOnPrintf(const char *str) {}
+#endif
+
#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
-void WriteToSyslog(const char *buffer);
#else
INLINE void AndroidLogInit() {}
-INLINE void WriteToSyslog(const char *buffer) {}
#endif
#if SANITIZER_ANDROID
-void GetExtraActivationFlags(char *buf, uptr size);
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
-INLINE void GetExtraActivationFlags(char *buf, uptr size) { *buf = '\0'; }
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
@@ -686,7 +770,7 @@ void MaybeStartBackgroudThread();
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
-#if _MSC_VER && !defined(__clang__)
+#if defined(_MSC_VER) && !defined(__clang__)
_ReadWriteBarrier();
#else
__asm__ __volatile__("" : : "r" (arg) : "memory");
@@ -699,27 +783,68 @@ struct SignalContext {
uptr pc;
uptr sp;
uptr bp;
+ bool is_memory_access;
- SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp) :
- context(context), addr(addr), pc(pc), sp(sp), bp(bp) {
- }
+ enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
+
+ SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
+ bool is_memory_access, WriteFlag write_flag)
+ : context(context),
+ addr(addr),
+ pc(pc),
+ sp(sp),
+ bp(bp),
+ is_memory_access(is_memory_access),
+ write_flag(write_flag) {}
// Creates signal context in a platform-specific manner.
static SignalContext Create(void *siginfo, void *context);
+
+  // Classifies the access in "context" as a read, a write, or unknown.
+ static WriteFlag GetWriteFlag(void *context);
};
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);
-} // namespace __sanitizer
+void MaybeReexec();
-inline void *operator new(__sanitizer::operator_new_size_type size,
- __sanitizer::LowLevelAllocator &alloc) {
- return alloc.Allocate(size);
+template <typename Fn>
+class RunOnDestruction {
+ public:
+ explicit RunOnDestruction(Fn fn) : fn_(fn) {}
+ ~RunOnDestruction() { fn_(); }
+
+ private:
+ Fn fn_;
+};
+
+// A simple scope guard. Usage:
+// auto cleanup = at_scope_exit([]{ do_cleanup; });
+template <typename Fn>
+RunOnDestruction<Fn> at_scope_exit(Fn fn) {
+ return RunOnDestruction<Fn>(fn);
}
+// Linux on 64-bit s390 had a nasty bug that crashed the whole machine
+// if a process used virtual memory over 4TB (as many sanitizers like
+// to do). This function aborts the process if it is running on a kernel
+// that looks vulnerable.
+#if SANITIZER_LINUX && SANITIZER_S390_64
+void AvoidCVE_2016_2143();
+#else
+INLINE void AvoidCVE_2016_2143() {}
+#endif
+
struct StackDepotStats {
uptr n_uniq_ids;
uptr allocated;
};
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_size_type size,
+ __sanitizer::LowLevelAllocator &alloc) {
+ return alloc.Allocate(size);
+}
+
#endif // SANITIZER_COMMON_H
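
Of the additions to this header, the scope guard is the most reusable idiom: RunOnDestruction stores a callable and invokes it from its destructor, so at_scope_exit() gives ad-hoc RAII cleanup without writing a dedicated class. A compilable sketch outside the runtime:

    #include <cstdio>

    template <typename Fn>
    class RunOnDestruction {
     public:
      explicit RunOnDestruction(Fn fn) : fn_(fn) {}
      ~RunOnDestruction() { fn_(); }
     private:
      Fn fn_;
    };

    template <typename Fn>
    RunOnDestruction<Fn> at_scope_exit(Fn fn) {
      return RunOnDestruction<Fn>(fn);
    }

    int main() {
      std::puts("open");
      auto cleanup = at_scope_exit([] { std::puts("close"); });
      std::puts("work");
      return 0;
    }  // prints: open, work, close
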
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
index 8223778cac4..38390ed9621 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc
@@ -9,7 +9,7 @@
// ThreadSanitizer, MemorySanitizer, etc.
//
// This file should be included into the tool's interceptor file,
-// which has to define it's own macros:
+// which has to define its own macros:
// COMMON_INTERCEPTOR_ENTER
// COMMON_INTERCEPTOR_ENTER_NOIGNORE
// COMMON_INTERCEPTOR_READ_RANGE
@@ -89,6 +89,10 @@
#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}
#endif
+#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}
+#endif
+
#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
#endif
@@ -141,6 +145,22 @@
#define COMMON_INTERCEPTOR_RELEASE(ctx, u) {}
#endif
+#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_START
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_END
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() {}
+#endif
+
+#ifdef SANITIZER_NLDBL_VERSION
+#define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
+ COMMON_INTERCEPT_FUNCTION_VER(fn, SANITIZER_NLDBL_VERSION)
+#else
+#define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
+ COMMON_INTERCEPT_FUNCTION(fn)
+#endif
+
struct FileMetadata {
// For open_memstream().
char **addr;
@@ -190,6 +210,40 @@ UNUSED static void DeleteInterceptorMetadata(void *addr) {
}
#endif // SI_NOT_WINDOWS
+#if SANITIZER_INTERCEPT_STRLEN
+INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ // Sometimes strlen is called prior to InitializeCommonInterceptors,
+ // in which case the REAL(strlen) typically used in
+ // COMMON_INTERCEPTOR_ENTER will fail. We use internal_strlen here
+ // to handle that.
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strlen(s);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlen, s);
+ SIZE_T result = REAL(strlen)(s);
+ if (common_flags()->intercept_strlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, result + 1);
+ return result;
+}
+#define INIT_STRLEN COMMON_INTERCEPT_FUNCTION(strlen)
+#else
+#define INIT_STRLEN
+#endif
+
+#if SANITIZER_INTERCEPT_STRNLEN
+INTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnlen, s, maxlen);
+ SIZE_T length = REAL(strnlen)(s, maxlen);
+ if (common_flags()->intercept_strlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, Min(length + 1, maxlen));
+ return length;
+}
+#define INIT_STRNLEN COMMON_INTERCEPT_FUNCTION(strnlen)
+#else
+#define INIT_STRNLEN
+#endif
+
#if SANITIZER_INTERCEPT_TEXTDOMAIN
INTERCEPTOR(char*, textdomain, const char *domainname) {
void *ctx;
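
The new strlen/strnlen interceptors follow the template every INTERCEPTOR() body in this file uses: bail out to an internal_* implementation if the runtime is not yet initialized, announce the call via COMMON_INTERCEPTOR_ENTER, invoke REAL(), then mark the memory the call actually touched. Stripped of the macros, the shape is roughly this sketch (OnRead is a hypothetical stand-in for COMMON_INTERCEPTOR_READ_RANGE):

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    // Stand-in for COMMON_INTERCEPTOR_READ_RANGE: tell the tool what was read.
    static void OnRead(const void *p, size_t n) {
      std::fprintf(stderr, "read %zu bytes at %p\n", n, p);
    }

    // Shape of INTERCEPTOR(SIZE_T, strlen, ...): run the real call, then report.
    static size_t wrapped_strlen(const char *s) {
      size_t result = std::strlen(s);  // stands in for REAL(strlen)(s)
      OnRead(s, result + 1);           // the call read the string and its NUL
      return result;
    }

    int main() {
      wrapped_strlen("hello");
      return 0;
    }
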
@@ -212,13 +266,11 @@ static inline int CharCmpX(unsigned char c1, unsigned char c2) {
}
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
- const char *s1, const char *s2)
+ const char *s1, const char *s2, int result)
INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2);
- CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
- s2);
unsigned char c1, c2;
uptr i;
for (i = 0;; i++) {
@@ -228,19 +280,21 @@ INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
}
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
- return CharCmpX(c1, c2);
+ int result = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
+ s2, result);
+ return result;
}
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc,
- const char *s1, const char *s2, uptr n)
+ const char *s1, const char *s2, uptr n,
+ int result)
INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return internal_strncmp(s1, s2, size);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);
- CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
- s2, size);
unsigned char c1 = 0, c2 = 0;
uptr i;
for (i = 0; i < size; i++) {
@@ -248,9 +302,12 @@ INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
c2 = (unsigned char)s2[i];
if (c1 != c2 || c1 == '\0') break;
}
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
- COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
- return CharCmpX(c1, c2);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, Min(i + 1, size));
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, Min(i + 1, size));
+ int result = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
+ s2, size, result);
+ return result;
}
#define INIT_STRCMP COMMON_INTERCEPT_FUNCTION(strcmp)
@@ -267,6 +324,9 @@ static inline int CharCaseCmp(unsigned char c1, unsigned char c2) {
return c1_low - c2_low;
}
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, uptr called_pc,
+ const char *s1, const char *s2, int result)
+
INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2);
@@ -279,9 +339,16 @@ INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
}
COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
- return CharCaseCmp(c1, c2);
+ int result = CharCaseCmp(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, GET_CALLER_PC(),
+ s1, s2, result);
+ return result;
}
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
+ const char *s1, const char *s2, uptr n,
+ int result)
+
INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, n);
@@ -294,7 +361,10 @@ INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T n) {
}
COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, n));
COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, n));
- return CharCaseCmp(c1, c2);
+ int result = CharCaseCmp(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
+ s1, s2, n, result);
+ return result;
}
#define INIT_STRCASECMP COMMON_INTERCEPT_FUNCTION(strcasecmp)
@@ -316,6 +386,10 @@ static inline void StrstrCheck(void *ctx, char *r, const char *s1,
#endif
#if SANITIZER_INTERCEPT_STRSTR
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
+ const char *s1, const char *s2, char *result)
+
INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return internal_strstr(s1, s2);
@@ -324,6 +398,8 @@ INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
char *r = REAL(strstr)(s1, s2);
if (common_flags()->intercept_strstr)
StrstrCheck(ctx, r, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, GET_CALLER_PC(), s1,
+ s2, r);
return r;
}
@@ -333,12 +409,18 @@ INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
#endif
#if SANITIZER_INTERCEPT_STRCASESTR
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
+ const char *s1, const char *s2, char *result)
+
INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2);
char *r = REAL(strcasestr)(s1, s2);
if (common_flags()->intercept_strstr)
StrstrCheck(ctx, r, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, GET_CALLER_PC(),
+ s1, s2, r);
return r;
}
@@ -347,6 +429,79 @@ INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
#define INIT_STRCASESTR
#endif
+#if SANITIZER_INTERCEPT_MEMMEM
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
+ const void *s1, SIZE_T len1, const void *s2,
+ SIZE_T len2, void *result)
+
+INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,
+ SIZE_T len2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memmem, s1, len1, s2, len2);
+ void *r = REAL(memmem)(s1, len1, s2, len2);
+ if (common_flags()->intercept_memmem) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, len1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2);
+ }
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, GET_CALLER_PC(),
+ s1, len1, s2, len2, r);
+ return r;
+}
+
+#define INIT_MEMMEM COMMON_INTERCEPT_FUNCTION(memmem);
+#else
+#define INIT_MEMMEM
+#endif // SANITIZER_INTERCEPT_MEMMEM
+
+#if SANITIZER_INTERCEPT_STRCHR
+INTERCEPTOR(char*, strchr, const char *s, int c) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strchr(s, c);
+ COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);
+ char *result = REAL(strchr)(s, c);
+ uptr len = internal_strlen(s);
+ uptr n = result ? result - s + 1 : len + 1;
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, n);
+ return result;
+}
+#define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)
+#else
+#define INIT_STRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_STRCHRNUL
+INTERCEPTOR(char*, strchrnul, const char *s, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strchrnul, s, c);
+ char *result = REAL(strchrnul)(s, c);
+ uptr len = result - s + 1;
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, len);
+ return result;
+}
+#define INIT_STRCHRNUL COMMON_INTERCEPT_FUNCTION(strchrnul)
+#else
+#define INIT_STRCHRNUL
+#endif
+
+#if SANITIZER_INTERCEPT_STRRCHR
+INTERCEPTOR(char*, strrchr, const char *s, int c) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strrchr(s, c);
+ COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
+ uptr len = internal_strlen(s);
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_STRING_OF_LEN(ctx, s, len, len + 1);
+ return REAL(strrchr)(s, c);
+}
+#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
+#else
+#define INIT_STRRCHR
+#endif
+
#if SANITIZER_INTERCEPT_STRSPN
INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
void *ctx;
@@ -395,18 +550,75 @@ INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
#define INIT_STRPBRK
#endif
+#if SANITIZER_INTERCEPT_MEMSET
+INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memset(dst, v, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size);
+ if (common_flags()->intercept_intrin)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
+ return REAL(memset)(dst, v, size);
+}
+
+#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
+#else
+#define INIT_MEMSET
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMOVE
+INTERCEPTOR(void*, memmove, void *dst, const void *src, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memmove(dst, src, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size);
+ if (common_flags()->intercept_intrin) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
+ }
+ return REAL(memmove)(dst, src, size);
+}
+
+#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
+#else
+#define INIT_MEMMOVE
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCPY
+INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
+ // On OS X, calling internal_memcpy here will cause memory corruptions,
+ // because memcpy and memmove are actually aliases of the same
+ // implementation. We need to use internal_memmove here.
+ return internal_memmove(dst, src, size);
+ }
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size);
+ if (common_flags()->intercept_intrin) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size);
+ }
+ // N.B.: If we switch this to internal_ we'll have to use internal_memmove
+ // due to memcpy being an alias of memmove on OS X.
+ return REAL(memcpy)(dst, src, size);
+}
+
+#define INIT_MEMCPY COMMON_INTERCEPT_FUNCTION(memcpy)
+#else
+#define INIT_MEMCPY
+#endif
+
#if SANITIZER_INTERCEPT_MEMCMP
DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
- const void *s1, const void *s2, uptr n)
+ const void *s1, const void *s2, uptr n,
+ int result)
INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
- void *ctx;
- COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size);
if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
return internal_memcmp(a1, a2, size);
- CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1,
- a2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size);
if (common_flags()->intercept_memcmp) {
if (common_flags()->strict_memcmp) {
// Check the entire regions even if the first bytes of the buffers are
@@ -426,10 +638,16 @@ INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
}
COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
- return CharCmpX(c1, c2);
+ int r = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(),
+ a1, a2, size, r);
+ return r;
}
}
- return REAL(memcmp(a1, a2, size));
+ int result = REAL(memcmp(a1, a2, size));
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1,
+ a2, size, result);
+ return result;
}
#define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp)
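
The memcpy interceptor's fallback to internal_memmove matters on OS X, where memcpy and memmove are aliases of one implementation: anything intercepting "memcpy" may in fact receive overlapping buffers and must preserve memmove semantics. The difference in one overlapping copy:

    #include <cassert>
    #include <cstring>

    int main() {
      char buf[] = "abcdef";
      // Overlapping copy: well-defined for memmove, undefined for memcpy.
      std::memmove(buf + 1, buf, 5);
      assert(std::strcmp(buf, "aabcde") == 0);
      return 0;
    }
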
@@ -443,7 +661,16 @@ INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
return internal_memchr(s, c, n);
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+#if SANITIZER_WINDOWS
+ void *res;
+ if (REAL(memchr)) {
+ res = REAL(memchr)(s, c, n);
+ } else {
+ res = internal_memchr(s, c, n);
+ }
+#else
void *res = REAL(memchr)(s, c, n);
+#endif
uptr len = res ? (char *)res - (const char *)s + 1 : n;
COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
return res;
@@ -488,7 +715,7 @@ INTERCEPTOR(float, frexpf, float x, int *exp) {
COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
float res = REAL(frexpf)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
@@ -499,7 +726,7 @@ INTERCEPTOR(long double, frexpl, long double x, int *exp) {
COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
long double res = REAL(frexpl)(x, exp);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
return res;
@@ -507,7 +734,7 @@ INTERCEPTOR(long double, frexpl, long double x, int *exp) {
#define INIT_FREXPF_FREXPL \
COMMON_INTERCEPT_FUNCTION(frexpf); \
- COMMON_INTERCEPT_FUNCTION(frexpl)
+ COMMON_INTERCEPT_FUNCTION_LDBL(frexpl)
#else
#define INIT_FREXPF_FREXPL
#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL
@@ -540,7 +767,7 @@ INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(read)(fd, ptr, count);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -558,7 +785,7 @@ INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -576,7 +803,7 @@ INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -823,7 +1050,7 @@ INTERCEPTOR(char *, ctime, unsigned long *timep) {
COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(ctime)(timep);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
@@ -836,7 +1063,7 @@ INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(ctime_r)(timep, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
@@ -849,7 +1076,7 @@ INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(asctime)(tm);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
@@ -862,7 +1089,7 @@ INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(asctime_r)(tm, result);
if (res) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
@@ -906,7 +1133,7 @@ INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(strptime)(s, format, tm);
COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0);
if (res && tm) {
@@ -1043,7 +1270,7 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
// FIXME: under ASan the REAL() call below may write to freed memory and
// corrupt its metadata. See
-// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+// https://github.com/google/sanitizers/issues/321.
#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
{ \
VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
@@ -1060,7 +1287,7 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
// FIXME: under ASan the REAL() call below may write to freed memory and
// corrupt its metadata. See
-// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+// https://github.com/google/sanitizers/issues/321.
#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
{ \
VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
@@ -1077,7 +1304,7 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
// FIXME: under ASan the REAL() call below may write to freed memory and
// corrupt its metadata. See
-// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+// https://github.com/google/sanitizers/issues/321.
#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
{ \
VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
@@ -1362,7 +1589,7 @@ INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_passwd(ctx, *result);
@@ -1377,7 +1604,7 @@ INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,
COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_passwd(ctx, *result);
@@ -1393,7 +1620,7 @@ INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_group(ctx, *result);
@@ -1408,7 +1635,7 @@ INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,
COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
if (!res) {
if (result && *result) unpoison_group(ctx, *result);
@@ -1477,7 +1704,7 @@ INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,
COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
@@ -1492,7 +1719,7 @@ INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,
COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_passwd(ctx, *pwbufp);
@@ -1507,7 +1734,7 @@ INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,
COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
@@ -1522,7 +1749,7 @@ INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,
COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
if (!res) {
if (pwbufp && *pwbufp) unpoison_group(ctx, *pwbufp);
@@ -1579,7 +1806,7 @@ INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(clock_getres)(clk_id, tp);
if (!res && tp) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
@@ -1591,7 +1818,7 @@ INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(clock_gettime)(clk_id, tp);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
@@ -1618,7 +1845,7 @@ INTERCEPTOR(int, getitimer, int which, void *curr_value) {
COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getitimer)(which, curr_value);
if (!res && curr_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
@@ -1632,7 +1859,7 @@ INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerval_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(setitimer)(which, new_value, old_value);
if (!res && old_value) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
@@ -1689,16 +1916,19 @@ static int wrapped_gl_stat(const char *s, void *st) {
return pglob_copy->gl_stat(s, st);
}
+static const __sanitizer_glob_t kGlobCopy = {
+ 0, 0, 0,
+ 0, wrapped_gl_closedir, wrapped_gl_readdir,
+ wrapped_gl_opendir, wrapped_gl_lstat, wrapped_gl_stat};
+
INTERCEPTOR(int, glob, const char *pattern, int flags,
int (*errfunc)(const char *epath, int eerrno),
__sanitizer_glob_t *pglob) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
- __sanitizer_glob_t glob_copy = {
- 0, 0, 0,
- 0, wrapped_gl_closedir, wrapped_gl_readdir,
- wrapped_gl_opendir, wrapped_gl_lstat, wrapped_gl_stat};
+ __sanitizer_glob_t glob_copy;
+ internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));
if (flags & glob_altdirfunc) {
Swap(pglob->gl_closedir, glob_copy.gl_closedir);
Swap(pglob->gl_readdir, glob_copy.gl_readdir);
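
Hoisting the callback table into the static kGlobCopy and filling the local with internal_memcpy appears deliberate: a braced initialization of a large local struct may be lowered by the compiler into a call to the (intercepted) memcpy/memset, re-entering the runtime. A neutral sketch of the shape, with std::memcpy standing in for internal_memcpy:

    #include <cstring>

    struct CallbackTable { void (*fn[8])(); };
    // Built once during static initialization; never re-built at runtime.
    static const CallbackTable kTemplate = {};

    void MakeCopy(CallbackTable *out) {
      // The runtime uses internal_memcpy here so no interceptor can recurse.
      std::memcpy(out, &kTemplate, sizeof(*out));
    }

    int main() {
      CallbackTable t;
      MakeCopy(&t);
      return 0;
    }
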
@@ -1726,10 +1956,8 @@ INTERCEPTOR(int, glob64, const char *pattern, int flags,
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob);
COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
- __sanitizer_glob_t glob_copy = {
- 0, 0, 0,
- 0, wrapped_gl_closedir, wrapped_gl_readdir,
- wrapped_gl_opendir, wrapped_gl_lstat, wrapped_gl_stat};
+ __sanitizer_glob_t glob_copy;
+ internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));
if (flags & glob_altdirfunc) {
Swap(pglob->gl_closedir, glob_copy.gl_closedir);
Swap(pglob->gl_readdir, glob_copy.gl_readdir);
@@ -1766,7 +1994,7 @@ INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(wait)(status);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1784,7 +2012,7 @@ INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(waitid)(idtype, id, infop, options);
if (res != -1 && infop)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
@@ -1795,7 +2023,7 @@ INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(waitpid)(pid, status, options);
if (res != -1 && status)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1806,7 +2034,7 @@ INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(wait3)(status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1820,7 +2048,7 @@ INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(__wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1835,7 +2063,7 @@ INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(wait4)(pid, status, options, rusage);
if (res != -1) {
if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
@@ -1864,7 +2092,7 @@ INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
// FIXME: figure out read size based on the address family.
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(inet_ntop)(af, src, dst, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -1876,7 +2104,7 @@ INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
// FIXME: figure out read size based on the address family.
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(inet_pton)(af, src, dst);
if (res == 1) {
uptr sz = __sanitizer_in_addr_sz(af);
@@ -1898,7 +2126,7 @@ INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(inet_aton)(cp, dst);
if (res != 0) {
uptr sz = __sanitizer_in_addr_sz(af_inet);
@@ -1917,7 +2145,7 @@ INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(pthread_getschedparam)(thread, policy, param);
if (res == 0) {
if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
@@ -1944,7 +2172,7 @@ INTERCEPTOR(int, getaddrinfo, char *node, char *service,
COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getaddrinfo)(node, service, hints, out);
if (res == 0 && out) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
@@ -1976,7 +2204,7 @@ INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
// There is padding in in_addr that may make this too noisy
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res =
REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
if (res == 0) {
@@ -2000,7 +2228,7 @@ INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
int addrlen_in = *addrlen;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getsockname)(sock_fd, addr, addrlen);
if (res == 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
@@ -2086,7 +2314,7 @@ INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
h_errnop);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
if (result) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -2109,7 +2337,7 @@ INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
h_errnop);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
if (result) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -2135,7 +2363,7 @@ INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
h_errnop);
if (result) {
@@ -2161,7 +2389,7 @@ INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
result, h_errnop);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res =
REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
if (result) {
@@ -2187,7 +2415,7 @@ INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
if (res == 0)
if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
@@ -2231,7 +2459,7 @@ INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int fd2 = REAL(accept4)(fd, addr, addrlen, f);
if (fd2 >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
@@ -2251,7 +2479,7 @@ INTERCEPTOR(double, modf, double x, double *iptr) {
COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
double res = REAL(modf)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -2263,7 +2491,7 @@ INTERCEPTOR(float, modff, float x, float *iptr) {
COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
float res = REAL(modff)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -2275,7 +2503,7 @@ INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
long double res = REAL(modfl)(x, iptr);
if (iptr) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
@@ -2285,7 +2513,7 @@ INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
#define INIT_MODF \
COMMON_INTERCEPT_FUNCTION(modf); \
COMMON_INTERCEPT_FUNCTION(modff); \
- COMMON_INTERCEPT_FUNCTION(modfl);
+ COMMON_INTERCEPT_FUNCTION_LDBL(modfl);
#else
#define INIT_MODF
#endif
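
The switch to COMMON_INTERCEPT_FUNCTION_LDBL here (and for sincosl, remquol, lgammal and lgammal_r below) matters on glibc targets where long double changed ABI and the bare symbol name is only a compatibility stub. A minimal sketch of what such a dispatch macro can look like, assuming a SANITIZER_NLDBL_VERSION-style gate and a versioned COMMON_INTERCEPT_FUNCTION_VER helper rather than quoting the file's actual definition:

    #if SANITIZER_NLDBL_VERSION
    // Long-double ABI split: bind to the versioned long-double symbol.
    #define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
      COMMON_INTERCEPT_FUNCTION_VER(fn, SANITIZER_NLDBL_VERSION)
    #else
    // No ABI split on this target; the plain interceptor binding is fine.
    #define COMMON_INTERCEPT_FUNCTION_LDBL(fn) COMMON_INTERCEPT_FUNCTION(fn)
    #endif
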
@@ -2310,7 +2538,7 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
if (res >= 0) {
if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
@@ -2326,6 +2554,75 @@ INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
#define INIT_RECVMSG
#endif
+#if SANITIZER_INTERCEPT_SENDMSG
+static void read_msghdr_control(void *ctx, void *control, uptr controllen) {
+ const unsigned kCmsgDataOffset =
+ RoundUpTo(sizeof(__sanitizer_cmsghdr), sizeof(uptr));
+
+ char *p = (char *)control;
+ char *const control_end = p + controllen;
+ while (true) {
+ if (p + sizeof(__sanitizer_cmsghdr) > control_end) break;
+ __sanitizer_cmsghdr *cmsg = (__sanitizer_cmsghdr *)p;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_len, sizeof(cmsg->cmsg_len));
+
+ if (p + RoundUpTo(cmsg->cmsg_len, sizeof(uptr)) > control_end) break;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_level,
+ sizeof(cmsg->cmsg_level));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_type,
+ sizeof(cmsg->cmsg_type));
+
+ if (cmsg->cmsg_len > kCmsgDataOffset) {
+ char *data = p + kCmsgDataOffset;
+ unsigned data_len = cmsg->cmsg_len - kCmsgDataOffset;
+ if (data_len > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, data, data_len);
+ }
+
+ p += RoundUpTo(cmsg->cmsg_len, sizeof(uptr));
+ }
+}
+
+static void read_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
+ SSIZE_T maxlen) {
+#define R(f) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &msg->msg_##f, sizeof(msg->msg_##f))
+ R(name);
+ R(namelen);
+ R(iov);
+ R(iovlen);
+ R(control);
+ R(controllen);
+ R(flags);
+#undef R
+ if (msg->msg_name && msg->msg_namelen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_name, msg->msg_namelen);
+ if (msg->msg_iov && msg->msg_iovlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_iov,
+ sizeof(*msg->msg_iov) * msg->msg_iovlen);
+ read_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
+ if (msg->msg_control && msg->msg_controllen)
+ read_msghdr_control(ctx, msg->msg_control, msg->msg_controllen);
+}
+
+INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendmsg, fd, msg, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ SSIZE_T res = REAL(sendmsg)(fd, msg, flags);
+ if (common_flags()->intercept_send && res >= 0 && msg)
+ read_msghdr(ctx, msg, res);
+ return res;
+}
+#define INIT_SENDMSG COMMON_INTERCEPT_FUNCTION(sendmsg);
+#else
+#define INIT_SENDMSG
+#endif
+
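
read_msghdr_control() above open-codes the ancillary-data walk because the runtime cannot depend on libc headers for cmsg layout. For orientation, the same traversal written with the portable CMSG_* macros from <sys/socket.h> (illustration only; walk_cmsgs is a hypothetical name):

    #include <stddef.h>
    #include <sys/socket.h>

    static void walk_cmsgs(struct msghdr *msg) {
      for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg); cmsg;
           cmsg = CMSG_NXTHDR(msg, cmsg)) {
        // cmsg_len covers the aligned header plus the payload -- the same
        // bound read_msghdr_control() checks against control_end before it
        // touches cmsg_level, cmsg_type and the trailing data.
        size_t payload = cmsg->cmsg_len - CMSG_LEN(0);
        (void)payload;
      }
    }

Note also that sendmsg's msghdr annotation is gated on the intercept_send runtime flag, so the stricter checking can be turned off without rebuilding.
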
#if SANITIZER_INTERCEPT_GETPEERNAME
INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
void *ctx;
@@ -2334,7 +2631,7 @@ INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
if (addrlen) addr_sz = *addrlen;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getpeername)(sockfd, addr, addrlen);
if (!res && addr && addrlen)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
@@ -2350,7 +2647,7 @@ INTERCEPTOR(int, sysinfo, void *info) {
void *ctx;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
int res = REAL(sysinfo)(info);
if (!res && info)
@@ -2378,7 +2675,7 @@ INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent *res = REAL(readdir)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
@@ -2390,7 +2687,7 @@ INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(readdir_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -2414,7 +2711,7 @@ INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
__sanitizer_dirent64 *res = REAL(readdir64)(dirp);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
return res;
@@ -2426,7 +2723,7 @@ INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(readdir64_r)(dirp, entry, result);
if (!res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -2473,7 +2770,7 @@ INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
uptr res = REAL(ptrace)(request, pid, addr, data);
if (!res && data) {
@@ -2528,7 +2825,7 @@ INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(getcwd)(buf, size);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -2544,7 +2841,7 @@ INTERCEPTOR(char *, get_current_dir_name, int fake) {
COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(get_current_dir_name)(fake);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -2593,7 +2890,7 @@ INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *real_endptr;
INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
@@ -2605,7 +2902,7 @@ INTERCEPTOR(INTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *real_endptr;
INTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
@@ -2625,7 +2922,7 @@ INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(mbstowcs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
@@ -2642,7 +2939,7 @@ INTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
// This function, and several others, may or may not write the terminating
@@ -2672,7 +2969,7 @@ INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
if (res != (SIZE_T)(-1) && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -2692,7 +2989,7 @@ INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(wcstombs)(dest, src, len);
if (res != (SIZE_T) - 1 && dest) {
SIZE_T write_cnt = res + (res < len);
@@ -2709,7 +3006,7 @@ INTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
if (res != (SIZE_T) - 1 && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -2737,7 +3034,7 @@ INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
if (res != ((SIZE_T)-1) && dest && src) {
SIZE_T write_cnt = res + !*src;
@@ -2759,7 +3056,7 @@ INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) {
if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(wcrtomb)(dest, src, ps);
if (res != ((SIZE_T)-1) && dest) {
SIZE_T write_cnt = res;
@@ -2779,7 +3076,7 @@ INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(tcgetattr)(fd, termios_p);
if (!res && termios_p)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
@@ -2836,7 +3133,7 @@ INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(confstr)(name, buf, len);
if (buf && res)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
@@ -2853,7 +3150,7 @@ INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
return res;
@@ -2895,7 +3192,7 @@ INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(strerror_r)(errnum, buf, buflen);
// There are 2 versions of strerror_r:
// * POSIX version returns 0 on success, negative error code on failure,
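
strerror_r's two variants cannot be told apart at interception time from the prototype alone, so the code inspects the returned pointer at run time. From the caller's side the GNU variant looks like this (glibc selects it under _GNU_SOURCE; the XSI variant returns int instead):

    #define _GNU_SOURCE 1  // ask glibc for the char * variant
    #include <stdio.h>
    #include <string.h>

    int main() {
      char buf[128];
      // The result may or may not point into buf, which is exactly the
      // ambiguity the interceptor resolves before marking bytes written.
      char *msg = strerror_r(34, buf, sizeof(buf));
      printf("%s\n", msg);
      return 0;
    }
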
@@ -2926,7 +3223,7 @@ INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
// This version always returns a null-terminated string.
if (buf && buflen)
@@ -2971,7 +3268,7 @@ INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
scandir_compar = compar;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(scandir)(dirp, namelist,
filter ? wrapped_scandir_filter : nullptr,
compar ? wrapped_scandir_compar : nullptr);
@@ -3024,7 +3321,7 @@ INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
scandir64_compar = compar;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res =
REAL(scandir64)(dirp, namelist,
filter ? wrapped_scandir64_filter : nullptr,
@@ -3051,7 +3348,7 @@ INTERCEPTOR(int, getgroups, int size, u32 *lst) {
COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getgroups)(size, lst);
if (res && lst) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
return res;
@@ -3117,7 +3414,7 @@ INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(wordexp)(s, p, flags);
if (!res && p) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
@@ -3143,7 +3440,7 @@ INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigwait)(set, sig);
if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
return res;
@@ -3160,7 +3457,7 @@ INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigwaitinfo)(set, info);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
@@ -3179,7 +3476,7 @@ INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigtimedwait)(set, info, timeout);
if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
return res;
@@ -3195,7 +3492,7 @@ INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigemptyset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -3206,7 +3503,7 @@ INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigfillset)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -3224,7 +3521,7 @@ INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigpending)(set);
if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
return res;
@@ -3242,7 +3539,7 @@ INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
// FIXME: read sigset_t when all of sigemptyset, etc are intercepted
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(sigprocmask)(how, set, oldset);
if (!res && oldset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
@@ -3259,7 +3556,7 @@ INTERCEPTOR(int, backtrace, void **buffer, int size) {
COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(backtrace)(buffer, size);
if (res && buffer)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
@@ -3273,7 +3570,7 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char **res = REAL(backtrace_symbols)(buffer, size);
if (res && size) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
@@ -3293,7 +3590,9 @@ INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
INTERCEPTOR(void, _exit, int status) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, _exit, status);
+ COMMON_INTERCEPTOR_USER_CALLBACK_START();
int status1 = COMMON_INTERCEPTOR_ON_EXIT(ctx);
+ COMMON_INTERCEPTOR_USER_CALLBACK_END();
if (status == 0) status = status1;
REAL(_exit)(status);
}
@@ -3311,6 +3610,8 @@ INTERCEPTOR(int, pthread_mutex_lock, void *m) {
COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
if (res == 0 || res == errno_EOWNERDEAD)
COMMON_INTERCEPTOR_MUTEX_LOCK(ctx, m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
return res;
}
@@ -3318,7 +3619,10 @@ INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
- return REAL(pthread_mutex_unlock)(m);
+ int res = REAL(pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
}
#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
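
Both interceptors now forward an EINVAL return to COMMON_INTERCEPTOR_MUTEX_INVALID, letting a tool report operations on a destroyed or never-initialized mutex instead of silently annotating them. A standalone illustration of the misuse this catches (formally undefined behavior; an error-checking mutex merely makes it observable as an error code):

    #include <pthread.h>
    #include <stdio.h>

    int main() {
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
      pthread_mutex_t m;
      pthread_mutex_init(&m, &attr);
      pthread_mutex_destroy(&m);
      // Locking a destroyed mutex may fail with EINVAL, which the
      // interceptor above turns into COMMON_INTERCEPTOR_MUTEX_INVALID.
      int rc = pthread_mutex_lock(&m);
      printf("lock after destroy -> %d\n", rc);
      return 0;
    }
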
@@ -3381,7 +3685,7 @@ INTERCEPTOR(int, statfs, char *path, void *buf) {
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(statfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
@@ -3391,7 +3695,7 @@ INTERCEPTOR(int, fstatfs, int fd, void *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(fstatfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
return res;
@@ -3410,7 +3714,7 @@ INTERCEPTOR(int, statfs64, char *path, void *buf) {
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(statfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
@@ -3420,7 +3724,7 @@ INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(fstatfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
return res;
@@ -3439,7 +3743,7 @@ INTERCEPTOR(int, statvfs, char *path, void *buf) {
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(statvfs)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -3449,7 +3753,7 @@ INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(fstatvfs)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
return res;
@@ -3468,7 +3772,7 @@ INTERCEPTOR(int, statvfs64, char *path, void *buf) {
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(statvfs64)(path, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
@@ -3478,7 +3782,7 @@ INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(fstatvfs64)(fd, buf);
if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
return res;
@@ -3534,7 +3838,7 @@ INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(ether_ntohost)(hostname, addr);
if (!res && hostname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
@@ -3547,7 +3851,7 @@ INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(ether_hostton)(hostname, addr);
if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
return res;
@@ -3559,7 +3863,7 @@ INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(ether_line)(line, addr, hostname);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
@@ -3583,7 +3887,7 @@ INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(ether_ntoa_r)(addr, buf);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
return res;
@@ -3595,7 +3899,7 @@ INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
__sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);
if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));
return res;
@@ -3613,7 +3917,7 @@ INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(shmctl)(shmid, cmd, buf);
if (res >= 0) {
unsigned sz = 0;
@@ -3638,7 +3942,7 @@ INTERCEPTOR(int, random_r, void *buf, u32 *result) {
COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(random_r)(buf, result);
if (!res && result)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
@@ -3651,7 +3955,7 @@ INTERCEPTOR(int, random_r, void *buf, u32 *result) {
// FIXME: under ASan the REAL() call below may write to freed memory and corrupt
// its metadata. See
-// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+// https://github.com/google/sanitizers/issues/321.
#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSSCHED || \
SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \
@@ -3690,7 +3994,7 @@ INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(pthread_attr_getstack)(attr, addr, size);
if (!res) {
if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
@@ -3739,7 +4043,7 @@ INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
cpuset);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);
if (!res && cpusetsize && cpuset)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
@@ -3849,7 +4153,7 @@ INTERCEPTOR(char *, tmpnam, char *s) {
if (s)
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
else
COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
@@ -3867,7 +4171,7 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(tmpnam_r)(s);
if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
return res;
@@ -3877,6 +4181,20 @@ INTERCEPTOR(char *, tmpnam_r, char *s) {
#define INIT_TMPNAM_R
#endif
+#if SANITIZER_INTERCEPT_TTYNAME_R
+INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
+ int res = REAL(ttyname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
+#else
+#define INIT_TTYNAME_R
+#endif
+
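
ttyname_r has unambiguous success semantics -- it returns 0 and leaves a NUL-terminated path in the buffer -- so the interceptor can mark exactly strlen(name) + 1 bytes. Typical call shape:

    #include <stdio.h>
    #include <unistd.h>

    int main() {
      char name[128];
      // 0 on success, an errno-style code otherwise; no errno inspection
      // is needed, matching the res == 0 check in the interceptor.
      if (ttyname_r(STDIN_FILENO, name, sizeof(name)) == 0)
        printf("stdin is %s\n", name);
      return 0;
    }
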
#if SANITIZER_INTERCEPT_TEMPNAM
INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
void *ctx;
@@ -3911,7 +4229,7 @@ INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
REAL(sincos)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -3921,7 +4239,7 @@ INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
REAL(sincosf)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -3931,7 +4249,7 @@ INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
REAL(sincosl)(x, sin, cos);
if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
@@ -3939,7 +4257,7 @@ INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
#define INIT_SINCOS \
COMMON_INTERCEPT_FUNCTION(sincos); \
COMMON_INTERCEPT_FUNCTION(sincosf); \
- COMMON_INTERCEPT_FUNCTION(sincosl);
+ COMMON_INTERCEPT_FUNCTION_LDBL(sincosl);
#else
#define INIT_SINCOS
#endif
@@ -3950,7 +4268,7 @@ INTERCEPTOR(double, remquo, double x, double y, int *quo) {
COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
double res = REAL(remquo)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -3960,7 +4278,7 @@ INTERCEPTOR(float, remquof, float x, float y, int *quo) {
COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
float res = REAL(remquof)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -3970,7 +4288,7 @@ INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
long double res = REAL(remquol)(x, y, quo);
if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
return res;
@@ -3978,7 +4296,7 @@ INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
#define INIT_REMQUO \
COMMON_INTERCEPT_FUNCTION(remquo); \
COMMON_INTERCEPT_FUNCTION(remquof); \
- COMMON_INTERCEPT_FUNCTION(remquol);
+ COMMON_INTERCEPT_FUNCTION_LDBL(remquol);
#else
#define INIT_REMQUO
#endif
@@ -4009,7 +4327,7 @@ INTERCEPTOR(long double, lgammal, long double x) {
#define INIT_LGAMMA \
COMMON_INTERCEPT_FUNCTION(lgamma); \
COMMON_INTERCEPT_FUNCTION(lgammaf); \
- COMMON_INTERCEPT_FUNCTION(lgammal);
+ COMMON_INTERCEPT_FUNCTION_LDBL(lgammal);
#else
#define INIT_LGAMMA
#endif
@@ -4020,7 +4338,7 @@ INTERCEPTOR(double, lgamma_r, double x, int *signp) {
COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
double res = REAL(lgamma_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
@@ -4030,7 +4348,7 @@ INTERCEPTOR(float, lgammaf_r, float x, int *signp) {
COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
float res = REAL(lgammaf_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
@@ -4048,12 +4366,12 @@ INTERCEPTOR(long double, lgammal_r, long double x, int *signp) {
COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
long double res = REAL(lgammal_r)(x, signp);
if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
return res;
}
-#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION(lgammal_r);
+#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION_LDBL(lgammal_r);
#else
#define INIT_LGAMMAL_R
#endif
@@ -4064,7 +4382,7 @@ INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(drand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
@@ -4074,7 +4392,7 @@ INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(lrand48_r)(buffer, result);
if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
return res;
@@ -4104,7 +4422,7 @@ INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(getline)(lineptr, n, stream);
if (res > 0) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
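
getline may grow or replace the caller's buffer, so the interceptor marks the pointer slot itself as written and, on success, the line data as well, including the terminating NUL. The caller pattern being protected:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    int main() {
      char *line = nullptr;  // getline allocates and reallocates this
      size_t cap = 0;
      ssize_t n;
      while ((n = getline(&line, &cap, stdin)) > 0)
        fwrite(line, 1, (size_t)n, stdout);
      free(line);
      return 0;
    }
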
@@ -4116,7 +4434,7 @@ INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
// FIXME: under ASan the call below may write to freed memory and corrupt its
// metadata. See
-// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+// https://github.com/google/sanitizers/issues/321.
#define GETDELIM_INTERCEPTOR_IMPL(vname) \
{ \
void *ctx; \
@@ -4163,7 +4481,7 @@ INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
void *outbuf_orig = outbuf ? *outbuf : nullptr;
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
if (res != (SIZE_T) - 1 && outbuf && *outbuf > outbuf_orig) {
SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
@@ -4182,7 +4500,7 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
COMMON_INTERCEPTOR_ENTER(ctx, times, tms);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
__sanitizer_clock_t res = REAL(times)(tms);
if (res != (__sanitizer_clock_t)-1 && tms)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);
@@ -4194,6 +4512,7 @@ INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
#endif
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#if !SANITIZER_S390
#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
// If you see any crashes around this function, there are 2 known issues with
// it: 1. __tls_get_addr can be called with mis-aligned stack due to:
@@ -4214,6 +4533,67 @@ INTERCEPTOR(void *, __tls_get_addr, void *arg) {
}
return res;
}
+#if SANITIZER_PPC
+// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
+// mostly the same semantics as __tls_get_addr, but its presence enables
+// some optimizations in the linker (which are safe to ignore here).
+extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
+ visibility("default")))
+void *__tls_get_addr_opt(void *arg);
+#endif
+#else // SANITIZER_S390
+// On s390, we have to intercept two functions here:
+// - __tls_get_addr_internal, which is a glibc-internal function that is like
+// the usual __tls_get_addr, but returns a TP-relative offset instead of
+// a proper pointer. It is used by dlsym for TLS symbols.
+// - __tls_get_offset, which is like the above, but takes a GOT-relative
+//   descriptor offset as its argument instead of a pointer. The GOT address
+//   is passed in r12, so the entry point has to be written in assembly. This
+//   is the function the compiler emits calls to.
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr_internal)
+INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
+ uptr res = REAL(__tls_get_addr_internal)(arg);
+ uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer());
+ void *ptr = reinterpret_cast<void *>(res + tp);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, ptr, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+// We need a protected symbol aliasing the above, so that we can jump
+// directly to it from the assembly below.
+extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
+ visibility("protected")))
+uptr __interceptor___tls_get_addr_internal_protected(void *arg);
+// Now carefully intercept __tls_get_offset.
+asm(
+ ".text\n"
+ ".global __tls_get_offset\n"
+ "__tls_get_offset:\n"
+// The __interceptor_ version has to exist, so that gen_dynamic_list.py
+// exports our symbol.
+ ".global __interceptor___tls_get_offset\n"
+ "__interceptor___tls_get_offset:\n"
+#ifdef __s390x__
+ "la %r2, 0(%r2,%r12)\n"
+ "jg __interceptor___tls_get_addr_internal_protected\n"
+#else
+ "basr %r3,0\n"
+ "0: la %r2,0(%r2,%r12)\n"
+ "l %r4,1f-0b(%r3)\n"
+ "b 0(%r4,%r3)\n"
+ "1: .long __interceptor___tls_get_addr_internal_protected - 0b\n"
+#endif
+ ".type __tls_get_offset, @function\n"
+ ".size __tls_get_offset, .-__tls_get_offset\n"
+);
+#endif // SANITIZER_S390
#else
#define INIT_TLS_GET_ADDR
#endif
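
Both the PowerPC and s390 paths rely on GCC's alias attribute: one function body, several exported names, with protected visibility ensuring that the hand-written __tls_get_offset stub's jump cannot be interposed from another DSO. The pattern in isolation (impl and impl_alias are hypothetical names):

    // One definition, two ELF names. "protected" pins in-DSO references
    // to this definition, which is what makes the direct jump from the
    // assembly stub safe.
    extern "C" int impl(int x) { return x + 1; }
    extern "C" __attribute__((alias("impl"), visibility("protected")))
    int impl_alias(int x);
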
@@ -4225,7 +4605,7 @@ INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(listxattr)(path, list, size);
// Here and below, size == 0 is a special case where nothing is written to the
// buffer, and res contains the desired buffer size.
@@ -4238,7 +4618,7 @@ INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(llistxattr)(path, list, size);
if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
return res;
@@ -4248,7 +4628,7 @@ INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(flistxattr)(fd, list, size);
if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
return res;
@@ -4270,7 +4650,7 @@ INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(getxattr)(path, name, value, size);
if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
return res;
@@ -4283,7 +4663,7 @@ INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
return res;
@@ -4295,7 +4675,7 @@ INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
return res;
@@ -4314,7 +4694,7 @@ INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getresuid)(ruid, euid, suid);
if (res >= 0) {
if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
@@ -4328,7 +4708,7 @@ INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getresgid)(rgid, egid, sgid);
if (res >= 0) {
if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
@@ -4353,7 +4733,7 @@ INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(getifaddrs)(ifap);
if (res == 0 && ifap) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
@@ -4389,7 +4769,7 @@ INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
char *res = REAL(if_indextoname)(ifindex, ifname);
if (res && ifname)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
@@ -4417,7 +4797,7 @@ INTERCEPTOR(int, capget, void *hdrp, void *datap) {
COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(capget)(hdrp, datap);
if (res == 0 && datap)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
@@ -4518,7 +4898,7 @@ INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(ftime)(tp);
if (tp)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
@@ -4536,7 +4916,7 @@ INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,
COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
REAL(xdrmem_create)(xdrs, addr, size, op);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
if (op == __sanitizer_XDR_ENCODE) {
@@ -4551,14 +4931,14 @@ INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
REAL(xdrstdio_create)(xdrs, file, op);
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
-// https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+// https://github.com/google/sanitizers/issues/321.
#define XDR_INTERCEPTOR(F, T) \
INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
void *ctx; \
@@ -4612,7 +4992,7 @@ INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
@@ -4632,7 +5012,7 @@ INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
}
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
int res = REAL(xdr_string)(xdrs, p, maxsize);
if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
@@ -4684,7 +5064,7 @@ INTERCEPTOR(void *, tsearch, void *key, void **rootp,
COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
void *res = REAL(tsearch)(key, rootp, compar);
if (res && *(void **)res == key)
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
@@ -4766,7 +5146,7 @@ INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
void *ctx;
COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
- COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
__sanitizer_FILE *res = REAL(fopen)(path, mode);
COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
@@ -4837,7 +5217,7 @@ INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
__sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
if (res) {
COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
@@ -4868,7 +5248,7 @@ INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,
COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
// FIXME: under ASan the call below may write to freed memory and corrupt
// its metadata. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=321.
+ // https://github.com/google/sanitizers/issues/321.
__sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
if (res) unpoison_file(res);
return res;
@@ -5282,22 +5662,245 @@ INTERCEPTOR(SSIZE_T, process_vm_writev, int pid, __sanitizer_iovec *local_iov,
#define INIT_PROCESS_VM_READV
#endif
+#if SANITIZER_INTERCEPT_CTERMID
+INTERCEPTOR(char *, ctermid, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);
+ char *res = REAL(ctermid)(s);
+ if (res) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_CTERMID COMMON_INTERCEPT_FUNCTION(ctermid);
+#else
+#define INIT_CTERMID
+#endif
+
+#if SANITIZER_INTERCEPT_CTERMID_R
+INTERCEPTOR(char *, ctermid_r, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);
+ char *res = REAL(ctermid_r)(s);
+ if (res) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_CTERMID_R COMMON_INTERCEPT_FUNCTION(ctermid_r);
+#else
+#define INIT_CTERMID_R
+#endif
+
+#if SANITIZER_INTERCEPT_RECV_RECVFROM
+INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recv, fd, buf, len, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(recv)(fd, buf, len, flags);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ }
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,
+ void *srcaddr, int *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvfrom, fd, buf, len, flags, srcaddr,
+ addrlen);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SIZE_T srcaddr_sz;
+ if (srcaddr) srcaddr_sz = *addrlen;
+ (void)srcaddr_sz; // prevent "set but not used" warning
+ SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ if (srcaddr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr,
+ Min((SIZE_T)*addrlen, srcaddr_sz));
+ }
+ return res;
+}
+#define INIT_RECV_RECVFROM \
+ COMMON_INTERCEPT_FUNCTION(recv); \
+ COMMON_INTERCEPT_FUNCTION(recvfrom);
+#else
+#define INIT_RECV_RECVFROM
+#endif
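
The two interceptors above show the canonical shape: COMMON_INTERCEPTOR_ENTER announces the call, REAL() invokes libc, and shadow state is updated for exactly the bytes the kernel produced. A minimal, hypothetical program (not part of this patch; assumes a POSIX host) illustrates why the annotation is bounded by Min((SIZE_T)res, len):

    // Illustrative only: "hi" plus its NUL terminator is 3 bytes.
    #include <sys/types.h>
    #include <sys/socket.h>
    #include <unistd.h>
    int main() {
      int sv[2];
      if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv)) return 1;
      const char msg[] = "hi";
      send(sv[0], msg, sizeof(msg), 0);
      char buf[64];
      ssize_t res = recv(sv[1], buf, sizeof(buf), 0);  // res == 3
      // buf[0..res) is now defined; buf[res..64) remains uninitialized,
      // which is exactly what COMMON_INTERCEPTOR_WRITE_RANGE records.
      return res > 0 ? 0 : 1;
    }
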
+
+#if SANITIZER_INTERCEPT_SEND_SENDTO
+INTERCEPTOR(SSIZE_T, send, int fd, void *buf, SIZE_T len, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, send, fd, buf, len, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ SSIZE_T res = REAL(send)(fd, buf, len, flags);
+ if (common_flags()->intercept_send && res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,
+ void *dstaddr, int addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendto, fd, buf, len, flags, dstaddr, addrlen);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ // Can't check dstaddr as it may have uninitialized padding at the end.
+ SSIZE_T res = REAL(sendto)(fd, buf, len, flags, dstaddr, addrlen);
+ if (common_flags()->intercept_send && res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ return res;
+}
+#define INIT_SEND_SENDTO \
+ COMMON_INTERCEPT_FUNCTION(send); \
+ COMMON_INTERCEPT_FUNCTION(sendto);
+#else
+#define INIT_SEND_SENDTO
+#endif
+
+#if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+INTERCEPTOR(int, eventfd_read, int fd, u64 *value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(eventfd_read)(fd, value);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, sizeof(*value));
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+INTERCEPTOR(int, eventfd_write, int fd, u64 value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ int res = REAL(eventfd_write)(fd, value);
+ return res;
+}
+#define INIT_EVENTFD_READ_WRITE \
+ COMMON_INTERCEPT_FUNCTION(eventfd_read); \
+ COMMON_INTERCEPT_FUNCTION(eventfd_write)
+#else
+#define INIT_EVENTFD_READ_WRITE
+#endif
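
eventfd_read() returns 0 on success and fills an 8-byte counter, which is why the interceptor unpoisons sizeof(*value) only when res == 0. A short usage sketch (Linux-only, illustrative rather than part of the patch):

    #include <sys/eventfd.h>
    #include <cstdio>
    int main() {
      int fd = eventfd(0, 0);
      if (fd < 0) return 1;
      eventfd_write(fd, 5);               // adds 5 to the kernel counter
      eventfd_t value;                    // a uint64_t, matching u64 above
      if (eventfd_read(fd, &value) == 0)  // on success *value is written
        printf("%llu\n", (unsigned long long)value);  // prints 5
      return 0;
    }
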
+
+#if SANITIZER_INTERCEPT_STAT
+INTERCEPTOR(int, stat, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stat, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(stat)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT_STAT COMMON_INTERCEPT_FUNCTION(stat)
+#else
+#define INIT_STAT
+#endif
+
+#if SANITIZER_INTERCEPT___XSTAT
+INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xstat, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__xstat)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT___XSTAT COMMON_INTERCEPT_FUNCTION(__xstat)
+#else
+#define INIT___XSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___XSTAT64
+INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xstat64, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__xstat64)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT___XSTAT64 COMMON_INTERCEPT_FUNCTION(__xstat64)
+#else
+#define INIT___XSTAT64
+#endif
+
+#if SANITIZER_INTERCEPT___LXSTAT
+INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __lxstat, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__lxstat)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT___LXSTAT COMMON_INTERCEPT_FUNCTION(__lxstat)
+#else
+#define INIT___LXSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___LXSTAT64
+INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __lxstat64, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__lxstat64)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT___LXSTAT64 COMMON_INTERCEPT_FUNCTION(__lxstat64)
+#else
+#define INIT___LXSTAT64
+#endif
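
On glibc, the public stat()/lstat() symbols have historically been thin wrappers over the versioned __xstat/__lxstat entry points, so a user call may reach either family depending on how it was compiled; intercepting both keeps coverage complete. A hypothetical caller that lands in one of these interceptors:

    // Illustrative only; on older glibc this stat() call may compile to
    // __xstat(_STAT_VER, path, &st) and hit the __xstat interceptor instead.
    #include <sys/stat.h>
    #include <cstdio>
    int main() {
      struct stat st;
      if (stat("/etc/hostname", &st) == 0)  // path chosen for illustration
        printf("size: %lld\n", (long long)st.st_size);
      return 0;
    }
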
+
+// FIXME: add other *stat interceptors.
+
static void InitializeCommonInterceptors() {
static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
interceptor_metadata_map = new((void *)&metadata_mem) MetadataHashMap();
INIT_TEXTDOMAIN;
+ INIT_STRLEN;
+ INIT_STRNLEN;
INIT_STRCMP;
INIT_STRNCMP;
INIT_STRCASECMP;
INIT_STRNCASECMP;
INIT_STRSTR;
INIT_STRCASESTR;
+ INIT_STRCHR;
+ INIT_STRCHRNUL;
+ INIT_STRRCHR;
INIT_STRSPN;
INIT_STRPBRK;
+ INIT_MEMSET;
+ INIT_MEMMOVE;
+ INIT_MEMCPY;
INIT_MEMCHR;
INIT_MEMCMP;
INIT_MEMRCHR;
+ INIT_MEMMEM;
INIT_READ;
INIT_PREAD;
INIT_PREAD64;
@@ -5347,6 +5950,7 @@ static void InitializeCommonInterceptors() {
INIT_ACCEPT4;
INIT_MODF;
INIT_RECVMSG;
+ INIT_SENDMSG;
INIT_GETPEERNAME;
INIT_IOCTL;
INIT_INET_ATON;
@@ -5416,6 +6020,7 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_BARRIERATTR_GETPSHARED;
INIT_TMPNAM;
INIT_TMPNAM_R;
+ INIT_TTYNAME_R;
INIT_TEMPNAM;
INIT_PTHREAD_SETNAME_NP;
INIT_SINCOS;
@@ -5456,4 +6061,15 @@ static void InitializeCommonInterceptors() {
INIT_PTHREAD_SETCANCEL;
INIT_MINCORE;
INIT_PROCESS_VM_READV;
+ INIT_CTERMID;
+ INIT_CTERMID_R;
+ INIT_RECV_RECVFROM;
+ INIT_SEND_SENDTO;
+ INIT_STAT;
+ INIT_EVENTFD_READ_WRITE;
+ INIT___XSTAT;
+ INIT___XSTAT64;
+ INIT___LXSTAT;
+ INIT___LXSTAT64;
+ // FIXME: add other *stat interceptors.
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
index 6c5fda09fbf..a68534c5a0a 100755
--- a/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
@@ -51,25 +51,9 @@ static void ioctl_table_fill() {
_(FIONBIO, READ, sizeof(int));
_(FIONCLEX, NONE, 0);
_(FIOSETOWN, READ, sizeof(int));
- _(SIOCADDMULTI, READ, struct_ifreq_sz);
_(SIOCATMARK, WRITE, sizeof(int));
- _(SIOCDELMULTI, READ, struct_ifreq_sz);
- _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
- _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
_(SIOCGIFCONF, CUSTOM, 0);
- _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
- _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
- _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
- _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
- _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
_(SIOCGPGRP, WRITE, sizeof(int));
- _(SIOCSIFADDR, READ, struct_ifreq_sz);
- _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
- _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
- _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
- _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
- _(SIOCSIFMTU, READ, struct_ifreq_sz);
- _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
_(SIOCSPGRP, READ, sizeof(int));
_(TIOCCONS, NONE, 0);
_(TIOCEXCL, NONE, 0);
@@ -90,6 +74,25 @@ static void ioctl_table_fill() {
_(TIOCSTI, READ, sizeof(char));
_(TIOCSWINSZ, READ, struct_winsize_sz);
+#if !SANITIZER_IOS
+ _(SIOCADDMULTI, READ, struct_ifreq_sz);
+ _(SIOCDELMULTI, READ, struct_ifreq_sz);
+ _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
+ _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
+ _(SIOCSIFADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
+ _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
+ _(SIOCSIFMTU, READ, struct_ifreq_sz);
+ _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
+#endif
+
#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
_(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
_(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
@@ -578,7 +581,8 @@ static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
return;
if (request == IOCTL_SIOCGIFCONF) {
struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
- COMMON_INTERCEPTOR_READ_RANGE(ctx, &ifc->ifc_len, sizeof(ifc->ifc_len));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
}
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
index 5a76c4ebd8b..8c9fa98f835 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_libcdep.cc
@@ -10,6 +10,8 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+
+#include "sanitizer_allocator_interface.h"
#include "sanitizer_flags.h"
#include "sanitizer_stackdepot.h"
#include "sanitizer_stacktrace.h"
@@ -43,7 +45,8 @@ void SetSandboxingCallback(void (*f)()) {
sandboxing_callback = f;
}
-void ReportErrorSummary(const char *error_type, StackTrace *stack) {
+void ReportErrorSummary(const char *error_type, const StackTrace *stack) {
+#if !SANITIZER_GO
if (!common_flags()->print_summary)
return;
if (stack->size == 0) {
@@ -56,6 +59,7 @@ void ReportErrorSummary(const char *error_type, StackTrace *stack) {
SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
ReportErrorSummary(error_type, frame->info);
frame->ClearAll();
+#endif
}
static void (*SoftRssLimitExceededCallback)(bool exceeded);
@@ -64,12 +68,22 @@ void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
SoftRssLimitExceededCallback = Callback;
}
+static AllocatorReleaseToOSCallback ReleaseCallback;
+void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) {
+  CHECK_EQ(ReleaseCallback, nullptr);
+  ReleaseCallback = Callback;
+}
+
+#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ bool heap_profile = common_flags()->heap_profile;
+ bool allocator_release_to_os = common_flags()->allocator_release_to_os;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
+ uptr rss_during_last_reported_profile = 0;
while (true) {
SleepForMillis(100);
uptr current_rss_mb = GetRSS() >> 20;
@@ -111,14 +125,43 @@ void BackgroundThread(void *arg) {
SoftRssLimitExceededCallback(false);
}
}
+    if (allocator_release_to_os && ReleaseCallback) ReleaseCallback();
+ if (heap_profile &&
+ current_rss_mb > rss_during_last_reported_profile * 1.1) {
+ Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
+ __sanitizer_print_memory_profile(90);
+ rss_during_last_reported_profile = current_rss_mb;
+ }
}
}
+#endif
+
+void WriteToSyslog(const char *msg) {
+ InternalScopedString msg_copy(kErrorMessageBufferSize);
+ msg_copy.append("%s", msg);
+ char *p = msg_copy.data();
+ char *q;
+
+ // Print one line at a time.
+ // syslog, at least on Android, has an implicit message length limit.
+ do {
+ q = internal_strchr(p, '\n');
+ if (q)
+ *q = '\0';
+ WriteOneLineToSyslog(p);
+ if (q)
+ p = q + 1;
+ } while (q);
+}
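
The loop terminates each line in place and forwards them one at a time because syslog, notably on Android, silently truncates long messages. The same splitting logic in standalone form, with puts() standing in for WriteOneLineToSyslog() purely for illustration:

    #include <cstring>
    #include <cstdio>
    static void WriteOneLine(const char *line) { puts(line); }  // syslog stand-in
    int main() {
      char msg[] = "first\nsecond\nthird";
      char *p = msg, *q;
      do {
        q = strchr(p, '\n');
        if (q) *q = '\0';  // terminate the current line in place
        WriteOneLine(p);
        if (q) p = q + 1;  // advance past the consumed newline
      } while (q);
      return 0;
    }
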
void MaybeStartBackgroudThread() {
-#if SANITIZER_LINUX // Need to implement/test on other platforms.
+#if SANITIZER_LINUX && \
+ !SANITIZER_GO // Need to implement/test on other platforms.
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
- !common_flags()->soft_rss_limit_mb) return;
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->allocator_release_to_os &&
+ !common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
#endif
@@ -128,7 +171,7 @@ void MaybeStartBackgroudThread() {
void NOINLINE
__sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args) {
- PrepareForSandboxing(args);
- if (sandboxing_callback)
- sandboxing_callback();
+ __sanitizer::PrepareForSandboxing(args);
+ if (__sanitizer::sandboxing_callback)
+ __sanitizer::sandboxing_callback();
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
index 5e83ce9786c..6fd5ef74274 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc
@@ -1235,17 +1235,15 @@ POST_SYSCALL(fcntl64)(long res, long fd, long cmd, long arg) {}
PRE_SYSCALL(pipe)(void *fildes) {}
POST_SYSCALL(pipe)(long res, void *fildes) {
- if (res >= 0) {
- if (fildes) POST_WRITE(fildes, sizeof(int));
- }
+ if (res >= 0)
+ if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
}
PRE_SYSCALL(pipe2)(void *fildes, long flags) {}
POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
- if (res >= 0) {
- if (fildes) POST_WRITE(fildes, sizeof(int));
- }
+ if (res >= 0)
+ if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
}
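
The substantive fix in both POST handlers above is the size: pipe() and pipe2() fill an array of two descriptors, so the previous sizeof(int) annotation covered only half of what the kernel wrote. For illustration:

    #include <unistd.h>
    #include <cstdio>
    int main() {
      int fds[2];  // the syscall writes both elements
      if (pipe(fds)) return 1;
      printf("read end %d, write end %d\n", fds[0], fds[1]);
      return 0;
    }
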
PRE_SYSCALL(dup)(long fildes) {}
@@ -1878,13 +1876,11 @@ PRE_SYSCALL(socket)(long arg0, long arg1, long arg2) {}
POST_SYSCALL(socket)(long res, long arg0, long arg1, long arg2) {}
-PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, void *arg3) {}
+PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}
-POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2,
- void *arg3) {
- if (res >= 0) {
- if (arg3) POST_WRITE(arg3, sizeof(int));
- }
+POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
+ if (res >= 0)
+ if (sv) POST_WRITE(sv, sizeof(int) * 2);
}
PRE_SYSCALL(socketcall)(long call, void *args) {}
@@ -2299,7 +2295,7 @@ POST_SYSCALL(ni_syscall)(long res) {}
PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__))
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
if (data) {
if (request == ptrace_setregs) {
PRE_READ((void *)data, struct_user_regs_struct_sz);
@@ -2320,7 +2316,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
#if !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__))
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
if (res >= 0 && data) {
// Note that this is different from the interceptor in
// sanitizer_common_interceptors.inc.
@@ -2842,6 +2838,40 @@ PRE_SYSCALL(vfork)() {
POST_SYSCALL(vfork)(long res) {
COMMON_SYSCALL_POST_FORK(res);
}
+
+PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (act) {
+ PRE_READ(&act->sigaction, sizeof(act->sigaction));
+ PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
+ PRE_READ(&act->sa_mask, sizeof(act->sa_mask));
+ }
+}
+
+POST_SYSCALL(sigaction)(long res, long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
+}
+
+PRE_SYSCALL(rt_sigaction)(long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+ if (act) {
+ PRE_READ(&act->sigaction, sizeof(act->sigaction));
+ PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
+ PRE_READ(&act->sa_mask, sz);
+ }
+}
+
+POST_SYSCALL(rt_sigaction)(long res, long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+ if (res >= 0 && oldact) {
+ SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
+ POST_WRITE(oldact, oldact_sz);
+ }
+}
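
oldact_sz marks everything from the start of the struct up to sa_mask plus the caller-supplied sigsetsize sz, since the effective length of the trailing kernel mask is only known at the call site. The same arithmetic in standalone form, using a purely illustrative layout (the real __sanitizer_kernel_sigaction_t differs by platform):

    #include <cstddef>
    #include <cstdio>
    struct kernel_sigaction {   // illustrative layout, not the real one
      void (*handler)(int);
      unsigned long flags;
      void (*restorer)();
      unsigned long mask[2];    // kernel-sized signal mask comes last
    };
    int main() {
      size_t sz = 2 * sizeof(unsigned long);  // sigsetsize a caller might pass
      size_t oldact_sz = offsetof(kernel_sigaction, mask) + sz;
      printf("%zu bytes considered written\n", oldact_sz);
      return 0;
    }
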
} // extern "C"
#undef PRE_SYSCALL
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
index c67880468b8..dd8620beaac 100644
--- a/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage_libcdep.cc
@@ -45,8 +45,12 @@
#include "sanitizer_symbolizer.h"
#include "sanitizer_flags.h"
+using namespace __sanitizer;
+
static const u64 kMagic64 = 0xC0BFFFFFFFFFFF64ULL;
static const u64 kMagic32 = 0xC0BFFFFFFFFFFF32ULL;
+static const uptr kNumWordsForMagic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
+static const u64 kMagic = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
static atomic_uint32_t dump_once_guard; // Ensure that CovDump runs only once.
@@ -94,7 +98,7 @@ class CoverageData {
void DumpAll();
ALWAYS_INLINE
- void TraceBasicBlock(s32 *id);
+ void TraceBasicBlock(u32 *id);
void InitializeGuardArray(s32 *guards);
void InitializeGuards(s32 *guards, uptr n, const char *module_name,
@@ -105,17 +109,23 @@ class CoverageData {
uptr Update8bitCounterBitsetAndClearCounters(u8 *bitset);
uptr *data();
- uptr size();
+ uptr size() const;
private:
+ struct NamedPcRange {
+ const char *copied_module_name;
+ uptr beg, end; // elements [beg,end) in pc_array.
+ };
+
void DirectOpen();
void UpdateModuleNameVec(uptr caller_pc, uptr range_beg, uptr range_end);
+ void GetRangeOffsets(const NamedPcRange& r, Symbolizer* s,
+ InternalMmapVector<uptr>* offsets) const;
// Maximal size pc array may ever grow.
// We MmapNoReserve this space to ensure that the array is contiguous.
- static const uptr kPcArrayMaxSize = FIRST_32_SECOND_64(
- 1 << (SANITIZER_ANDROID ? 24 : (SANITIZER_WINDOWS ? 27 : 26)),
- 1 << 27);
+ static const uptr kPcArrayMaxSize =
+ FIRST_32_SECOND_64(1 << (SANITIZER_ANDROID ? 24 : 26), 1 << 27);
// The amount file mapping for the pc array is grown by.
static const uptr kPcArrayMmapSize = 64 * 1024;
@@ -134,11 +144,6 @@ class CoverageData {
// Vector of coverage guard arrays, protected by mu.
InternalMmapVectorNoCtor<s32*> guard_array_vec;
- struct NamedPcRange {
- const char *copied_module_name;
- uptr beg, end; // elements [beg,end) in pc_array.
- };
-
// Vector of module and compilation unit pc ranges.
InternalMmapVectorNoCtor<NamedPcRange> comp_unit_name_vec;
InternalMmapVectorNoCtor<NamedPcRange> module_name_vec;
@@ -510,7 +515,7 @@ uptr *CoverageData::data() {
return pc_array;
}
-uptr CoverageData::size() {
+uptr CoverageData::size() const {
return atomic_load(&pc_array_index, memory_order_relaxed);
}
@@ -680,11 +685,11 @@ void CoverageData::DumpCallerCalleePairs() {
// it once and then cache in the provided 'cache' storage.
//
// This function will eventually be inlined by the compiler.
-void CoverageData::TraceBasicBlock(s32 *id) {
+void CoverageData::TraceBasicBlock(u32 *id) {
// Will trap here if
// 1. coverage is not enabled at run-time.
// 2. The array tr_event_array is full.
- *tr_event_pointer = static_cast<u32>(*id - 1);
+ *tr_event_pointer = *id - 1;
tr_event_pointer++;
}
@@ -740,41 +745,96 @@ void CoverageData::DumpAsBitSet() {
}
}
+
+void CoverageData::GetRangeOffsets(const NamedPcRange& r, Symbolizer* sym,
+ InternalMmapVector<uptr>* offsets) const {
+ offsets->clear();
+ for (uptr i = 0; i < kNumWordsForMagic; i++)
+ offsets->push_back(0);
+ CHECK(r.copied_module_name);
+ CHECK_LE(r.beg, r.end);
+ CHECK_LE(r.end, size());
+ for (uptr i = r.beg; i < r.end; i++) {
+ uptr pc = UnbundlePc(pc_array[i]);
+ uptr counter = UnbundleCounter(pc_array[i]);
+ if (!pc) continue; // Not visited.
+ uptr offset = 0;
+ sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
+ offsets->push_back(BundlePcAndCounter(offset, counter));
+ }
+
+ CHECK_GE(offsets->size(), kNumWordsForMagic);
+ SortArray(offsets->data(), offsets->size());
+ for (uptr i = 0; i < offsets->size(); i++)
+ (*offsets)[i] = UnbundlePc((*offsets)[i]);
+}
+
+static void GenerateHtmlReport(const InternalMmapVector<char *> &cov_files) {
+ if (!common_flags()->html_cov_report) {
+ return;
+ }
+ char *sancov_path = FindPathToBinary(common_flags()->sancov_path);
+ if (sancov_path == nullptr) {
+ return;
+ }
+
+ InternalMmapVector<char *> sancov_argv(cov_files.size() * 2 + 3);
+ sancov_argv.push_back(sancov_path);
+ sancov_argv.push_back(internal_strdup("-html-report"));
+ auto argv_deleter = at_scope_exit([&] {
+ for (uptr i = 0; i < sancov_argv.size(); ++i) {
+ InternalFree(sancov_argv[i]);
+ }
+ });
+
+ for (const auto &cov_file : cov_files) {
+ sancov_argv.push_back(internal_strdup(cov_file));
+ }
+
+ {
+ ListOfModules modules;
+ modules.init();
+ for (const LoadedModule &module : modules) {
+ sancov_argv.push_back(internal_strdup(module.full_name()));
+ }
+ }
+
+ InternalScopedString report_path(kMaxPathLength);
+ fd_t report_fd =
+ CovOpenFile(&report_path, false /* packed */, GetProcessName(), "html");
+ int pid = StartSubprocess(sancov_argv[0], sancov_argv.data(),
+ kInvalidFd /* stdin */, report_fd /* std_out */);
+ if (pid > 0) {
+ int result = WaitForProcess(pid);
+ if (result == 0)
+ Printf("coverage report generated to %s\n", report_path.data());
+ }
+}
+
void CoverageData::DumpOffsets() {
auto sym = Symbolizer::GetOrInit();
if (!common_flags()->coverage_pcs) return;
CHECK_NE(sym, nullptr);
InternalMmapVector<uptr> offsets(0);
InternalScopedString path(kMaxPathLength);
- for (uptr m = 0; m < module_name_vec.size(); m++) {
- offsets.clear();
- uptr num_words_for_magic = SANITIZER_WORDSIZE == 64 ? 1 : 2;
- for (uptr i = 0; i < num_words_for_magic; i++)
- offsets.push_back(0);
- auto r = module_name_vec[m];
- CHECK(r.copied_module_name);
- CHECK_LE(r.beg, r.end);
- CHECK_LE(r.end, size());
- for (uptr i = r.beg; i < r.end; i++) {
- uptr pc = UnbundlePc(pc_array[i]);
- uptr counter = UnbundleCounter(pc_array[i]);
- if (!pc) continue; // Not visited.
- uptr offset = 0;
- sym->GetModuleNameAndOffsetForPC(pc, nullptr, &offset);
- offsets.push_back(BundlePcAndCounter(offset, counter));
+
+ InternalMmapVector<char *> cov_files(module_name_vec.size());
+ auto cov_files_deleter = at_scope_exit([&] {
+ for (uptr i = 0; i < cov_files.size(); ++i) {
+ InternalFree(cov_files[i]);
}
+ });
- CHECK_GE(offsets.size(), num_words_for_magic);
- SortArray(offsets.data(), offsets.size());
- for (uptr i = 0; i < offsets.size(); i++)
- offsets[i] = UnbundlePc(offsets[i]);
+ for (uptr m = 0; m < module_name_vec.size(); m++) {
+ auto r = module_name_vec[m];
+ GetRangeOffsets(r, sym, &offsets);
- uptr num_offsets = offsets.size() - num_words_for_magic;
+ uptr num_offsets = offsets.size() - kNumWordsForMagic;
u64 *magic_p = reinterpret_cast<u64*>(offsets.data());
CHECK_EQ(*magic_p, 0ULL);
     // FIXME: we may want to write 32-bit offsets even in 64-bit mode
// if all the offsets are small enough.
- *magic_p = SANITIZER_WORDSIZE == 64 ? kMagic64 : kMagic32;
+ *magic_p = kMagic;
const char *module_name = StripModuleName(r.copied_module_name);
if (cov_sandboxed) {
@@ -789,11 +849,14 @@ void CoverageData::DumpOffsets() {
if (fd == kInvalidFd) continue;
WriteToFile(fd, offsets.data(), offsets.size() * sizeof(offsets[0]));
CloseFile(fd);
+ cov_files.push_back(internal_strdup(path.data()));
VReport(1, " CovDump: %s: %zd PCs written\n", path.data(), num_offsets);
}
}
if (cov_fd != kInvalidFd)
CloseFile(cov_fd);
+
+ GenerateHtmlReport(cov_files);
}
void CoverageData::DumpAll() {
@@ -918,11 +981,13 @@ uptr __sanitizer_get_total_unique_caller_callee_pairs() {
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_func_enter(s32 *id) {
+void __sanitizer_cov_trace_func_enter(u32 *id) {
+ __sanitizer_cov_with_check(id);
coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
-void __sanitizer_cov_trace_basic_block(s32 *id) {
+void __sanitizer_cov_trace_basic_block(u32 *id) {
+ __sanitizer_cov_with_check(id);
coverage_data.TraceBasicBlock(id);
}
SANITIZER_INTERFACE_ATTRIBUTE
@@ -949,8 +1014,30 @@ uptr __sanitizer_update_counter_bitset_and_clear_counters(u8 *bitset) {
return coverage_data.Update8bitCounterBitsetAndClearCounters(bitset);
}
// Default empty implementations (weak). Users should redefine them.
+#if !SANITIZER_WINDOWS // weak does not work on Windows.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_cmp() {}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp1() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp2() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_cmp8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_cov_trace_switch() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div4() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_div8() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_gep() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_guard() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_indir() {}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_cov_trace_pc_guard_init() {}
+#endif // !SANITIZER_WINDOWS
} // extern "C"
diff --git a/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
index ebac6812881..b2e724ab221 100644
--- a/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_coverage_mapping_libcdep.cc
@@ -70,26 +70,21 @@ void CovUpdateMapping(const char *coverage_dir, uptr caller_pc) {
InternalScopedString text(kMaxTextSize);
{
- InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
- CHECK(modules.data());
- int n_modules = GetListOfModules(modules.data(), kMaxNumberOfModules,
- /* filter */ nullptr);
-
text.append("%d\n", sizeof(uptr) * 8);
- for (int i = 0; i < n_modules; ++i) {
- const char *module_name = StripModuleName(modules[i].full_name());
- uptr base = modules[i].base_address();
- for (auto iter = modules[i].ranges(); iter.hasNext();) {
- const auto *range = iter.next();
- if (range->executable) {
- uptr start = range->beg;
- uptr end = range->end;
+ ListOfModules modules;
+ modules.init();
+ for (const LoadedModule &module : modules) {
+ const char *module_name = StripModuleName(module.full_name());
+ uptr base = module.base_address();
+ for (const auto &range : module.ranges()) {
+ if (range.executable) {
+ uptr start = range.beg;
+ uptr end = range.end;
text.append("%zx %zx %zx %s\n", start, end, base, module_name);
if (caller_pc && caller_pc >= start && caller_pc < end)
cached_mapping.SetModuleRange(start, end);
}
}
- modules[i].clear();
}
}
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
index 7a318c963f7..e2aedc24da9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector1.cc
@@ -117,11 +117,16 @@ void DD::MutexBeforeLock(DDCallback *cb,
void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
DDLogicalThread *lt = cb->lt;
- uptr path[10];
+ uptr path[20];
uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
- CHECK_GT(len, 0U); // Hm.. cycle of 10 locks? I'd like to see that.
+ if (len == 0U) {
+ // A cycle of 20+ locks? Well, that's a bit odd...
+ Printf("WARNING: too long mutex cycle found\n");
+ return;
+ }
CHECK_EQ(m->id, path[0]);
lt->report_pending = true;
+ len = Min<uptr>(len, DDReport::kMaxLoopSize);
DDReport *rep = &lt->rep;
rep->n = len;
for (uptr i = 0; i < len; i++) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
index 07c3755155f..f8da20612db 100644
--- a/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
+++ b/libsanitizer/sanitizer_common/sanitizer_deadlock_detector_interface.h
@@ -49,7 +49,7 @@ struct DDFlags {
};
struct DDReport {
- enum { kMaxLoopSize = 8 };
+ enum { kMaxLoopSize = 20 };
int n; // number of entries in loop
struct {
u64 thr_ctx; // user thread context
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.cc b/libsanitizer/sanitizer_common/sanitizer_flags.cc
index a24ff900011..5f69e58ffb2 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.cc
@@ -28,11 +28,6 @@ struct FlagDescription {
IntrusiveList<FlagDescription> flag_descriptions;
-// If set, the tool will install its own SEGV signal handler by default.
-#ifndef SANITIZER_NEEDS_SEGV
-# define SANITIZER_NEEDS_SEGV 1
-#endif
-
void CommonFlags::SetDefaults() {
#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "sanitizer_flags.inc"
@@ -43,17 +38,44 @@ void CommonFlags::CopyFrom(const CommonFlags &other) {
internal_memcpy(this, &other, sizeof(*this));
}
-// Copy the string from "s" to "out", replacing "%b" with the binary basename.
-static void SubstituteBinaryName(const char *s, char *out, uptr out_size) {
+// Copy the string from "s" to "out", making the following substitutions:
+// %b = binary basename
+// %p = pid
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
char *out_end = out + out_size;
while (*s && out < out_end - 1) {
- if (s[0] != '%' || s[1] != 'b') { *out++ = *s++; continue; }
- const char *base = GetProcessName();
- CHECK(base);
- while (*base && out < out_end - 1)
- *out++ = *base++;
- s += 2; // skip "%b"
+ if (s[0] != '%') {
+ *out++ = *s++;
+ continue;
+ }
+ switch (s[1]) {
+ case 'b': {
+ const char *base = GetProcessName();
+ CHECK(base);
+ while (*base && out < out_end - 1)
+ *out++ = *base++;
+ s += 2; // skip "%b"
+ break;
+ }
+ case 'p': {
+ int pid = internal_getpid();
+ char buf[32];
+ char *buf_pos = buf + 32;
+ do {
+ *--buf_pos = (pid % 10) + '0';
+ pid /= 10;
+ } while (pid);
+ while (buf_pos < buf + 32 && out < out_end - 1)
+ *out++ = *buf_pos++;
+ s += 2; // skip "%p"
+ break;
+ }
+ default:
+ *out++ = *s++;
+ break;
+ }
}
+ CHECK(out < out_end - 1);
*out = '\0';
}
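
The %p case converts the pid to decimal by writing digits backwards from the end of a scratch buffer, so no reversal pass is needed. The same idiom standalone, with an illustrative pid in place of internal_getpid():

    #include <cstdio>
    int main() {
      int pid = 1234;  // illustrative; the real code calls internal_getpid()
      char buf[32];
      char *pos = buf + sizeof(buf);
      do {
        *--pos = (pid % 10) + '0';  // emit least-significant digit first
        pid /= 10;
      } while (pid);
      fwrite(pos, 1, buf + sizeof(buf) - pos, stdout);  // prints 1234
      putchar('\n');
      return 0;
    }
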
@@ -67,7 +89,7 @@ class FlagHandlerInclude : public FlagHandlerBase {
bool Parse(const char *value) final {
if (internal_strchr(value, '%')) {
char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
- SubstituteBinaryName(value, buf, kMaxPathLength);
+ SubstituteForFlagValue(value, buf, kMaxPathLength);
bool res = parser_->ParseFile(buf, ignore_missing_);
UnmapOrDie(buf, kMaxPathLength);
return res;
@@ -97,4 +119,10 @@ void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
RegisterIncludeFlags(parser, cf);
}
+void InitializeCommonFlags(CommonFlags *cf) {
+  // Need to record coverage to generate the coverage report.
+ cf->coverage |= cf->html_cov_report;
+ SetVerbosity(cf->verbosity);
+}
+
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.h b/libsanitizer/sanitizer_common/sanitizer_flags.h
index f1b872b79e2..ff64af10ff5 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.h
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.h
@@ -44,10 +44,17 @@ inline void OverrideCommonFlags(const CommonFlags &cf) {
common_flags_dont_use.CopyFrom(cf);
}
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size);
+
class FlagParser;
void RegisterCommonFlags(FlagParser *parser,
CommonFlags *cf = &common_flags_dont_use);
void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
+
+// Should be called after parsing all flags. Sets up common flag values
+// and performs initializations common to all sanitizers (e.g. setting
+// verbosity).
+void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);
} // namespace __sanitizer
#endif // SANITIZER_FLAGS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_flags.inc b/libsanitizer/sanitizer_common/sanitizer_flags.inc
index bca1c4b4d0f..ccc0e416f4a 100644
--- a/libsanitizer/sanitizer_common/sanitizer_flags.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_flags.inc
@@ -54,7 +54,7 @@ COMMON_FLAG(
"Mention name of executable when reporting error and "
"append executable name to logs (as in \"log_path.exe_name.pid\").")
COMMON_FLAG(
- bool, log_to_syslog, SANITIZER_ANDROID,
+ bool, log_to_syslog, SANITIZER_ANDROID || SANITIZER_MAC,
"Write all sanitizer output to syslog in addition to other means of "
"logging.")
COMMON_FLAG(
@@ -73,10 +73,12 @@ COMMON_FLAG(bool, print_summary, true,
"If false, disable printing error summaries in addition to error "
"reports.")
COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
-COMMON_FLAG(bool, handle_segv, SANITIZER_NEEDS_SEGV,
+COMMON_FLAG(bool, handle_segv, true,
"If set, registers the tool's custom SIGSEGV/SIGBUS handler.")
COMMON_FLAG(bool, handle_abort, false,
"If set, registers the tool's custom SIGABRT handler.")
+COMMON_FLAG(bool, handle_sigill, false,
+ "If set, registers the tool's custom SIGILL handler.")
COMMON_FLAG(bool, handle_sigfpe, true,
"If set, registers the tool's custom SIGFPE handler.")
COMMON_FLAG(bool, allow_user_segv_handler, false,
@@ -114,6 +116,10 @@ COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
" until the RSS goes below the soft limit."
" This limit does not affect memory allocations other than"
" malloc/new.")
+COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
+COMMON_FLAG(bool, allocator_release_to_os, false,
+ "Experimental. If true, try to periodically release unused"
+ " memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
@@ -146,10 +152,10 @@ COMMON_FLAG(bool, full_address_space, false,
COMMON_FLAG(bool, print_suppressions, true,
"Print matched suppressions at exit.")
COMMON_FLAG(
- bool, disable_coredump, (SANITIZER_WORDSIZE == 64),
- "Disable core dumping. By default, disable_core=1 on 64-bit to avoid "
- "dumping a 16T+ core file. Ignored on OSes that don't dump core by"
- "default and for sanitizers that don't reserve lots of virtual memory.")
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
+ "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
+ " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ " default and for sanitizers that don't reserve lots of virtual memory.")
COMMON_FLAG(bool, use_madv_dontdump, true,
"If set, instructs kernel to not store the (huge) shadow "
"in core file.")
@@ -158,6 +164,11 @@ COMMON_FLAG(bool, symbolize_inline_frames, true,
COMMON_FLAG(bool, symbolize_vs_style, false,
"Print file locations in Visual Studio style (e.g: "
" file(10,42): ...")
+COMMON_FLAG(int, dedup_token_length, 0,
+ "If positive, after printing a stack trace also print a short "
+ "string token based on this number of frames that will simplify "
+ "deduplication of the reports. "
+ "Example: 'DEDUP_TOKEN: foo-bar-main'. Default is 0.")
COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
"Format string used to render stack frames. "
"See sanitizer_stacktrace_printer.h for the format description. "
@@ -175,18 +186,42 @@ COMMON_FLAG(bool, intercept_strspn, true,
COMMON_FLAG(bool, intercept_strpbrk, true,
"If set, uses custom wrappers for strpbrk function "
"to find more errors.")
+COMMON_FLAG(bool, intercept_strlen, true,
+ "If set, uses custom wrappers for strlen and strnlen functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strchr, true,
+ "If set, uses custom wrappers for strchr, strchrnul, and strrchr "
+ "functions to find more errors.")
COMMON_FLAG(bool, intercept_memcmp, true,
"If set, uses custom wrappers for memcmp function "
"to find more errors.")
COMMON_FLAG(bool, strict_memcmp, true,
"If true, assume that memcmp(p1, p2, n) always reads n bytes before "
"comparing p1 and p2.")
+COMMON_FLAG(bool, intercept_memmem, true,
+ "If set, uses a wrapper for memmem() to find more errors.")
+COMMON_FLAG(bool, intercept_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove "
+ "intrinsics to find more errors.")
+COMMON_FLAG(bool, intercept_stat, true,
+ "If set, uses custom wrappers for *stat functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_send, true,
+ "If set, uses custom wrappers for send* functions "
+ "to find more errors.")
COMMON_FLAG(bool, decorate_proc_maps, false, "If set, decorate sanitizer "
"mappings in /proc/self/maps with "
"user-readable names")
COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
"found an error")
COMMON_FLAG(
- bool, abort_on_error, SANITIZER_MAC,
+ bool, abort_on_error, SANITIZER_ANDROID || SANITIZER_MAC,
"If set, the tool calls abort() instead of _exit() after printing the "
"error report.")
+COMMON_FLAG(bool, suppress_equal_pcs, true,
+ "Deduplicate multiple reports for single source location in "
+ "halt_on_error=false mode (asan only).")
+COMMON_FLAG(bool, print_cmdline, false, "Print command line on crash "
+ "(asan only).")
+COMMON_FLAG(bool, html_cov_report, false, "Generate html coverage report.")
+COMMON_FLAG(const char *, sancov_path, "sancov", "Sancov tool location.")
diff --git a/libsanitizer/sanitizer_common/sanitizer_interface_internal.h b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h
index 0547f9927cb..34e80c3b50f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_interface_internal.h
+++ b/libsanitizer/sanitizer_common/sanitizer_interface_internal.h
@@ -23,6 +23,10 @@ extern "C" {
// The special values are "stdout" and "stderr".
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_set_report_path(const char *path);
+ // Tell the tools to write their reports to the provided file descriptor
+ // (casted to void *).
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_fd(void *fd);
typedef struct {
int coverage_sandboxed;
diff --git a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
index d76ed7570a7..b9c906669de 100644
--- a/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
+++ b/libsanitizer/sanitizer_common/sanitizer_internal_defs.h
@@ -22,7 +22,7 @@
# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
// FIXME find out what we need on Windows, if anything.
# define SANITIZER_WEAK_ATTRIBUTE
-#elif defined(SANITIZER_GO)
+#elif SANITIZER_GO
# define SANITIZER_INTERFACE_ATTRIBUTE
# define SANITIZER_WEAK_ATTRIBUTE
#else
@@ -30,7 +30,7 @@
# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
#endif
-#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !defined(SANITIZER_GO)
+#if (SANITIZER_LINUX || SANITIZER_WINDOWS) && !SANITIZER_GO
# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
#else
# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
@@ -87,6 +87,7 @@ typedef unsigned error_t;
typedef int fd_t;
typedef int error_t;
#endif
+typedef int pid_t;
// WARNING: OFF_T may be different from OS type off_t, depending on the value of
// _FILE_OFFSET_BITS. This definition of OFF_T matches the ABI of system calls
@@ -103,19 +104,25 @@ typedef u64 OFF64_T;
#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
typedef uptr operator_new_size_type;
#else
+# if defined(__s390__) && !defined(__s390x__)
+// Special case: 31-bit s390 has unsigned long as size_t.
+typedef unsigned long operator_new_size_type;
+# else
typedef u32 operator_new_size_type;
+# endif
#endif
-} // namespace __sanitizer
-using namespace __sanitizer; // NOLINT
// ----------- ATTENTION -------------
// This header should NOT include any other headers to avoid portability issues.
// Common defs.
#define INLINE inline
#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
-#define WEAK SANITIZER_WEAK_ATTRIBUTE
+#define SANITIZER_WEAK_DEFAULT_IMPL \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
+#define SANITIZER_WEAK_CXX_DEFAULT_IMPL \
+ extern "C++" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
// Platform-specific defs.
#if defined(_MSC_VER)
@@ -129,7 +136,7 @@ using namespace __sanitizer; // NOLINT
# define THREADLOCAL __declspec(thread)
# define LIKELY(x) (x)
# define UNLIKELY(x) (x)
-# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
+# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0
#else // _MSC_VER
# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
@@ -173,7 +180,9 @@ typedef ALIGNED(1) s32 us32;
typedef ALIGNED(1) s64 us64;
#if SANITIZER_WINDOWS
+} // namespace __sanitizer
typedef unsigned long DWORD; // NOLINT
+namespace __sanitizer {
typedef DWORD thread_return_t;
# define THREAD_CALLING_CONV __stdcall
#else // _WIN32
@@ -183,14 +192,12 @@ typedef void* thread_return_t;
typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
// NOTE: Functions below must be defined in each run-time.
-namespace __sanitizer {
void NORETURN Die();
// FIXME: No, this shouldn't be in the sanitizer interface.
SANITIZER_INTERFACE_ATTRIBUTE
void NORETURN CheckFailed(const char *file, int line, const char *cond,
u64 v1, u64 v2);
-} // namespace __sanitizer
// Check macro
#define RAW_CHECK_MSG(expr, msg) do { \
@@ -282,14 +289,23 @@ enum LinkerInitialized { LINKER_INITIALIZED = 0 };
#if !defined(_MSC_VER) || defined(__clang__)
# define GET_CALLER_PC() (uptr)__builtin_return_address(0)
# define GET_CURRENT_FRAME() (uptr)__builtin_frame_address(0)
+inline void Trap() {
+ __builtin_trap();
+}
#else
extern "C" void* _ReturnAddress(void);
+extern "C" void* _AddressOfReturnAddress(void);
# pragma intrinsic(_ReturnAddress)
+# pragma intrinsic(_AddressOfReturnAddress)
# define GET_CALLER_PC() (uptr)_ReturnAddress()
// CaptureStackBackTrace doesn't need to know BP on Windows.
-// FIXME: This macro is still used when printing error reports though it's not
-// clear if the BP value is needed in the ASan reports on Windows.
-# define GET_CURRENT_FRAME() (uptr)0xDEADBEEF
+# define GET_CURRENT_FRAME() (((uptr)_AddressOfReturnAddress()) + sizeof(uptr))
+
+extern "C" void __ud2(void);
+# pragma intrinsic(__ud2)
+inline void Trap() {
+ __ud2();
+}
#endif
#define HANDLE_EINTR(res, f) \
@@ -308,4 +324,19 @@ extern "C" void* _ReturnAddress(void);
(void)enable_fp; \
} while (0)
+} // namespace __sanitizer
+
+namespace __asan { using namespace __sanitizer; } // NOLINT
+namespace __dsan { using namespace __sanitizer; } // NOLINT
+namespace __dfsan { using namespace __sanitizer; } // NOLINT
+namespace __esan { using namespace __sanitizer; } // NOLINT
+namespace __lsan { using namespace __sanitizer; } // NOLINT
+namespace __msan { using namespace __sanitizer; } // NOLINT
+namespace __tsan { using namespace __sanitizer; } // NOLINT
+namespace __scudo { using namespace __sanitizer; } // NOLINT
+namespace __ubsan { using namespace __sanitizer; } // NOLINT
+namespace __xray { using namespace __sanitizer; } // NOLINT
+namespace __interception { using namespace __sanitizer; } // NOLINT
+
+
#endif // SANITIZER_DEFS_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.cc b/libsanitizer/sanitizer_common/sanitizer_libc.cc
index 05fdca622e4..82d37675e47 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.cc
@@ -72,7 +72,7 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
// Semi-fast bzero for 16-aligned data. Still far from peak performance.
void internal_bzero_aligned16(void *s, uptr n) {
- struct S16 { u64 a, b; } ALIGNED(16);
+ struct ALIGNED(16) S16 { u64 a, b; };
CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
p->a = p->b = 0;
@@ -173,6 +173,19 @@ uptr internal_strlen(const char *s) {
return i;
}
+uptr internal_strlcat(char *dst, const char *src, uptr maxlen) {
+ const uptr srclen = internal_strlen(src);
+ const uptr dstlen = internal_strnlen(dst, maxlen);
+ if (dstlen == maxlen) return maxlen + srclen;
+ if (srclen < maxlen - dstlen) {
+ internal_memmove(dst + dstlen, src, srclen + 1);
+ } else {
+ internal_memmove(dst + dstlen, src, maxlen - dstlen - 1);
+ dst[maxlen - 1] = '\0';
+ }
+ return dstlen + srclen;
+}
+
char *internal_strncat(char *dst, const char *src, uptr n) {
uptr len = internal_strlen(dst);
uptr i;
@@ -182,6 +195,17 @@ char *internal_strncat(char *dst, const char *src, uptr n) {
return dst;
}
+uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
+ const uptr srclen = internal_strlen(src);
+ if (srclen < maxlen) {
+ internal_memmove(dst, src, srclen + 1);
+ } else if (maxlen != 0) {
+ internal_memmove(dst, src, maxlen - 1);
+ dst[maxlen - 1] = '\0';
+ }
+ return srclen;
+}
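
internal_strlcat and internal_strlcpy follow BSD strlcat/strlcpy semantics: the destination is always NUL-terminated (when maxlen > 0) and the return value is the length the result would have had, so truncation reduces to one comparison. A standalone sketch of the copy half:

    #include <cstddef>
    #include <cstring>
    #include <cstdio>
    static size_t my_strlcpy(char *dst, const char *src, size_t maxlen) {
      size_t srclen = strlen(src);
      if (srclen < maxlen) {
        memmove(dst, src, srclen + 1);  // fits, including the NUL
      } else if (maxlen != 0) {
        memmove(dst, src, maxlen - 1);  // truncate...
        dst[maxlen - 1] = '\0';         // ...but stay NUL-terminated
      }
      return srclen;  // the length the copy wanted to be
    }
    int main() {
      char buf[4];
      if (my_strlcpy(buf, "sanitizer", sizeof(buf)) >= sizeof(buf))
        printf("truncated: %s\n", buf);  // prints "truncated: san"
      return 0;
    }
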
+
char *internal_strncpy(char *dst, const char *src, uptr n) {
uptr i;
for (i = 0; i < n && src[i]; i++)
@@ -208,6 +232,12 @@ char *internal_strstr(const char *haystack, const char *needle) {
return nullptr;
}
+uptr internal_wcslen(const wchar_t *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base) {
CHECK_EQ(base, 10);
while (IsSpace(*nptr)) nptr++;
diff --git a/libsanitizer/sanitizer_common/sanitizer_libc.h b/libsanitizer/sanitizer_common/sanitizer_libc.h
index 1b3f8edfadb..d26ec8704c0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_libc.h
+++ b/libsanitizer/sanitizer_common/sanitizer_libc.h
@@ -41,12 +41,15 @@ uptr internal_strcspn(const char *s, const char *reject);
char *internal_strdup(const char *s);
char *internal_strndup(const char *s, uptr n);
uptr internal_strlen(const char *s);
+uptr internal_strlcat(char *dst, const char *src, uptr maxlen);
char *internal_strncat(char *dst, const char *src, uptr n);
int internal_strncmp(const char *s1, const char *s2, uptr n);
+uptr internal_strlcpy(char *dst, const char *src, uptr maxlen);
char *internal_strncpy(char *dst, const char *src, uptr n);
uptr internal_strnlen(const char *s, uptr maxlen);
char *internal_strrchr(const char *s, int c);
// This is O(N^2), but we are not using it in hot places.
+uptr internal_wcslen(const wchar_t *s);
char *internal_strstr(const char *haystack, const char *needle);
// Works only for base=10 and doesn't set errno.
s64 internal_simple_strtoll(const char *nptr, char **endptr, int base);
@@ -57,15 +60,18 @@ int internal_snprintf(char *buffer, uptr length, const char *format, ...);
bool mem_is_zero(const char *mem, uptr size);
// I/O
-const fd_t kInvalidFd = (fd_t)-1;
-const fd_t kStdinFd = 0;
-const fd_t kStdoutFd = (fd_t)1;
-const fd_t kStderrFd = (fd_t)2;
+// Define these as macros so we can use them in linker initialized global
+// structs without dynamic initialization.
+#define kInvalidFd ((fd_t)-1)
+#define kStdinFd ((fd_t)0)
+#define kStdoutFd ((fd_t)1)
+#define kStderrFd ((fd_t)2)
uptr internal_ftruncate(fd_t fd, uptr size);
// OS
void NORETURN internal__exit(int exitcode);
+unsigned int internal_sleep(unsigned int seconds);
uptr internal_getpid();
uptr internal_getppid();
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cc b/libsanitizer/sanitizer_common/sanitizer_linux.cc
index 2cefa20a5f0..806fcd5e284 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.cc
@@ -58,7 +58,10 @@
#include <unistd.h>
#if SANITIZER_FREEBSD
+#include <sys/exec.h>
#include <sys/sysctl.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
#include <machine/atomic.h>
extern "C" {
// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
@@ -87,12 +90,19 @@ const int FUTEX_WAKE = 1;
// Are we using 32-bit or 64-bit Linux syscalls?
// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
// but it still needs to use 64-bit syscalls.
-#if SANITIZER_LINUX && (defined(__x86_64__) || SANITIZER_WORDSIZE == 64)
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64)
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
#else
# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
#endif
+#if defined(__x86_64__) || SANITIZER_MIPS64
+extern "C" {
+extern void internal_sigreturn();
+}
+#endif
+
namespace __sanitizer {
#if SANITIZER_LINUX && defined(__x86_64__)
@@ -104,6 +114,7 @@ namespace __sanitizer {
#endif
// --------------- sanitizer_libc.h
+#if !SANITIZER_S390
uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
OFF_T offset) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
@@ -116,6 +127,7 @@ uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
offset / 4096);
#endif
}
+#endif // !SANITIZER_S390
uptr internal_munmap(void *addr, uptr length) {
return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
@@ -238,7 +250,15 @@ uptr internal_lstat(const char *path, void *buf) {
return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path,
(uptr)buf, AT_SYMLINK_NOFOLLOW);
#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
+ // For mips64, lstat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
+# endif
#else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
@@ -249,7 +269,15 @@ uptr internal_lstat(const char *path, void *buf) {
uptr internal_fstat(fd_t fd, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
+ // For mips64, fstat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
+# endif
#else
struct stat64 buf64;
int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
@@ -312,6 +340,15 @@ void internal__exit(int exitcode) {
Die(); // Unreachable.
}
+unsigned int internal_sleep(unsigned int seconds) {
+ struct timespec ts;
+  ts.tv_sec = seconds;
+ ts.tv_nsec = 0;
+ int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
+ if (res) return ts.tv_sec;
+ return 0;
+}
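
internal_sleep mirrors sleep(3): it requests the full interval, and when nanosleep is interrupted the kernel writes the remaining time back through the second argument, which becomes the return value. An illustrative libc-based equivalent (the runtime version deliberately avoids libc; assumes a POSIX host):

    #include <ctime>
    unsigned int sleep_like(unsigned int seconds) {
      timespec ts = {static_cast<time_t>(seconds), 0};
      if (nanosleep(&ts, &ts))  // interrupted: ts holds the remaining time
        return static_cast<unsigned int>(ts.tv_sec);
      return 0;
    }
    int main() { return sleep_like(0); }
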
+
uptr internal_execve(const char *filename, char *const argv[],
char *const envp[]) {
return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
@@ -392,11 +429,13 @@ const char *GetEnv(const char *name) {
#endif
}
+#if !SANITIZER_FREEBSD
extern "C" {
SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
}
+#endif
-#if !SANITIZER_GO
+#if !SANITIZER_GO && !SANITIZER_FREEBSD
static void ReadNullSepFileToArray(const char *path, char ***arr,
int arr_size) {
char *buff;
@@ -421,7 +460,8 @@ static void ReadNullSepFileToArray(const char *path, char ***arr,
}
#endif
-static void GetArgsAndEnv(char*** argv, char*** envp) {
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+#if !SANITIZER_FREEBSD
#if !SANITIZER_GO
if (&__libc_stack_end) {
#endif
@@ -436,6 +476,25 @@ static void GetArgsAndEnv(char*** argv, char*** envp) {
ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
}
#endif
+#else
+ // On FreeBSD, retrieving the argument and environment arrays is done via the
+ // kern.ps_strings sysctl, which returns a pointer to a structure containing
+ // this information. See also <sys/exec.h>.
+ ps_strings *pss;
+ size_t sz = sizeof(pss);
+ if (sysctlbyname("kern.ps_strings", &pss, &sz, NULL, 0) == -1) {
+ Printf("sysctl kern.ps_strings failed\n");
+ Die();
+ }
+ *argv = pss->ps_argvstr;
+ *envp = pss->ps_envstr;
+#endif
+}
+
+char **GetArgv() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return argv;
}
void ReExec() {
@@ -561,7 +620,8 @@ int internal_fork() {
#if SANITIZER_LINUX
#define SA_RESTORER 0x04000000
-// Doesn't set sa_restorer, use with caution (see below).
+// Doesn't set sa_restorer if the caller did not set it, so use with caution
+// (see below).
int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
__sanitizer_kernel_sigaction_t k_act, k_oldact;
internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
@@ -583,7 +643,9 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
// rt_sigaction, so we need to do the same (we'll need to reimplement the
// restorers; for x86_64 the restorer address can be obtained from
// oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact).
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
k_act.sa_restorer = u_act->sa_restorer;
+#endif
}
uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
@@ -597,10 +659,31 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
sizeof(__sanitizer_kernel_sigset_t));
u_oldact->sa_flags = k_oldact.sa_flags;
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
u_oldact->sa_restorer = k_oldact.sa_restorer;
+#endif
}
return result;
}
+
+// Invokes sigaction via a raw syscall with a restorer, but does not support
+// all platforms yet.
+// We disable it for Go simply because we have not yet added it to buildgo.sh.
+#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
+int internal_sigaction_syscall(int signum, const void *act, void *oldact) {
+ if (act == nullptr)
+ return internal_sigaction_norestorer(signum, act, oldact);
+ __sanitizer_sigaction u_adjust;
+ internal_memcpy(&u_adjust, act, sizeof(u_adjust));
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+ if (u_adjust.sa_restorer == nullptr) {
+ u_adjust.sa_restorer = internal_sigreturn;
+ }
+#endif
+ return internal_sigaction_norestorer(signum, (const void *)&u_adjust,
+ oldact);
+}
+#endif // (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
#endif // SANITIZER_LINUX
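The subtlety internal_sigaction_syscall addresses: the raw rt_sigaction syscall, unlike libc's sigaction(), installs no return trampoline, so a handler registered without SA_RESTORER has nothing to return through when it finishes. A hedged sketch of the kernel-facing struct (field layout per the x86_64 kernel ABI, not the userspace <signal.h> type):

    // Sketch only: the sigaction layout the raw rt_sigaction syscall expects.
    struct kernel_sigaction {
      void (*handler)(int);    // or a three-argument sa_sigaction
      unsigned long flags;     // must include SA_RESTORER (0x04000000)
      void (*restorer)();      // trampoline issuing rt_sigreturn, e.g. the
                               // internal_sigreturn stubs in the new .S files
      unsigned long mask[1];   // kernel sigset; its size is passed separately
    };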
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
@@ -620,6 +703,10 @@ void internal_sigfillset(__sanitizer_sigset_t *set) {
internal_memset(set, 0xff, sizeof(*set));
}
+void internal_sigemptyset(__sanitizer_sigset_t *set) {
+ internal_memset(set, 0, sizeof(*set));
+}
+
#if SANITIZER_LINUX
void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
signum -= 1;
@@ -630,6 +717,16 @@ void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
k_set->sig[idx] &= ~(1 << bit);
}
+
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
+ signum -= 1;
+ CHECK_GE(signum, 0);
+ CHECK_LT(signum, sizeof(*set) * 8);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ return k_set->sig[idx] & (1 << bit);
+}
#endif // SANITIZER_LINUX
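The idx/bit arithmetic in internal_sigdelset and internal_sigismember treats the kernel sigset as an array of words of sizeof(sig[0]) bytes. A quick worked example:

    // With W = sizeof(k_set->sig[0]) * 8 bits per word and signum = 10
    // (SIGUSR1): signum - 1 = 9, so idx = 9 / W and bit = 9 % W.
    //   8-bit words:  sig[1], bit 1
    //   64-bit words: sig[0], bit 9
    // Membership is then k_set->sig[idx] & (1 << bit).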
// ThreadLister implementation.
@@ -701,7 +798,10 @@ bool ThreadLister::GetDirectoryEntries() {
}
uptr GetPageSize() {
-#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
+// On Android post-M, sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
+#if SANITIZER_ANDROID
+ return 4096;
+#elif SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
return EXEC_PAGESIZE;
#else
return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
@@ -908,8 +1008,18 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
"bnez $2,1f;\n"
/* Call "fn(arg)". */
+#if SANITIZER_WORDSIZE == 32
+#ifdef __BIG_ENDIAN__
+ "lw $25,4($29);\n"
+ "lw $4,12($29);\n"
+#else
+ "lw $25,0($29);\n"
+ "lw $4,8($29);\n"
+#endif
+#else
"ld $25,0($29);\n"
"ld $4,8($29);\n"
+#endif
"jal $25;\n"
/* Call _exit($v0). */
@@ -981,18 +1091,91 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
: "x30", "memory");
return res;
}
-#endif // defined(__x86_64__) && SANITIZER_LINUX
+#elif defined(__powerpc64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+/* Stack frame offsets. */
+#if _CALL_ELF != 2
+#define FRAME_MIN_SIZE 112
+#define FRAME_TOC_SAVE 40
+#else
+#define FRAME_MIN_SIZE 32
+#define FRAME_TOC_SAVE 24
+#endif
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
-#if SANITIZER_ANDROID
-#define PROP_VALUE_MAX 92
-extern "C" SANITIZER_WEAK_ATTRIBUTE int __system_property_get(const char *name,
- char *value);
-void GetExtraActivationFlags(char *buf, uptr size) {
- CHECK(size > PROP_VALUE_MAX);
- CHECK(&__system_property_get);
- __system_property_get("asan.options", buf);
+ register int (*__fn)(void *) __asm__("r3") = fn;
+ register void *__cstack __asm__("r4") = child_stack;
+ register int __flags __asm__("r5") = flags;
+ register void * __arg __asm__("r6") = arg;
+ register int * __ptidptr __asm__("r7") = parent_tidptr;
+ register void * __newtls __asm__("r8") = newtls;
+ register int * __ctidptr __asm__("r9") = child_tidptr;
+
+ __asm__ __volatile__(
+ /* fn, arg, child_stack are saved across the syscall */
+ "mr 28, %5\n\t"
+ "mr 29, %6\n\t"
+ "mr 27, %8\n\t"
+
+ /* syscall
+ r3 == flags
+ r4 == child_stack
+ r5 == parent_tidptr
+ r6 == newtls
+ r7 == child_tidptr */
+ "mr 3, %7\n\t"
+ "mr 5, %9\n\t"
+ "mr 6, %10\n\t"
+ "mr 7, %11\n\t"
+ "li 0, %3\n\t"
+ "sc\n\t"
+
+ /* Test if syscall was successful */
+ "cmpdi cr1, 3, 0\n\t"
+ "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+ "bne- cr1, 1f\n\t"
+
+ /* Do the function call */
+ "std 2, %13(1)\n\t"
+#if _CALL_ELF != 2
+ "ld 0, 0(28)\n\t"
+ "ld 2, 8(28)\n\t"
+ "mtctr 0\n\t"
+#else
+ "mr 12, 28\n\t"
+ "mtctr 12\n\t"
+#endif
+ "mr 3, 27\n\t"
+ "bctrl\n\t"
+ "ld 2, %13(1)\n\t"
+
+ /* Call _exit(r3) */
+ "li 0, %4\n\t"
+ "sc\n\t"
+
+ /* Return to parent */
+ "1:\n\t"
+ "mr %0, 3\n\t"
+ : "=r" (res)
+ : "0" (-1), "i" (EINVAL),
+ "i" (__NR_clone), "i" (__NR_exit),
+ "r" (__fn), "r" (__cstack), "r" (__flags),
+ "r" (__arg), "r" (__ptidptr), "r" (__newtls),
+ "r" (__ctidptr), "i" (FRAME_MIN_SIZE), "i" (FRAME_TOC_SAVE)
+ : "cr0", "cr1", "memory", "ctr",
+ "r0", "r29", "r27", "r28");
+ return res;
}
+#endif // defined(__x86_64__) && SANITIZER_LINUX
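All the clone implementations above share one trick: fn and arg are spilled to the top of the child's stack before the syscall, because the child wakes up with unpredictable scratch registers and can only recover them from memory it owns. A hedged usage sketch (assumes the declarations in sanitizer_linux.h and sanitizer_common.h; the stack size is illustrative):

    // Sketch only: spawning a child via internal_clone.
    static int ChildMain(void *arg) {
      // Runs on the freshly carved stack; fn/arg were recovered from it.
      return 0;
    }

    void SpawnExample() {
      const uptr kStackSize = 64 << 10;              // illustrative size
      char *stack = (char *)MmapOrDie(kStackSize, "clone stack");
      int child_tid = 0;
      internal_clone(ChildMain, stack + kStackSize,  // stacks grow down
                     /*flags=*/SIGCHLD,              // reapable like fork()
                     /*arg=*/nullptr, /*parent_tidptr=*/nullptr,
                     /*newtls=*/nullptr, &child_tid);
    }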
+#if SANITIZER_ANDROID
#if __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
@@ -1035,15 +1218,17 @@ AndroidApiLevel AndroidGetApiLevel() {
#endif
-bool IsDeadlySignal(int signum) {
+bool IsHandledDeadlySignal(int signum) {
if (common_flags()->handle_abort && signum == SIGABRT)
return true;
+ if (common_flags()->handle_sigill && signum == SIGILL)
+ return true;
if (common_flags()->handle_sigfpe && signum == SIGFPE)
return true;
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void *internal_start_thread(void(*func)(void *arg), void *arg) {
// Start the thread with signals blocked, otherwise it can steal user signals.
__sanitizer_sigset_t set, old;
@@ -1069,6 +1254,54 @@ void *internal_start_thread(void (*func)(void *), void *arg) { return 0; }
void internal_join_thread(void *th) {}
#endif
+#if defined(__aarch64__)
+// Android headers in the older NDK releases lack this definition.
+struct __sanitizer_esr_context {
+ struct _aarch64_ctx head;
+ uint64_t esr;
+};
+
+static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
+ static const u32 kEsrMagic = 0x45535201;
+ u8 *aux = ucontext->uc_mcontext.__reserved;
+ while (true) {
+ _aarch64_ctx *ctx = (_aarch64_ctx *)aux;
+ if (ctx->size == 0) break;
+ if (ctx->magic == kEsrMagic) {
+ *esr = ((__sanitizer_esr_context *)ctx)->esr;
+ return true;
+ }
+ aux += ctx->size;
+ }
+ return false;
+}
+#endif
+
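Aarch64GetESR walks the kernel's variable-length record chain in uc_mcontext.__reserved; schematically, the layout it traverses is:

    // Each record is an _aarch64_ctx header followed by its payload;
    // a zero-sized header terminates the chain:
    //   [ magic = FPSIMD_MAGIC, size | fpsimd state ]
    //   [ magic = 0x45535201 (ESR), size | u64 esr  ]  <- the record wanted here
    //   [ magic = 0, size = 0 ]                        <- terminator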
+SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
+ ucontext_t *ucontext = (ucontext_t *)context;
+#if defined(__x86_64__) || defined(__i386__)
+ static const uptr PF_WRITE = 1U << 1;
+#if SANITIZER_FREEBSD
+ uptr err = ucontext->uc_mcontext.mc_err;
+#else
+ uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
+#endif
+ return err & PF_WRITE ? WRITE : READ;
+#elif defined(__arm__)
+ static const uptr FSR_WRITE = 1U << 11;
+ uptr fsr = ucontext->uc_mcontext.error_code;
+ return fsr & FSR_WRITE ? WRITE : READ;
+#elif defined(__aarch64__)
+ static const u64 ESR_ELx_WNR = 1U << 6;
+ u64 esr;
+ if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
+ return esr & ESR_ELx_WNR ? WRITE : READ;
+#else
+ (void)ucontext;
+ return UNKNOWN; // FIXME: Implement.
+#endif
+}
+
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
#if defined(__arm__)
ucontext_t *ucontext = (ucontext_t*)context;
@@ -1136,11 +1369,29 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*pc = ucontext->uc_mcontext.pc;
*bp = ucontext->uc_mcontext.gregs[30];
*sp = ucontext->uc_mcontext.gregs[29];
+#elif defined(__s390__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if defined(__s390x__)
+ *pc = ucontext->uc_mcontext.psw.addr;
+# else
+ *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff;
+# endif
+ *bp = ucontext->uc_mcontext.gregs[11];
+ *sp = ucontext->uc_mcontext.gregs[15];
#else
# error "Unsupported arch"
#endif
}
+void MaybeReexec() {
+ // No need to re-exec on Linux.
+}
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+ UNREACHABLE("FindAvailableMemoryRange is not available");
+ return 0;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h
index 44977020bce..895bfc18195 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux.h
+++ b/libsanitizer/sanitizer_common/sanitizer_linux.h
@@ -32,7 +32,6 @@ uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);
-void internal_sigfillset(__sanitizer_sigset_t *set);
// Linux-only syscalls.
#if SANITIZER_LINUX
@@ -41,8 +40,13 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
// (like the process-wide error reporting SEGV handler) must use
// internal_sigaction instead.
int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+#if (defined(__x86_64__) || SANITIZER_MIPS64) && !SANITIZER_GO
+// Uses a raw system call to avoid interceptors.
+int internal_sigaction_syscall(int signum, const void *act, void *oldact);
+#endif
void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
-#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__)
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
+ || defined(__powerpc64__) || defined(__s390__)
uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
int *parent_tidptr, void *newtls, int *child_tidptr);
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
index ff69664e7b9..63e70660cf3 100644
--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cc
@@ -32,6 +32,7 @@
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
+#include <syslog.h>
#if SANITIZER_FREEBSD
#include <pthread_np.h>
@@ -49,8 +50,6 @@
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
#include <android/log.h>
-#else
-#include <syslog.h>
#endif
#if !SANITIZER_ANDROID
@@ -156,7 +155,6 @@ bool SanitizerGetThreadName(char *name, int max_len) {
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
static uptr g_tls_size;
-#endif
#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
@@ -164,26 +162,7 @@ static uptr g_tls_size;
# define DL_INTERNAL_FUNCTION
#endif
-#if defined(__mips__) || defined(__powerpc64__)
-// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
-// head structure. It lies before the static tls blocks.
-static uptr TlsPreTcbSize() {
-# if defined(__mips__)
- const uptr kTcbHead = 16; // sizeof (tcbhead_t)
-# elif defined(__powerpc64__)
- const uptr kTcbHead = 88; // sizeof (tcbhead_t)
-# endif
- const uptr kTlsAlign = 16;
- const uptr kTlsPreTcbSize =
- (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
- InitTlsSize();
- g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1);
- return kTlsPreTcbSize;
-}
-#endif
-
void InitTlsSize() {
-#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
// all currently supported platforms have 16-byte stack alignment
const size_t kStackAlign = 16;
typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
@@ -199,11 +178,13 @@ void InitTlsSize() {
if (tls_align < kStackAlign)
tls_align = kStackAlign;
g_tls_size = RoundUpTo(tls_size, tls_align);
-#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
}
+#else
+void InitTlsSize() { }
+#endif // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO
#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) \
- || defined(__aarch64__) || defined(__powerpc64__)) \
+ || defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__)) \
&& SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t kThreadDescriptorSize;
@@ -220,6 +201,11 @@ uptr ThreadDescriptorSize() {
char *end;
int minor = internal_simple_strtoll(buf + 8, &end, 10);
if (end != buf + 8 && (*end == '\0' || *end == '.')) {
+ int patch = 0;
+ if (*end == '.')
+ // strtoll will return 0 if no valid conversion could be performed
+ patch = internal_simple_strtoll(end + 1, nullptr, 10);
+
/* sizeof(struct pthread) values from various glibc versions. */
if (SANITIZER_X32)
val = 1728; // Assume only one particular version for x32.
@@ -233,9 +219,9 @@ uptr ThreadDescriptorSize() {
val = FIRST_32_SECOND_64(1136, 1712);
else if (minor == 10)
val = FIRST_32_SECOND_64(1168, 1776);
- else if (minor <= 12)
+ else if (minor == 11 || (minor == 12 && patch == 1))
val = FIRST_32_SECOND_64(1168, 2288);
- else if (minor == 13)
+ else if (minor <= 13)
val = FIRST_32_SECOND_64(1168, 2304);
else
val = FIRST_32_SECOND_64(1216, 2304);
@@ -260,6 +246,9 @@ uptr ThreadDescriptorSize() {
val = 1776; // from glibc.ppc64le 2.20-8.fc21
atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
return val;
+#elif defined(__s390__)
+ val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
+ atomic_store(&kThreadDescriptorSize, val, memory_order_relaxed);
+ return val;
#endif
return 0;
}
@@ -271,6 +260,24 @@ uptr ThreadSelfOffset() {
return kThreadSelfOffset;
}
+#if defined(__mips__) || defined(__powerpc64__)
+// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
+// head structure. It lies before the static tls blocks.
+static uptr TlsPreTcbSize() {
+# if defined(__mips__)
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+# elif defined(__powerpc64__)
+ const uptr kTcbHead = 88; // sizeof (tcbhead_t)
+# endif
+ const uptr kTlsAlign = 16;
+ const uptr kTlsPreTcbSize =
+ (ThreadDescriptorSize() + kTcbHead + kTlsAlign - 1) & ~(kTlsAlign - 1);
+ InitTlsSize();
+ g_tls_size = (g_tls_size + kTlsPreTcbSize + kTlsAlign -1) & ~(kTlsAlign - 1);
+ return kTlsPreTcbSize;
+}
+#endif
+
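Both rounding lines in TlsPreTcbSize use the usual power-of-two round-up mask. A quick check of the arithmetic:

    // (x + align - 1) & ~(align - 1) rounds x up to a multiple of align,
    // provided align is a power of two. With kTlsAlign = 16:
    //   x = 100  ->  (100 + 15) & ~15  =  115 & ~15  =  112
    //   x = 112  ->  (112 + 15) & ~15  =  127 & ~15  =  112  (already aligned)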
uptr ThreadSelf() {
uptr descr_addr;
# if defined(__i386__)
@@ -290,6 +297,9 @@ uptr ThreadSelf() {
.set pop" : "=r" (thread_pointer));
descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# elif defined(__aarch64__)
+ descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+# elif defined(__s390__)
descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
// PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
@@ -330,7 +340,7 @@ uptr ThreadSelf() {
#if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
-# if defined(__x86_64__) || defined(__i386__)
+# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
*addr = ThreadSelf();
*size = GetTlsSize();
*addr -= *size;
@@ -410,17 +420,12 @@ typedef ElfW(Phdr) Elf_Phdr;
# endif
struct DlIteratePhdrData {
- LoadedModule *modules;
- uptr current_n;
+ InternalMmapVector<LoadedModule> *modules;
bool first;
- uptr max_n;
- string_predicate_t filter;
};
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
- if (data->current_n == data->max_n)
- return 0;
InternalScopedString module_name(kMaxPathLength);
if (data->first) {
data->first = false;
@@ -431,20 +436,18 @@ static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
}
if (module_name[0] == '\0')
return 0;
- if (data->filter && !data->filter(module_name.data()))
- return 0;
- LoadedModule *cur_module = &data->modules[data->current_n];
- cur_module->set(module_name.data(), info->dlpi_addr);
- data->current_n++;
+ LoadedModule cur_module;
+ cur_module.set(module_name.data(), info->dlpi_addr);
for (int i = 0; i < info->dlpi_phnum; i++) {
const Elf_Phdr *phdr = &info->dlpi_phdr[i];
if (phdr->p_type == PT_LOAD) {
uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
uptr cur_end = cur_beg + phdr->p_memsz;
bool executable = phdr->p_flags & PF_X;
- cur_module->addAddressRange(cur_beg, cur_end, executable);
+ cur_module.addAddressRange(cur_beg, cur_end, executable);
}
}
+ data->modules->push_back(cur_module);
return 0;
}
@@ -453,8 +456,8 @@ extern "C" __attribute__((weak)) int dl_iterate_phdr(
int (*)(struct dl_phdr_info *, size_t, void *), void *);
#endif
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
- string_predicate_t filter) {
+void ListOfModules::init() {
+ clear();
#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
u32 api_level = AndroidGetApiLevel();
// Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
@@ -462,13 +465,12 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
// both K and L (and future) Android releases.
if (api_level <= ANDROID_LOLLIPOP_MR1) { // L or earlier
MemoryMappingLayout memory_mapping(false);
- return memory_mapping.DumpListOfModules(modules, max_modules, filter);
+ memory_mapping.DumpListOfModules(&modules_);
+ return;
}
#endif
- CHECK(modules);
- DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
+ DlIteratePhdrData data = {&modules_, true};
dl_iterate_phdr(dl_iterate_phdr_cb, &data);
- return data.current_n;
}
// getrusage does not give us the current RSS, only the max RSS.
@@ -519,19 +521,20 @@ uptr GetRSS() {
static atomic_uint8_t android_log_initialized;
void AndroidLogInit() {
+ openlog(GetProcessName(), 0, LOG_USER);
atomic_store(&android_log_initialized, 1, memory_order_release);
}
-static bool IsSyslogAvailable() {
+static bool ShouldLogAfterPrintf() {
return atomic_load(&android_log_initialized, memory_order_acquire);
}
#else
void AndroidLogInit() {}
-static bool IsSyslogAvailable() { return true; }
+static bool ShouldLogAfterPrintf() { return true; }
#endif // SANITIZER_ANDROID
-static void WriteOneLineToSyslog(const char *s) {
+void WriteOneLineToSyslog(const char *s) {
#if SANITIZER_ANDROID && __ANDROID_API__ < 21
__android_log_write(ANDROID_LOG_INFO, NULL, s);
#else
@@ -539,24 +542,11 @@ static void WriteOneLineToSyslog(const char *s) {
#endif
}
-void WriteToSyslog(const char *buffer) {
- if (!IsSyslogAvailable())
- return;
- char *copy = internal_strdup(buffer);
- char *p = copy;
- char *q;
- // syslog, at least on Android, has an implicit message length limit.
- // Print one line at a time.
- do {
- q = internal_strchr(p, '\n');
- if (q)
- *q = '\0';
- WriteOneLineToSyslog(p);
- if (q)
- p = q + 1;
- } while (q);
- InternalFree(copy);
+void LogMessageOnPrintf(const char *str) {
+ if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
+ WriteToSyslog(str);
}
+
#endif // SANITIZER_LINUX
} // namespace __sanitizer
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_mips64.S b/libsanitizer/sanitizer_common/sanitizer_linux_mips64.S
new file mode 100644
index 00000000000..0b76f3a473a
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_mips64.S
@@ -0,0 +1,21 @@
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+
+// Avoid being marked as needing an executable stack:
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+// Further contents are mips64 only:
+#if defined(__linux__) && defined(__mips64)
+
+.section .text
+.set noreorder
+.globl internal_sigreturn
+.type internal_sigreturn, @function
+internal_sigreturn:
+
+ li $v0,5211 // 5211 == SYS_rt_sigreturn on the MIPS n64 ABI (5000 + 211)
+ syscall
+
+.size internal_sigreturn, .-internal_sigreturn
+
+#endif // defined(__linux__) && defined(__mips64)
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_s390.cc b/libsanitizer/sanitizer_common/sanitizer_linux_s390.cc
new file mode 100644
index 00000000000..c7d647528ab
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_s390.cc
@@ -0,0 +1,189 @@
+//===-- sanitizer_linux_s390.cc -------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements s390-linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX && SANITIZER_S390
+
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+
+#include <errno.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+// --------------- sanitizer_libc.h
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ OFF_T offset) {
+ struct s390_mmap_params {
+ unsigned long addr;
+ unsigned long length;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+ } params = {
+ (unsigned long)addr,
+ (unsigned long)length,
+ (unsigned long)prot,
+ (unsigned long)flags,
+ (unsigned long)fd,
+# ifdef __s390x__
+ (unsigned long)offset,
+# else
+ (unsigned long)(offset / 4096),
+# endif
+ };
+# ifdef __s390x__
+ return syscall(__NR_mmap, &params);
+# else
+ return syscall(__NR_mmap2, &params);
+# endif
+}
+
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ // Minimum frame size.
+#ifdef __s390x__
+ child_stack = (char *)child_stack - 160;
+#else
+ child_stack = (char *)child_stack - 96;
+#endif
+ // Terminate unwind chain.
+ ((unsigned long *)child_stack)[0] = 0;
+ // And pass parameters.
+ ((unsigned long *)child_stack)[1] = (uptr)fn;
+ ((unsigned long *)child_stack)[2] = (uptr)arg;
+ register long res __asm__("r2");
+ register void *__cstack __asm__("r2") = child_stack;
+ register int __flags __asm__("r3") = flags;
+ register int * __ptidptr __asm__("r4") = parent_tidptr;
+ register int * __ctidptr __asm__("r5") = child_tidptr;
+ register void * __newtls __asm__("r6") = newtls;
+
+ __asm__ __volatile__(
+ /* Clone. */
+ "svc %1\n"
+
+ /* if (%r2 != 0)
+ * return;
+ */
+#ifdef __s390x__
+ "cghi %%r2, 0\n"
+#else
+ "chi %%r2, 0\n"
+#endif
+ "jne 1f\n"
+
+ /* Call "fn(arg)". */
+#ifdef __s390x__
+ "lmg %%r1, %%r2, 8(%%r15)\n"
+#else
+ "lm %%r1, %%r2, 4(%%r15)\n"
+#endif
+ "basr %%r14, %%r1\n"
+
+ /* Call _exit(%r2). */
+ "svc %2\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r" (res)
+ : "i"(__NR_clone), "i"(__NR_exit),
+ "r"(__cstack),
+ "r"(__flags),
+ "r"(__ptidptr),
+ "r"(__ctidptr),
+ "r"(__newtls)
+ : "memory", "cc");
+ return res;
+}
+
+#if SANITIZER_S390_64
+static bool FixedCVE_2016_2143() {
+ // Try to determine if the running kernel has a fix for CVE-2016-2143,
+ // return false if in doubt (better safe than sorry). Distros may want to
+ // adjust this for their own kernels.
+ struct utsname buf;
+ unsigned int major, minor, patch = 0;
+ // This should never fail, but just in case...
+ if (uname(&buf))
+ return false;
+ char *ptr = buf.release;
+ major = internal_simple_strtoll(ptr, &ptr, 10);
+ // At least first 2 should be matched.
+ if (ptr[0] != '.')
+ return false;
+ minor = internal_simple_strtoll(ptr+1, &ptr, 10);
+ // Third is optional.
+ if (ptr[0] == '.')
+ patch = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (major < 3) {
+ // <3.0 is bad.
+ return false;
+ } else if (major == 3) {
+ // 3.2.79+ is OK.
+ if (minor == 2 && patch >= 79)
+ return true;
+ // 3.12.58+ is OK.
+ if (minor == 12 && patch >= 58)
+ return true;
+ // Otherwise, bad.
+ return false;
+ } else if (major == 4) {
+ // 4.1.21+ is OK.
+ if (minor == 1 && patch >= 21)
+ return true;
+ // 4.4.6+ is OK.
+ if (minor == 4 && patch >= 6)
+ return true;
+ // Otherwise, OK if 4.5+.
+ return minor >= 5;
+ } else {
+ // Linux 5 and up are fine.
+ return true;
+ }
+}
+
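A few worked inputs for the version gate above (uname release strings; any suffix after the patch number stops the strtoll conversion and is ignored):

    // "3.12.62-default"  -> 3.12.62 -> fixed    (3.12.x needs patch >= 58)
    // "4.4.0-21-generic" -> 4.4.0   -> unfixed  (4.4.x needs patch >= 6)
    // "4.5.7"            -> 4.5.7   -> fixed    (4.5+ is unconditionally OK)
    // "2.6.32-642.el6"   -> 2.x.y   -> unfixed  (anything below 3.0 is bad)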
+void AvoidCVE_2016_2143() {
+ // Older kernels are affected by CVE-2016-2143 - they will crash hard
+ // if someone uses 4-level page tables (i.e. virtual addresses >= 4TB)
+ // and fork() in the same process. Unfortunately, sanitizers tend to
+ // require such addresses. Since this is very likely to crash the whole
+ // machine (sanitizers themselves use fork() for llvm-symbolizer, for one),
+ // abort the process at initialization instead.
+ if (FixedCVE_2016_2143())
+ return;
+ if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
+ return;
+ Report(
+ "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using ASan,\n"
+ "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
+ "machine, or worse.\n"
+ "\n"
+ "If you are certain your kernel is not vulnerable (you have compiled it\n"
+ "yourself, or are using an unrecognized distribution kernel), you can\n"
+ "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
+ "with any value.\n");
+ Die();
+}
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX && SANITIZER_S390
diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_x86_64.S b/libsanitizer/sanitizer_common/sanitizer_linux_x86_64.S
new file mode 100644
index 00000000000..6b892116ff4
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_linux_x86_64.S
@@ -0,0 +1,23 @@
+// This file is dual licensed under the MIT and the University of Illinois Open
+// Source Licenses. See LICENSE.TXT for details.
+
+// Avoid being marked as needing an executable stack:
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+// Further contents are x86_64-only:
+#if defined(__linux__) && defined(__x86_64__)
+
+#include "../builtins/assembly.h"
+
+// If the "naked" function attribute were supported for x86 we could
+// do this via inline asm.
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(internal_sigreturn)
+ mov $0xf, %eax // 0xf == SYS_rt_sigreturn
+ mov %rcx, %r10
+ syscall
+ ret // Won't normally reach here.
+END_COMPILERRT_FUNCTION(internal_sigreturn)
+
+#endif // defined(__linux__) && defined(__x86_64__)
diff --git a/libsanitizer/sanitizer_common/sanitizer_list.h b/libsanitizer/sanitizer_common/sanitizer_list.h
index 9216ede67fb..190c7e67cf0 100644
--- a/libsanitizer/sanitizer_common/sanitizer_list.h
+++ b/libsanitizer/sanitizer_common/sanitizer_list.h
@@ -69,7 +69,9 @@ struct IntrusiveList {
}
Item *front() { return first_; }
+ const Item *front() const { return first_; }
Item *back() { return last_; }
+ const Item *back() const { return last_; }
void append_front(IntrusiveList<Item> *l) {
CHECK_NE(this, l);
@@ -114,24 +116,32 @@ struct IntrusiveList {
}
}
- template<class ListTy, class ItemTy>
+ template<class ItemTy>
class IteratorBase {
public:
- explicit IteratorBase(ListTy *list)
- : list_(list), current_(list->first_) { }
- ItemTy *next() {
- ItemTy *ret = current_;
- if (current_) current_ = current_->next;
- return ret;
+ explicit IteratorBase(ItemTy *current) : current_(current) {}
+ IteratorBase &operator++() {
+ current_ = current_->next;
+ return *this;
+ }
+ bool operator!=(IteratorBase other) const {
+ return current_ != other.current_;
+ }
+ ItemTy &operator*() {
+ return *current_;
}
- bool hasNext() const { return current_ != nullptr; }
private:
- ListTy *list_;
ItemTy *current_;
};
- typedef IteratorBase<IntrusiveList<Item>, Item> Iterator;
- typedef IteratorBase<const IntrusiveList<Item>, const Item> ConstIterator;
+ typedef IteratorBase<Item> Iterator;
+ typedef IteratorBase<const Item> ConstIterator;
+
+ Iterator begin() { return Iterator(first_); }
+ Iterator end() { return Iterator(0); }
+
+ ConstIterator begin() const { return ConstIterator(first_); }
+ ConstIterator end() const { return ConstIterator(0); }
// private, don't use directly.
uptr size_;
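The rewritten IteratorBase drops the explicit next()/hasNext() protocol in favor of the operator++ / operator!= / operator* trio, which is exactly what C++11 range-for desugars to. A hedged usage sketch (Node is an illustrative item type carrying the intrusive next link the list requires):

    // Sketch only: iterating an IntrusiveList with the new begin()/end().
    struct Node {
      Node *next;  // intrusive link consumed by IntrusiveList<Node>
      int value;
    };

    int SumValues(const IntrusiveList<Node> &list) {
      int sum = 0;
      for (const Node &n : list)  // ConstIterator from begin()/end() above
        sum += n.value;
      return sum;
    }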
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_mac.cc
index 159db76f01e..4408d1dccb9 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.cc
@@ -11,6 +11,7 @@
#include "sanitizer_platform.h"
#if SANITIZER_MAC
+#include "sanitizer_mac.h"
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
@@ -23,7 +24,6 @@
#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
-#include "sanitizer_mac.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_platform_limits_posix.h"
#include "sanitizer_procmaps.h"
@@ -34,6 +34,23 @@
extern char **environ;
#endif
+#if defined(__has_include) && __has_include(<os/trace.h>)
+#define SANITIZER_OS_TRACE 1
+#include <os/trace.h>
+#else
+#define SANITIZER_OS_TRACE 0
+#endif
+
+#if !SANITIZER_IOS
+#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+#else
+extern "C" {
+ extern char ***_NSGetArgv(void);
+}
+#endif
+
+#include <asl.h>
+#include <dlfcn.h> // for dladdr()
#include <errno.h>
#include <fcntl.h>
#include <libkern/OSAtomic.h>
@@ -49,21 +66,45 @@ extern char **environ;
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/types.h>
+#include <sys/wait.h>
#include <unistd.h>
+#include <util.h>
+
+// From <crt_externs.h>, but we don't have that file on iOS.
+extern "C" {
+ extern char ***_NSGetArgv(void);
+ extern char ***_NSGetEnviron(void);
+}
+
+// From <mach/mach_vm.h>, but we don't have that file on iOS.
+extern "C" {
+ extern kern_return_t mach_vm_region_recurse(
+ vm_map_t target_task,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ natural_t *nesting_depth,
+ vm_region_recurse_info_t info,
+ mach_msg_type_number_t *infoCnt);
+}
namespace __sanitizer {
#include "sanitizer_syscall_generic.inc"
+// Direct syscalls, don't call libmalloc hooks.
+extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
+ off_t off);
+extern "C" int __munmap(void *, size_t);
+
// ---------------------- sanitizer_libc.h
uptr internal_mmap(void *addr, size_t length, int prot, int flags,
int fd, u64 offset) {
if (fd == -1) fd = VM_MAKE_TAG(VM_MEMORY_ANALYSIS_TOOL);
- return (uptr)mmap(addr, length, prot, flags, fd, offset);
+ return (uptr)__mmap(addr, length, prot, flags, fd, offset);
}
uptr internal_munmap(void *addr, uptr length) {
- return munmap(addr, length);
+ return __munmap(addr, length);
}
int internal_mprotect(void *addr, uptr length, int prot) {
@@ -129,6 +170,10 @@ void internal__exit(int exitcode) {
_exit(exitcode);
}
+unsigned int internal_sleep(unsigned int seconds) {
+ return sleep(seconds);
+}
+
uptr internal_getpid() {
return getpid();
}
@@ -145,9 +190,34 @@ uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
return sigprocmask(how, set, oldset);
}
+// Doesn't call pthread_atfork() handlers.
+extern "C" pid_t __fork(void);
+
int internal_fork() {
- // TODO(glider): this may call user's pthread_atfork() handlers which is bad.
- return fork();
+ return __fork();
+}
+
+int internal_forkpty(int *amaster) {
+ int master, slave;
+ if (openpty(&master, &slave, nullptr, nullptr, nullptr) == -1) return -1;
+ int pid = __fork();
+ if (pid == -1) {
+ close(master);
+ close(slave);
+ return -1;
+ }
+ if (pid == 0) {
+ close(master);
+ if (login_tty(slave) != 0) {
+ // We already forked, there's not much we can do. Let's quit.
+ Report("login_tty failed (errno %d)\n", errno);
+ internal__exit(1);
+ }
+ } else {
+ *amaster = master;
+ close(slave);
+ }
+ return pid;
}
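internal_forkpty combines openpty(), the atfork-free __fork(), and login_tty() so that the child's standard streams land on the pty slave. A hedged usage sketch (the argv/envp vectors are caller-supplied placeholders):

    // Sketch only: run a helper process on a pseudo-terminal.
    void PtySpawnExample(char *const argv[], char *const envp[]) {
      int master_fd = -1;
      int pid = internal_forkpty(&master_fd);
      if (pid == 0) {
        // Child: stdin/stdout/stderr are the pty slave after login_tty().
        internal_execve(argv[0], argv, envp);
        internal__exit(1);  // only reached if execve failed
      } else if (pid > 0) {
        // Parent: talk to the child through master_fd.
      }
    }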
uptr internal_rename(const char *oldpath, const char *newpath) {
@@ -158,6 +228,15 @@ uptr internal_ftruncate(fd_t fd, uptr size) {
return ftruncate(fd, size);
}
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ return execve(filename, argv, envp);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ return waitpid(pid, status, options);
+}
+
// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
struct stat st;
@@ -168,7 +247,10 @@ bool FileExists(const char *filename) {
}
uptr GetTid() {
- return reinterpret_cast<uptr>(pthread_self());
+ // FIXME: This can potentially get truncated on 32-bit, where uptr is 4 bytes.
+ uint64_t tid;
+ pthread_threadid_np(nullptr, &tid);
+ return tid;
}
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
@@ -178,7 +260,7 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
uptr stacksize = pthread_get_stacksize_np(pthread_self());
// pthread_get_stacksize_np() returns an incorrect stack size for the main
// thread on Mavericks. See
- // https://code.google.com/p/address-sanitizer/issues/detail?id=261
+ // https://github.com/google/sanitizers/issues/261
if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
stacksize == (1 << 19)) {
struct rlimit rl;
@@ -289,7 +371,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr stack_top, stack_bottom;
GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
*stk_addr = stack_bottom;
@@ -304,13 +386,18 @@ void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
#endif
}
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
- string_predicate_t filter) {
+void ListOfModules::init() {
+ clear();
MemoryMappingLayout memory_mapping(false);
- return memory_mapping.DumpListOfModules(modules, max_modules, filter);
+ memory_mapping.DumpListOfModules(&modules_);
}
-bool IsDeadlySignal(int signum) {
+bool IsHandledDeadlySignal(int signum) {
+ if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
+ // Handling fatal signals on watchOS and tvOS devices is disallowed.
+ return false;
+ if (common_flags()->handle_abort && signum == SIGABRT)
+ return true;
return (signum == SIGSEGV || signum == SIGBUS) && common_flags()->handle_segv;
}
@@ -382,6 +469,70 @@ void *internal_start_thread(void(*func)(void *arg), void *arg) {
void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
+#if !SANITIZER_GO
+static BlockingMutex syslog_lock(LINKER_INITIALIZED);
+#endif
+
+void WriteOneLineToSyslog(const char *s) {
+#if !SANITIZER_GO
+ syslog_lock.CheckLocked();
+ asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+#endif
+}
+
+void LogMessageOnPrintf(const char *str) {
+ // Log all printf output to CrashLog.
+ if (common_flags()->abort_on_error)
+ CRAppendCrashLogMessage(str);
+}
+
+void LogFullErrorReport(const char *buffer) {
+#if !SANITIZER_GO
+ // Log with os_trace. This will make it into the crash log.
+#if SANITIZER_OS_TRACE
+ if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
+ // os_trace requires the message (format parameter) to be a string literal.
+ if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
+ sizeof("AddressSanitizer") - 1) == 0)
+ os_trace("Address Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
+ sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
+ os_trace("Undefined Behavior Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
+ sizeof("ThreadSanitizer") - 1) == 0)
+ os_trace("Thread Sanitizer reported a failure.");
+ else
+ os_trace("Sanitizer tool reported a failure.");
+
+ if (common_flags()->log_to_syslog)
+ os_trace("Consult syslog for more information.");
+ }
+#endif
+
+ // Log to syslog.
+ // The logging on OS X may call pthread_create so we need the threading
+ // environment to be fully initialized. Also, this should never be called when
+ // holding the thread registry lock since that may result in a deadlock. If
+ // the reporting thread holds the thread registry mutex, and asl_log waits
+ // for GCD to dispatch a new thread, the process will deadlock, because the
+ // pthread_create wrapper needs to acquire the lock as well.
+ BlockingMutexLock l(&syslog_lock);
+ if (common_flags()->log_to_syslog)
+ WriteToSyslog(buffer);
+
+ // The report is added to CrashLog as part of logging all of Printf output.
+#endif
+}
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag(void *context) {
+#if defined(__x86_64__) || defined(__i386__)
+ ucontext_t *ucontext = static_cast<ucontext_t*>(context);
+ return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
+#else
+ return UNKNOWN;
+#endif
+}
+
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
@@ -409,6 +560,237 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
# endif
}
+#if !SANITIZER_GO
+static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
+LowLevelAllocator allocator_for_env;
+
+// Change the value of the env var |name|, leaking the original value.
+// If |name_value| is NULL, the variable is deleted from the environment,
+// otherwise the corresponding "NAME=value" string is replaced with
+// |name_value|.
+void LeakyResetEnv(const char *name, const char *name_value) {
+ char **env = GetEnviron();
+ uptr name_len = internal_strlen(name);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
+ if (len > name_len) {
+ const char *p = *env;
+ if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
+ // Match.
+ if (name_value) {
+ // Replace the old value with the new one.
+ *env = const_cast<char*>(name_value);
+ } else {
+ // Shift the subsequent pointers back.
+ char **del = env;
+ do {
+ del[0] = del[1];
+ } while (*del++);
+ }
+ }
+ }
+ env++;
+ }
+}
+
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool ReexecDisabled() {
+ return false;
+}
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
+static const double kMinDyldVersionWithAutoInterposition = 360.0;
+
+bool DyldNeedsEnvVariable() {
+ // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
+ // may still want to use sanitizers on older systems. On older Darwin
+ // platforms, dyld doesn't export the dyldVersionNumber symbol, so we
+ // simply return true.
+ if (!&dyldVersionNumber) return true;
+ // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
+ // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
+ // GetMacosVersion() doesn't work for the simulator. Let's instead check
+ // `dyldVersionNumber`, which is exported by dyld, against a known version
+ // number from the first OS release where this appeared.
+ return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
+}
+
+void MaybeReexec() {
+ if (ReexecDisabled()) return;
+
+ // Make sure the dynamic runtime library is preloaded so that the
+ // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
+ // ourselves.
+ Dl_info info;
+ RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));
+ char *dyld_insert_libraries =
+ const_cast<char*>(GetEnv(kDyldInsertLibraries));
+ uptr old_env_len = dyld_insert_libraries ?
+ internal_strlen(dyld_insert_libraries) : 0;
+ uptr fname_len = internal_strlen(info.dli_fname);
+ const char *dylib_name = StripModuleName(info.dli_fname);
+ uptr dylib_name_len = internal_strlen(dylib_name);
+
+ bool lib_is_in_env = dyld_insert_libraries &&
+ internal_strstr(dyld_insert_libraries, dylib_name);
+ if (DyldNeedsEnvVariable() && !lib_is_in_env) {
+ // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
+ // library.
+ InternalScopedString program_name(1024);
+ uint32_t buf_size = program_name.size();
+ _NSGetExecutablePath(program_name.data(), &buf_size);
+ char *new_env = const_cast<char*>(info.dli_fname);
+ if (dyld_insert_libraries) {
+ // Append the runtime dylib name to the existing value of
+ // DYLD_INSERT_LIBRARIES.
+ new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
+ internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
+ new_env[old_env_len] = ':';
+ // Copy fname_len and add a trailing zero.
+ internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
+ fname_len + 1);
+ // Ok to use setenv() since the wrappers don't depend on the value of
+ // asan_inited.
+ setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
+ } else {
+ // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
+ setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
+ }
+ VReport(1, "exec()-ing the program with\n");
+ VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
+ VReport(1, "to enable wrappers.\n");
+ execv(program_name.data(), *_NSGetArgv());
+
+ // We get here only if execv() failed.
+ Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+ "which is required for the sanitizer to work. We tried to set the "
+ "environment variable and re-execute itself, but execv() failed, "
+ "possibly because of sandbox restrictions. Make sure to launch the "
+ "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+ RAW_CHECK("execv failed" && 0);
+ }
+
+ // Verify that interceptors really work. We'll use dlsym to locate
+ // "pthread_create"; if interceptors are working, it should point to
+ // "wrap_pthread_create" within our own dylib.
+ Dl_info info_pthread_create;
+ void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
+ RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
+ if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
+ Report(
+ "ERROR: Interceptors are not working. This may be because %s is "
+ "loaded too late (e.g. via dlopen). Please launch the executable "
+ "with:\n%s=%s\n",
+ SanitizerToolName, kDyldInsertLibraries, info.dli_fname);
+ RAW_CHECK("interceptors not installed" && 0);
+ }
+
+ if (!lib_is_in_env)
+ return;
+
+ // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+ // the dylib from the environment variable, because interceptors are installed
+ // and we don't want our children to inherit the variable.
+
+ uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+ // Allocate memory to hold the previous env var name, its value, the '='
+ // sign and the '\0' char.
+ char *new_env = (char*)allocator_for_env.Allocate(
+ old_env_len + 2 + env_name_len);
+ RAW_CHECK(new_env);
+ internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+ internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+ new_env[env_name_len] = '=';
+ char *new_env_pos = new_env + env_name_len + 1;
+
+ // Iterate over colon-separated pieces of |dyld_insert_libraries|.
+ char *piece_start = dyld_insert_libraries;
+ char *piece_end = NULL;
+ char *old_env_end = dyld_insert_libraries + old_env_len;
+ do {
+ if (piece_start[0] == ':') piece_start++;
+ piece_end = internal_strchr(piece_start, ':');
+ if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+ if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+ uptr piece_len = piece_end - piece_start;
+
+ char *filename_start =
+ (char *)internal_memrchr(piece_start, '/', piece_len);
+ uptr filename_len = piece_len;
+ if (filename_start) {
+ filename_start += 1;
+ filename_len = piece_len - (filename_start - piece_start);
+ } else {
+ filename_start = piece_start;
+ }
+
+ // If the current piece isn't the runtime library name,
+ // append it to new_env.
+ if ((dylib_name_len != filename_len) ||
+ (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+ if (new_env_pos != new_env + env_name_len + 1) {
+ new_env_pos[0] = ':';
+ new_env_pos++;
+ }
+ internal_strncpy(new_env_pos, piece_start, piece_len);
+ new_env_pos += piece_len;
+ }
+ // Move on to the next piece.
+ piece_start = piece_end;
+ } while (piece_start < old_env_end);
+
+ // Can't use setenv() here, because it requires the allocator to be
+ // initialized.
+ // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+ // a separate function called after InitializeAllocator().
+ if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+ LeakyResetEnv(kDyldInsertLibraries, new_env);
+}
+#endif // SANITIZER_GO
+
+char **GetArgv() {
+ return *_NSGetArgv();
+}
+
+uptr FindAvailableMemoryRange(uptr shadow_size,
+ uptr alignment,
+ uptr left_padding) {
+ typedef vm_region_submap_short_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
+ // Start searching for available memory region past PAGEZERO, which is
+ // 4KB on 32-bit and 4GB on 64-bit.
+ mach_vm_address_t start_address =
+ (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+
+ mach_vm_address_t address = start_address;
+ mach_vm_address_t free_begin = start_address;
+ kern_return_t kr = KERN_SUCCESS;
+ while (kr == KERN_SUCCESS) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ RegionInfo vminfo;
+ mach_msg_type_number_t count = kRegionInfoSize;
+ kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+ if (free_begin != address) {
+ // We found a free region [free_begin..address-1].
+ uptr shadow_address = RoundUpTo((uptr)free_begin + left_padding,
+ alignment);
+ if (shadow_address + shadow_size < (uptr)address) {
+ return shadow_address;
+ }
+ }
+ // Move to the next region.
+ address += vmsize;
+ free_begin = address;
+ }
+
+ // We looked at all free regions and could not find one large enough.
+ return 0;
+}
+
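FindAvailableMemoryRange walks the task's VM map with mach_vm_region_recurse, treating the space between the end of one region and the start of the next as a candidate hole. A hedged sketch of how a caller might place shadow memory with it (kShadowSize is illustrative):

    // Sketch only: carve out an aligned hole for shadow memory.
    const uptr kShadowSize = 1ULL << 32;  // illustrative size
    uptr page = GetPageSizeCached();
    uptr shadow_start =
        FindAvailableMemoryRange(kShadowSize, /*alignment=*/page,
                                 /*left_padding=*/page);
    if (!shadow_start) {
      // No gap large enough below existing mappings; fall back or abort.
    }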
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
} // namespace __sanitizer
#endif // SANITIZER_MAC
diff --git a/libsanitizer/sanitizer_common/sanitizer_mac.h b/libsanitizer/sanitizer_common/sanitizer_mac.h
index 2aac83e3657..4bea069189f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_mac.h
+++ b/libsanitizer/sanitizer_common/sanitizer_mac.h
@@ -11,6 +11,7 @@
#ifndef SANITIZER_MAC_H
#define SANITIZER_MAC_H
+#include "sanitizer_common.h"
#include "sanitizer_platform.h"
#if SANITIZER_MAC
#include "sanitizer_posix.h"
@@ -35,5 +36,22 @@ char **GetEnviron();
} // namespace __sanitizer
+extern "C" {
+static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
+ {};
+static const char *__crashreporter_info__ __attribute__((__used__)) =
+ &__crashreporter_info_buff__[0];
+asm(".desc ___crashreporter_info__, 0x10");
+} // extern "C"
+
+namespace __sanitizer {
+static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+
+INLINE void CRAppendCrashLogMessage(const char *msg) {
+  BlockingMutexLock l(&crashreporter_info_mutex);
+  internal_strlcat(__crashreporter_info_buff__, msg,
+                   sizeof(__crashreporter_info_buff__));
+}
+} // namespace __sanitizer
+
#endif // SANITIZER_MAC
#endif // SANITIZER_MAC_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc b/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc
index 6ed0f69de97..93fb19e922c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc
+++ b/libsanitizer/sanitizer_common/sanitizer_malloc_mac.inc
@@ -25,7 +25,7 @@
#include "sanitizer_common/sanitizer_mac.h"
// Similar code is used in Google Perftools,
-// http://code.google.com/p/google-perftools.
+// https://github.com/gperftools/gperftools.
static malloc_zone_t sanitizer_zone;
@@ -54,7 +54,7 @@ INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
// FIXME: ASan should support purgeable allocations.
- // https://code.google.com/p/address-sanitizer/issues/detail?id=139
+ // https://github.com/google/sanitizers/issues/139
COMMON_MALLOC_ENTER();
return &sanitizer_zone;
}
@@ -182,28 +182,18 @@ void *__sanitizer_mz_valloc(malloc_zone_t *zone, size_t size) {
return p;
}
-#define GET_ZONE_FOR_PTR(ptr) \
- malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
- const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name
-
-void ALWAYS_INLINE free_common(void *context, void *ptr) {
- if (!ptr) return;
- // FIXME: need to retire this flag.
- if (!COMMON_MALLOC_IGNORE_INVALID_FREE) {
- COMMON_MALLOC_FREE(ptr);
- } else {
- GET_ZONE_FOR_PTR(ptr);
- COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name);
- }
-}
-
// TODO(glider): the allocation callbacks need to be refactored.
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
- free_common(zone, ptr);
+ if (!ptr) return;
+ COMMON_MALLOC_FREE(ptr);
}
+#define GET_ZONE_FOR_PTR(ptr) \
+ malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
+ const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name
+
extern "C"
SANITIZER_INTERFACE_ATTRIBUTE
void *__sanitizer_mz_realloc(malloc_zone_t *zone, void *ptr, size_t new_size) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h
index 7d0ff2896e9..428709d55ec 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform.h
@@ -47,12 +47,30 @@
# define SANITIZER_IOSSIM 0
#endif
+#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
+# define SANITIZER_WATCHOS 1
+#else
+# define SANITIZER_WATCHOS 0
+#endif
+
+#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
+# define SANITIZER_TVOS 1
+#else
+# define SANITIZER_TVOS 0
+#endif
+
#if defined(_WIN32)
# define SANITIZER_WINDOWS 1
#else
# define SANITIZER_WINDOWS 0
#endif
+#if defined(_WIN64)
+# define SANITIZER_WINDOWS64 1
+#else
+# define SANITIZER_WINDOWS64 0
+#endif
+
#if defined(__ANDROID__)
# define SANITIZER_ANDROID 1
#else
@@ -79,14 +97,67 @@
# define SANITIZER_X32 0
#endif
-// VMA size definition for architecture that support multiple sizes.
-// AArch64 has 3 VMA sizes: 39, 42 and 48.
-#if !defined(SANITIZER_AARCH64_VMA)
-# define SANITIZER_AARCH64_VMA 39
+#if defined(__mips__)
+# define SANITIZER_MIPS 1
+# if defined(__mips64)
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 1
+# else
+# define SANITIZER_MIPS32 1
+# define SANITIZER_MIPS64 0
+# endif
#else
-# if SANITIZER_AARCH64_VMA != 39 && SANITIZER_AARCH64_VMA != 42
-# error "invalid SANITIZER_AARCH64_VMA size"
+# define SANITIZER_MIPS 0
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 0
+#endif
+
+#if defined(__s390__)
+# define SANITIZER_S390 1
+# if defined(__s390x__)
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 1
+# else
+# define SANITIZER_S390_31 1
+# define SANITIZER_S390_64 0
+# endif
+#else
+# define SANITIZER_S390 0
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 0
+#endif
+
+#if defined(__powerpc__)
+# define SANITIZER_PPC 1
+# if defined(__powerpc64__)
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 1
+// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
+// big-endian, and uses v1 ABI (known for its function descriptors),
+// while the new powerpc64le target is little-endian and uses v2.
+// In theory, you could convince gcc to compile for their evil twins
+// (e.g. big-endian v2), but you won't find such combinations in the wild
+// (it'd require bootstrapping a whole system, which would be quite painful
+// - there's no target triple for that). LLVM doesn't support them either.
+# if _CALL_ELF == 2
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 1
+# else
+# define SANITIZER_PPC64V1 1
+# define SANITIZER_PPC64V2 0
+# endif
+# else
+# define SANITIZER_PPC32 1
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
# endif
+#else
+# define SANITIZER_PPC 0
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
#endif
// By default we allow the use of SizeClassAllocator64 on 64-bit platforms.
@@ -95,7 +166,9 @@
// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
#ifndef SANITIZER_CAN_USE_ALLOCATOR64
-# if defined(__mips64) || defined(__aarch64__)
+# if SANITIZER_ANDROID && defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
# define SANITIZER_CAN_USE_ALLOCATOR64 0
# else
# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
@@ -107,6 +180,8 @@
// will still work but will consume more memory for TwoLevelByteMap.
#if defined(__mips__)
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#elif defined(__aarch64__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
#else
# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
#endif
@@ -135,10 +210,8 @@
#define SANITIZER_USES_UID16_SYSCALLS 0
#endif
-#if defined(__mips__) || (defined(__aarch64__) && SANITIZER_AARCH64_VMA == 39)
+#if defined(__mips__)
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
-#elif defined(__aarch64__) && SANITIZER_AARCH64_VMA == 42
-# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 11)
#else
# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
#endif
@@ -160,4 +233,22 @@
# define MSC_PREREQ(version) 0
#endif
+#if defined(__arm64__) && SANITIZER_IOS
+# define SANITIZER_NON_UNIQUE_TYPEINFO 1
+#else
+# define SANITIZER_NON_UNIQUE_TYPEINFO 0
+#endif
+
+// On Linux, some architectures had an ABI transition from 64-bit long double
+// (i.e. same as double) to 128-bit long double. On those, glibc symbols
+// involving long doubles come in two versions, and we need to pass the
+// correct one to dlvsym when intercepting them.
+#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
+#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
+#endif
+
+#ifndef SANITIZER_GO
+# define SANITIZER_GO 0
+#endif
+
#endif // SANITIZER_PLATFORM_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
index 040e030e088..2a886058187 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h
@@ -27,6 +27,12 @@
# define SI_LINUX_NOT_ANDROID 0
#endif
+#if SANITIZER_ANDROID
+# define SI_ANDROID 1
+#else
+# define SI_ANDROID 0
+#endif
+
#if SANITIZER_FREEBSD
# define SI_FREEBSD 1
#else
@@ -41,8 +47,10 @@
#if SANITIZER_MAC
# define SI_MAC 1
+# define SI_NOT_MAC 0
#else
# define SI_MAC 0
+# define SI_NOT_MAC 1
#endif
#if SANITIZER_IOS
@@ -51,14 +59,30 @@
# define SI_IOS 0
#endif
+#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+# define SI_UNIX_NOT_MAC 1
+#else
+# define SI_UNIX_NOT_MAC 0
+#endif
+
+#define SANITIZER_INTERCEPT_STRLEN 1
+#define SANITIZER_INTERCEPT_STRNLEN SI_NOT_MAC
#define SANITIZER_INTERCEPT_STRCMP 1
#define SANITIZER_INTERCEPT_STRSTR 1
#define SANITIZER_INTERCEPT_STRCASESTR SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_STRCHR 1
+#define SANITIZER_INTERCEPT_STRCHRNUL SI_UNIX_NOT_MAC
+#define SANITIZER_INTERCEPT_STRRCHR 1
#define SANITIZER_INTERCEPT_STRSPN 1
#define SANITIZER_INTERCEPT_STRPBRK 1
#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRCASECMP SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_MEMSET 1
+#define SANITIZER_INTERCEPT_MEMMOVE 1
+#define SANITIZER_INTERCEPT_MEMCPY 1
#define SANITIZER_INTERCEPT_MEMCMP 1
+// FIXME: enable memmem on Windows.
+#define SANITIZER_INTERCEPT_MEMMEM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MEMCHR 1
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
@@ -123,15 +147,21 @@
#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MODF SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_RECVMSG SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SENDMSG SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPEERNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_IOCTL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_INET_ATON SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
#define SANITIZER_INTERCEPT_READDIR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_PTRACE SI_LINUX_NOT_ANDROID && \
+#if SI_LINUX_NOT_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__))
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+#define SANITIZER_INTERCEPT_PTRACE 1
+#else
+#define SANITIZER_INTERCEPT_PTRACE 0
+#endif
#define SANITIZER_INTERCEPT_SETLOCALE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETCWD SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
@@ -203,6 +233,7 @@
#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_TTYNAME_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TEMPNAM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
@@ -235,7 +266,11 @@
#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
-#define SANITIZER_INTERCEPT_AEABI_MEM SI_LINUX && defined(__arm__)
+#if SI_LINUX && defined(__arm__)
+#define SANITIZER_INTERCEPT_AEABI_MEM 1
+#else
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+#endif
#define SANITIZER_INTERCEPT___BZERO SI_MAC
#define SANITIZER_INTERCEPT_FTIME !SI_FREEBSD && SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
@@ -247,8 +282,12 @@
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
+
+#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
+#endif
+
#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
@@ -258,7 +297,17 @@
#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MINCORE SI_LINUX
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
+#define SANITIZER_INTERCEPT_CTERMID SI_LINUX || SI_MAC || SI_FREEBSD
+#define SANITIZER_INTERCEPT_CTERMID_R SI_MAC || SI_FREEBSD
#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX
+#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_SEND_SENDTO SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
+#define SANITIZER_INTERCEPT_STAT (SI_FREEBSD || SI_MAC || SI_ANDROID)
+#define SANITIZER_INTERCEPT___XSTAT !SANITIZER_INTERCEPT_STAT && SI_NOT_WINDOWS
+#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
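
Aside: the PTRACE and AEABI_MEM hunks above replace object-like macros that expanded to `cond && defined(...)` with explicit #if/#else blocks. The likely motivation is that when `defined` reaches an #if line through macro expansion, the behavior is undefined (C11 6.10.1p4), so evaluating the condition once and exporting a plain 0/1 is the safe pattern. A minimal sketch with hypothetical macro names (SI_LINUX stands in for the real config macro):

#define SI_LINUX 1  // assumed set by platform detection

/* Risky: an object-like macro whose expansion contains `defined`
   invokes undefined behavior when an #if later evaluates it. */
#define USE_FEATURE_BAD SI_LINUX && defined(__arm__)

/* Safe: evaluate the condition here and export a plain constant. */
#if SI_LINUX && defined(__arm__)
# define USE_FEATURE 1
#else
# define USE_FEATURE 0
#endif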
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
index a1f04325033..edc67301935 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cc
@@ -26,7 +26,7 @@
// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
// are not defined anywhere in userspace headers. Fake them. This seems to work
// fine with newer headers, too.
-#include <asm/posix_types.h>
+#include <linux/posix_types.h>
#if defined(__x86_64__) || defined(__mips__)
#include <sys/stat.h>
#else
@@ -36,7 +36,6 @@
#define uid_t __kernel_uid_t
#define gid_t __kernel_gid_t
#define off_t __kernel_off_t
-#define time_t __kernel_time_t
// This header seems to contain the definitions of _kernel_ stat* structs.
#include <asm/stat.h>
#undef ino_t
@@ -54,6 +53,8 @@
#include <linux/perf_event.h>
#endif
+using namespace __sanitizer;
+
namespace __sanitizer {
#if !SANITIZER_ANDROID
unsigned struct_statfs64_sz = sizeof(struct statfs64);
@@ -61,7 +62,7 @@ namespace __sanitizer {
} // namespace __sanitizer
#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
- && !defined(__mips__) && !defined(__sparc__)
+ && !defined(__mips__) && !defined(__s390__)
COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
index 9866cc9e17a..7bf76c4e715 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cc
@@ -121,6 +121,10 @@
# include <asm/ptrace.h>
# ifdef __arm__
typedef struct user_fpregs elf_fpregset_t;
+# define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
+# if !defined(ARM_VFPREGS_SIZE)
+# define ARM_VFPREGS_SIZE ARM_VFPREGS_SIZE_ASAN
+# endif
# endif
# endif
# include <semaphore.h>
@@ -305,23 +309,28 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__))
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
#elif defined(__aarch64__)
unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
+#elif defined(__s390__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
#else
unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
#endif // __mips64 || __powerpc64__ || __aarch64__
#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
- defined(__aarch64__) || defined(__arm__)
+ defined(__aarch64__) || defined(__arm__) || defined(__s390__)
unsigned struct_user_fpxregs_struct_sz = 0;
#else
unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
+// || __s390__
#ifdef __arm__
unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
#else
@@ -926,6 +935,8 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
const int si_SEGV_ACCERR = SEGV_ACCERR;
} // namespace __sanitizer
+using namespace __sanitizer;
+
COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
@@ -1049,8 +1060,15 @@ COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
// Can't write checks for sa_handler and sa_sigaction due to them being
// preprocessor macros.
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+#if !defined(__s390x__) || __GLIBC_PREREQ (2, 20)
+// On s390x glibc 2.19 and earlier sa_flags was unsigned long, and sa_resv
+// didn't exist.
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
-#if SANITIZER_LINUX
+#endif
+#if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32)
CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
#endif
@@ -1121,9 +1139,6 @@ CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
-#ifndef __GLIBC_PREREQ
-#define __GLIBC_PREREQ(x, y) 0
-#endif
#if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)
/* On aarch64 glibc 2.20 and earlier provided incorrect mode field. */
CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
@@ -1262,4 +1277,8 @@ CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
CHECK_TYPE_SIZE(sem_t);
#endif
+#if SANITIZER_LINUX && defined(__arm__)
+COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);
+#endif
+
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
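
Aside: files like this one keep `__sanitizer_`-prefixed mirror copies of libc/kernel structures, and the COMPILER_CHECK/CHECK_SIZE_AND_OFFSET guards fail the build the moment a mirror drifts out of sync with the real layout. The same guard, sketched in standard C++ with illustrative struct names:

#include <cstddef>

struct libc_view { int mode; long flags; };  // stands in for the libc struct
struct mirror    { int mode; long flags; };  // the runtime's shadow copy

static_assert(sizeof(mirror) == sizeof(libc_view), "size drift");
static_assert(offsetof(mirror, flags) == offsetof(libc_view, flags),
              "offset drift");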
diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
index b6f90eb3a74..17906d30336 100644
--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h
@@ -18,7 +18,7 @@
#if SANITIZER_FREEBSD
// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
-// incroporates the map structure.
+// incorporates the map structure.
# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 544)))
#else
@@ -75,20 +75,16 @@ namespace __sanitizer {
const unsigned struct_kernel_stat_sz = 144;
const unsigned struct_kernel_stat64_sz = 104;
#elif defined(__mips__)
- #if SANITIZER_WORDSIZE == 64
- const unsigned struct_kernel_stat_sz = 216;
- #else
- const unsigned struct_kernel_stat_sz = 144;
- #endif
+ const unsigned struct_kernel_stat_sz =
+ SANITIZER_ANDROID ? FIRST_32_SECOND_64(104, 128) :
+ FIRST_32_SECOND_64(144, 216);
const unsigned struct_kernel_stat64_sz = 104;
-#elif defined(__sparc__) && defined(__arch64__)
- const unsigned struct___old_kernel_stat_sz = 0;
- const unsigned struct_kernel_stat_sz = 104;
- const unsigned struct_kernel_stat64_sz = 144;
-#elif defined(__sparc__) && !defined(__arch64__)
- const unsigned struct___old_kernel_stat_sz = 0;
+#elif defined(__s390__) && !defined(__s390x__)
const unsigned struct_kernel_stat_sz = 64;
const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__s390x__)
+ const unsigned struct_kernel_stat_sz = 144;
+ const unsigned struct_kernel_stat64_sz = 0;
#endif
struct __sanitizer_perf_event_attr {
unsigned type;
@@ -109,9 +105,9 @@ namespace __sanitizer {
#if SANITIZER_LINUX || SANITIZER_FREEBSD
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined(__s390__)
const unsigned struct___old_kernel_stat_sz = 0;
-#elif !defined(__sparc__)
+#else
const unsigned struct___old_kernel_stat_sz = 32;
#endif
@@ -196,24 +192,12 @@ namespace __sanitizer {
unsigned __seq;
u64 __unused1;
u64 __unused2;
-#elif defined(__mips__) || defined(__aarch64__)
+#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
unsigned int mode;
unsigned short __seq;
unsigned short __pad1;
unsigned long __unused1;
unsigned long __unused2;
-#elif defined(__sparc__)
-# if defined(__arch64__)
- unsigned mode;
- unsigned short __pad1;
-# else
- unsigned short __pad1;
- unsigned short mode;
- unsigned short __pad2;
-# endif
- unsigned short __seq;
- unsigned long long __unused1;
- unsigned long long __unused2;
#else
unsigned short mode;
unsigned short __pad1;
@@ -231,26 +215,6 @@ namespace __sanitizer {
struct __sanitizer_shmid_ds {
__sanitizer_ipc_perm shm_perm;
- #if defined(__sparc__)
- # if !defined(__arch64__)
- u32 __pad1;
- # endif
- long shm_atime;
- # if !defined(__arch64__)
- u32 __pad2;
- # endif
- long shm_dtime;
- # if !defined(__arch64__)
- u32 __pad3;
- # endif
- long shm_ctime;
- uptr shm_segsz;
- int shm_cpid;
- int shm_lpid;
- unsigned long shm_nattch;
- unsigned long __glibc_reserved1;
- unsigned long __glibc_reserved2;
- #else
#ifndef __powerpc__
uptr shm_segsz;
#elif !defined(__powerpc64__)
@@ -288,7 +252,6 @@ namespace __sanitizer {
uptr __unused4;
uptr __unused5;
#endif
-#endif
};
#elif SANITIZER_FREEBSD
struct __sanitizer_ipc_perm {
@@ -555,7 +518,11 @@ namespace __sanitizer {
};
#if SANITIZER_ANDROID
+# if SANITIZER_MIPS
+ typedef unsigned long __sanitizer_sigset_t[16/sizeof(unsigned long)];
+# else
typedef unsigned long __sanitizer_sigset_t;
+# endif
#elif SANITIZER_MAC
typedef unsigned __sanitizer_sigset_t;
#elif SANITIZER_LINUX
@@ -581,6 +548,15 @@ namespace __sanitizer {
__sanitizer_sigset_t sa_mask;
void (*sa_restorer)();
};
+#elif SANITIZER_ANDROID && SANITIZER_MIPS32 // check this before WORDSIZE == 32
+ struct __sanitizer_sigaction {
+ unsigned sa_flags;
+ union {
+ void (*sigaction)(int sig, void *siginfo, void *uctx);
+ void (*handler)(int sig);
+ };
+ __sanitizer_sigset_t sa_mask;
+ };
#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)
struct __sanitizer_sigaction {
union {
@@ -604,21 +580,24 @@ namespace __sanitizer {
int sa_flags;
__sanitizer_sigset_t sa_mask;
#else
+#if defined(__s390x__)
+ int sa_resv;
+#else
__sanitizer_sigset_t sa_mask;
+#endif
#ifndef __mips__
-#if defined(__sparc__)
- unsigned long sa_flags;
-#else
int sa_flags;
#endif
#endif
-#endif
#if SANITIZER_LINUX
void (*sa_restorer)();
#endif
#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
int sa_resv[1];
#endif
+#if defined(__s390x__)
+ __sanitizer_sigset_t sa_mask;
+#endif
};
#endif // !SANITIZER_ANDROID
@@ -626,7 +605,7 @@ namespace __sanitizer {
typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
#elif defined(__mips__)
struct __sanitizer_kernel_sigset_t {
- u8 sig[16];
+ uptr sig[2];
};
#else
struct __sanitizer_kernel_sigset_t {
@@ -635,6 +614,17 @@ namespace __sanitizer {
#endif
// Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_MIPS
+ struct __sanitizer_kernel_sigaction_t {
+ unsigned int sa_flags;
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ __sanitizer_kernel_sigset_t sa_mask;
+ void (*sa_restorer)(void);
+ };
+#else
struct __sanitizer_kernel_sigaction_t {
union {
void (*handler)(int signo);
@@ -644,6 +634,7 @@ namespace __sanitizer {
void (*sa_restorer)(void);
__sanitizer_kernel_sigset_t sa_mask;
};
+#endif
extern uptr sig_ign;
extern uptr sig_dfl;
@@ -779,7 +770,8 @@ namespace __sanitizer {
#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
(defined(__i386) || defined(__x86_64) || defined(__mips64) || \
- defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__))
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
extern unsigned struct_user_regs_struct_sz;
extern unsigned struct_user_fpregs_struct_sz;
extern unsigned struct_user_fpxregs_struct_sz;
@@ -857,7 +849,7 @@ struct __sanitizer_cookie_io_functions_t {
#define IOC_NRBITS 8
#define IOC_TYPEBITS 8
-#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || defined(__sparc__)
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
#define IOC_SIZEBITS 13
#define IOC_DIRBITS 3
#define IOC_NONE 1U
@@ -887,17 +879,7 @@ struct __sanitizer_cookie_io_functions_t {
#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
-
-#if defined(__sparc__)
-// In sparc the 14 bits SIZE field overlaps with the
-// least significant bit of DIR, so either IOC_READ or
-// IOC_WRITE shall be 1 in order to get a non-zero SIZE.
-# define IOC_SIZE(nr) \
- ((((((nr) >> 29) & 0x7) & (4U|2U)) == 0)? \
- 0 : (((nr) >> 16) & 0x3fff))
-#else
#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
-#endif
extern unsigned struct_ifreq_sz;
extern unsigned struct_termios_sz;
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.cc b/libsanitizer/sanitizer_common/sanitizer_posix.cc
index ed44633bc18..d10213d917f 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.cc
@@ -87,7 +87,11 @@ static uptr GetKernelAreaSize() {
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
-# if defined(__powerpc64__) || defined(__aarch64__)
+# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
+ // Ideally, we would derive the upper bound from MACH_VM_MAX_ADDRESS. The
+ // upper bound can change depending on the device.
+ return 0x200000000 - 1;
+# elif defined(__powerpc64__) || defined(__aarch64__)
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
// We somehow need to figure out which one we are using now and choose
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
@@ -98,26 +102,32 @@ uptr GetMaxVirtualAddress() {
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
# elif defined(__mips64)
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
+# elif defined(__s390x__)
+ return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
# else
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# endif
#else // SANITIZER_WORDSIZE == 32
+# if defined(__s390__)
+ return (1ULL << 31) - 1; // 0x7fffffff;
+# else
uptr res = (1ULL << 32) - 1; // 0xffffffff;
if (!common_flags()->full_address_space)
res -= GetKernelAreaSize();
CHECK_LT(reinterpret_cast<uptr>(&res), res);
return res;
+# endif
#endif // SANITIZER_WORDSIZE
}
-void *MmapOrDie(uptr size, const char *mem_type) {
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
size = RoundUpTo(size, GetPageSizeCached());
uptr res = internal_mmap(nullptr, size,
PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANON, -1, 0);
int reserrno;
if (internal_iserror(res, &reserrno))
- ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
IncreaseTotalMmap(size);
return (void *)res;
}
@@ -133,6 +143,26 @@ void UnmapOrDie(void *addr, uptr size) {
DecreaseTotalMmap(size);
}
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping redundant pieces.
+// We could probably do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (res & (alignment - 1)) // Not aligned.
+ res = (map_res + alignment) & ~(alignment - 1);
+ uptr end = res + size;
+ if (res != map_res)
+ UnmapOrDie((void*)map_res, res - map_res);
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
+
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
uptr PageSize = GetPageSizeCached();
uptr p = internal_mmap(nullptr,
@@ -169,6 +199,10 @@ bool MprotectNoAccess(uptr addr, uptr size) {
return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
}
+bool MprotectReadOnly(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void *)addr, size, PROT_READ);
+}
+
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
int flags;
switch (mode) {
@@ -313,26 +347,13 @@ bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
}
SignalContext SignalContext::Create(void *siginfo, void *context) {
- uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
+ auto si = (siginfo_t *)siginfo;
+ uptr addr = (uptr)si->si_addr;
uptr pc, sp, bp;
GetPcSpBp(context, &pc, &sp, &bp);
- return SignalContext(context, addr, pc, sp, bp);
-}
-
-// This function check is the built VMA matches the runtime one for
-// architectures with multiple VMA size.
-void CheckVMASize() {
-#ifdef __aarch64__
- static const unsigned kBuiltVMA = SANITIZER_AARCH64_VMA;
- unsigned maxRuntimeVMA =
- (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
- if (kBuiltVMA != maxRuntimeVMA) {
- Printf("WARNING: %s runtime VMA is not the one built for.\n",
- SanitizerToolName);
- Printf("\tBuilt VMA: %u bits\n", kBuiltVMA);
- Printf("\tRuntime VMA: %u bits\n", maxRuntimeVMA);
- }
-#endif
+ WriteFlag write_flag = GetWriteFlag(context);
+ bool is_memory_access = si->si_signo == SIGSEGV;
+ return SignalContext(context, addr, pc, sp, bp, is_memory_access, write_flag);
}
} // namespace __sanitizer
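
Aside: the new MmapAlignedOrDie gets alignment by over-mapping `size + alignment` bytes and unmapping the slack on both sides. The same idea with plain POSIX calls, as a sketch only (assumes Linux, a power-of-two `alignment` of at least a page, and a page-multiple `size`):

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

void *map_aligned(size_t size, size_t alignment) {
  size_t map_size = size + alignment;  // worst-case padding
  uintptr_t p = (uintptr_t)mmap(NULL, map_size, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == (uintptr_t)MAP_FAILED) return NULL;
  uintptr_t res = (p + alignment - 1) & ~(alignment - 1);  // round up
  if (res != p) munmap((void *)p, res - p);                // leading slack
  uintptr_t end = res + size, map_end = p + map_size;
  if (end != map_end) munmap((void *)end, map_end - end);  // trailing slack
  return (void *)res;
}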
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix.h b/libsanitizer/sanitizer_common/sanitizer_posix.h
index 8dd259e32ba..68b34babdeb 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix.h
+++ b/libsanitizer/sanitizer_common/sanitizer_posix.h
@@ -14,6 +14,7 @@
// ----------- ATTENTION -------------
// This header should NOT include any other headers from sanitizer runtime.
#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
#if !SANITIZER_POSIX
// Make it hard to accidentally use any of functions declared in this file:
@@ -52,6 +53,7 @@ uptr internal_ptrace(int request, int pid, void *addr, void *data);
uptr internal_waitpid(int pid, int *status, int options);
int internal_fork();
+int internal_forkpty(int *amaster);
// These functions call appropriate pthread_ functions directly, bypassing
// the interceptor. They are weak and may not be present in some tools.
@@ -74,8 +76,15 @@ int real_pthread_join(void *th, void **ret);
int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+// A routine named real_sigaction() must be implemented by each sanitizer in
+// order for internal_sigaction() to bypass interceptors.
int internal_sigaction(int signum, const void *act, void *oldact);
+void internal_sigfillset(__sanitizer_sigset_t *set);
+void internal_sigemptyset(__sanitizer_sigset_t *set);
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum);
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]);
} // namespace __sanitizer
#endif // SANITIZER_POSIX_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
index 4b7273b4cc0..335aad1660e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -32,6 +32,7 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <sys/wait.h>
#include <unistd.h>
#if SANITIZER_FREEBSD
@@ -41,6 +42,8 @@
#define MAP_NORESERVE 0
#endif
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+
namespace __sanitizer {
u32 GetUid() {
@@ -51,7 +54,7 @@ uptr GetThreadSelf() {
return (uptr)pthread_self();
}
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
+void ReleaseMemoryToOS(uptr addr, uptr size) {
madvise((void*)addr, size, MADV_DONTNEED);
}
@@ -95,6 +98,10 @@ bool StackSizeIsUnlimited() {
return (stack_size == RLIM_INFINITY);
}
+uptr GetStackSizeLimitInBytes() {
+ return (uptr)getlim(RLIMIT_STACK);
+}
+
void SetStackSizeLimitInBytes(uptr limit) {
setlim(RLIMIT_STACK, (rlim_t)limit);
CHECK(!StackSizeIsUnlimited());
@@ -119,11 +126,21 @@ void SleepForMillis(int millis) {
}
void Abort() {
+#if !SANITIZER_GO
+ // If we are handling SIGABRT, unhandle it first.
+ if (IsHandledDeadlySignal(SIGABRT)) {
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
+ internal_sigaction(SIGABRT, &sigact, nullptr);
+ }
+#endif
+
abort();
}
int Atexit(void (*function)(void)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
return atexit(function);
#else
return 0;
@@ -134,7 +151,7 @@ bool SupportsColoredOutput(fd_t fd) {
return isatty(fd) != 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
@@ -163,10 +180,9 @@ void UnsetAlternateSignalStack() {
UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
}
-typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
static void MaybeInstallSigaction(int signum,
SignalHandlerType handler) {
- if (!IsDeadlySignal(signum))
+ if (!IsHandledDeadlySignal(signum))
return;
struct sigaction sigact;
internal_memset(&sigact, 0, sizeof(sigact));
@@ -188,6 +204,7 @@ void InstallDeadlySignalHandlers(SignalHandlerType handler) {
MaybeInstallSigaction(SIGBUS, handler);
MaybeInstallSigaction(SIGABRT, handler);
MaybeInstallSigaction(SIGFPE, handler);
+ MaybeInstallSigaction(SIGILL, handler);
}
#endif // SANITIZER_GO
@@ -266,7 +283,7 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
return (void *)p;
}
-void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
int fd = name ? GetNamedMappingFd(name, size) : -1;
unsigned flags = MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE;
if (fd == -1) flags |= MAP_ANON;
@@ -275,6 +292,11 @@ void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
0);
}
+void *MmapNoAccess(uptr size) {
+ unsigned flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;
+ return (void *)internal_mmap(nullptr, size, PROT_NONE, flags, -1, 0);
+}
+
// This function is defined elsewhere if we intercepted pthread_attr_getstack.
extern "C" {
SANITIZER_WEAK_ATTRIBUTE int
@@ -317,6 +339,79 @@ void AdjustStackSize(void *attr_) {
}
#endif // !SANITIZER_GO
+pid_t StartSubprocess(const char *program, const char *const argv[],
+ fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
+ auto file_closer = at_scope_exit([&] {
+ if (stdin_fd != kInvalidFd) {
+ internal_close(stdin_fd);
+ }
+ if (stdout_fd != kInvalidFd) {
+ internal_close(stdout_fd);
+ }
+ if (stderr_fd != kInvalidFd) {
+ internal_close(stderr_fd);
+ }
+ });
+
+ int pid = internal_fork();
+
+ if (pid < 0) {
+ int rverrno;
+ if (internal_iserror(pid, &rverrno)) {
+ Report("WARNING: failed to fork (errno %d)\n", rverrno);
+ }
+ return pid;
+ }
+
+ if (pid == 0) {
+ // Child subprocess
+ if (stdin_fd != kInvalidFd) {
+ internal_close(STDIN_FILENO);
+ internal_dup2(stdin_fd, STDIN_FILENO);
+ internal_close(stdin_fd);
+ }
+ if (stdout_fd != kInvalidFd) {
+ internal_close(STDOUT_FILENO);
+ internal_dup2(stdout_fd, STDOUT_FILENO);
+ internal_close(stdout_fd);
+ }
+ if (stderr_fd != kInvalidFd) {
+ internal_close(STDERR_FILENO);
+ internal_dup2(stderr_fd, STDERR_FILENO);
+ internal_close(stderr_fd);
+ }
+
+ for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd);
+
+ execv(program, const_cast<char **>(&argv[0]));
+ internal__exit(1);
+ }
+
+ return pid;
+}
+
+bool IsProcessRunning(pid_t pid) {
+ int process_status;
+ uptr waitpid_status = internal_waitpid(pid, &process_status, WNOHANG);
+ int local_errno;
+ if (internal_iserror(waitpid_status, &local_errno)) {
+ VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
+ return false;
+ }
+ return waitpid_status == 0;
+}
+
+int WaitForProcess(pid_t pid) {
+ int process_status;
+ uptr waitpid_status = internal_waitpid(pid, &process_status, 0);
+ int local_errno;
+ if (internal_iserror(waitpid_status, &local_errno)) {
+ VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
+ return -1;
+ }
+ return process_status;
+}
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX
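
Aside: StartSubprocess, IsProcessRunning and WaitForProcess factor out the usual fork/dup2/exec/waitpid sequence. The core pattern in isolation (a sketch; error handling trimmed, function name illustrative):

#include <sys/wait.h>
#include <unistd.h>

// Run `program`, sending its stdout into out_fd; returns the child pid.
pid_t spawn_with_stdout(const char *program, char *const argv[], int out_fd) {
  pid_t pid = fork();
  if (pid == 0) {                  // child
    dup2(out_fd, STDOUT_FILENO);   // redirect stdout to the pipe/file
    close(out_fd);
    execv(program, argv);
    _exit(1);                      // reached only if execv failed
  }
  close(out_fd);                   // parent's copy is no longer needed
  return pid;
}
// Reaping: waitpid(pid, &st, WNOHANG) returns 0 while the child still runs
// (IsProcessRunning); waitpid(pid, &st, 0) blocks until exit (WaitForProcess).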
diff --git a/libsanitizer/sanitizer_common/sanitizer_printf.cc b/libsanitizer/sanitizer_common/sanitizer_printf.cc
index 6688610bf0f..c11113da244 100644
--- a/libsanitizer/sanitizer_common/sanitizer_printf.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_printf.cc
@@ -211,7 +211,7 @@ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void OnPrint(const char *str) {
(void)str;
}
-#elif defined(SANITIZER_GO) && defined(TSAN_EXTERNAL_HOOKS)
+#elif SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
void OnPrint(const char *str);
#else
void OnPrint(const char *str) {
@@ -276,9 +276,12 @@ static void SharedPrintfCode(bool append_pid, const char *format,
# undef CHECK_NEEDED_LENGTH
}
RawWrite(buffer);
- if (common_flags()->log_to_syslog)
- WriteToSyslog(buffer);
+
+ // Remove color sequences from the message.
+ RemoveANSIEscapeSequencesFromString(buffer);
CallPrintfAndReportCallback(buffer);
+ LogMessageOnPrintf(buffer);
+
// If we had mapped any memory, clean up.
if (buffer != local_buffer)
UnmapOrDie((void *)buffer, buffer_size);
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps.h b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
index 7477abf30b6..0183a094111 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps.h
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps.h
@@ -41,9 +41,8 @@ class MemoryMappingLayout {
// instead of aborting.
static void CacheMemoryMappings();
- // Stores the list of mapped objects into an array.
- uptr DumpListOfModules(LoadedModule *modules, uptr max_modules,
- string_predicate_t filter);
+ // Adds all mapped objects into a vector.
+ void DumpListOfModules(InternalMmapVector<LoadedModule> *modules);
// Memory protection masks.
static const uptr kProtectionRead = 1;
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
index 1cf3f8510fb..c725c2e7b66 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_common.cc
@@ -114,22 +114,17 @@ void MemoryMappingLayout::LoadFromCache() {
}
}
-uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
- uptr max_modules,
- string_predicate_t filter) {
+void MemoryMappingLayout::DumpListOfModules(
+ InternalMmapVector<LoadedModule> *modules) {
Reset();
uptr cur_beg, cur_end, cur_offset, prot;
InternalScopedString module_name(kMaxPathLength);
- uptr n_modules = 0;
- for (uptr i = 0; n_modules < max_modules &&
- Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
- module_name.size(), &prot);
+ for (uptr i = 0; Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
+ module_name.size(), &prot);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
continue;
- if (filter && !filter(cur_name))
- continue;
// Don't subtract 'cur_beg' from the first entry:
// * If a binary is compiled w/o -pie, then the first entry in
// process maps is likely the binary itself (all dynamic libs
@@ -142,12 +137,11 @@ uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
// shadow memory of the tool), so the module can't be the
// first entry.
uptr base_address = (i ? cur_beg : 0) - cur_offset;
- LoadedModule *cur_module = &modules[n_modules];
- cur_module->set(cur_name, base_address);
- cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
- n_modules++;
+ LoadedModule cur_module;
+ cur_module.set(cur_name, base_address);
+ cur_module.addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
+ modules->push_back(cur_module);
}
- return n_modules;
}
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
index e77e33fb2d7..10918e54398 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -65,7 +65,7 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
while (IsDecimal(*current_))
current_++;
// Qemu may lack the trailing space.
- // http://code.google.com/p/address-sanitizer/issues/detail?id=160
+ // https://github.com/google/sanitizers/issues/160
// CHECK_EQ(*current_++, ' ');
// Skip spaces.
while (current_ < next_line && *current_ == ' ')
diff --git a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
index 393243b7c28..81829a7c284 100644
--- a/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_procmaps_mac.cc
@@ -63,7 +63,7 @@ void MemoryMappingLayout::LoadFromCache() {
}
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
-// Google Perftools, http://code.google.com/p/google-perftools.
+// Google Perftools, https://github.com/gperftools/gperftools.
// NextSegmentLoad scans the current image for the next segment load command
// and returns the start and end addresses and file offset of the corresponding
@@ -153,34 +153,28 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
return false;
}
-uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
- uptr max_modules,
- string_predicate_t filter) {
+void MemoryMappingLayout::DumpListOfModules(
+ InternalMmapVector<LoadedModule> *modules) {
Reset();
uptr cur_beg, cur_end, prot;
InternalScopedString module_name(kMaxPathLength);
- uptr n_modules = 0;
- for (uptr i = 0; n_modules < max_modules &&
- Next(&cur_beg, &cur_end, 0, module_name.data(),
- module_name.size(), &prot);
+ for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(),
+ module_name.size(), &prot);
i++) {
const char *cur_name = module_name.data();
if (cur_name[0] == '\0')
continue;
- if (filter && !filter(cur_name))
- continue;
LoadedModule *cur_module = nullptr;
- if (n_modules > 0 &&
- 0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
- cur_module = &modules[n_modules - 1];
+ if (!modules->empty() &&
+ 0 == internal_strcmp(cur_name, modules->back().full_name())) {
+ cur_module = &modules->back();
} else {
- cur_module = &modules[n_modules];
+ modules->push_back(LoadedModule());
+ cur_module = &modules->back();
cur_module->set(cur_name, cur_beg);
- n_modules++;
}
cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute);
}
- return n_modules;
}
} // namespace __sanitizer
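
Aside: the DumpListOfModules rewrite drops the caller-provided fixed array (and with it the hard module cap) in favor of a grow-on-demand vector, and the Mac version coalesces consecutive segments of the same image by comparing against the last element. The coalescing step, sketched with std::vector standing in for InternalMmapVector (types illustrative):

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct Module {
  std::string name;
  std::vector<std::pair<uintptr_t, uintptr_t>> ranges;
};

// Append one mapped segment, merging with the previous module when the
// image name repeats (as consecutive Mach-O segments do).
void AddSegment(std::vector<Module> *mods, const std::string &name,
                uintptr_t beg, uintptr_t end) {
  if (mods->empty() || mods->back().name != name)
    mods->push_back(Module{name, {}});
  mods->back().ranges.emplace_back(beg, end);
}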
diff --git a/libsanitizer/sanitizer_common/sanitizer_quarantine.h b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
index a635871be45..9e9268f2a5d 100644
--- a/libsanitizer/sanitizer_common/sanitizer_quarantine.h
+++ b/libsanitizer/sanitizer_common/sanitizer_quarantine.h
@@ -99,10 +99,12 @@ class Quarantine {
void NOINLINE DoRecycle(Cache *c, Callback cb) {
while (QuarantineBatch *b = c->DequeueBatch()) {
const uptr kPrefetch = 16;
+ CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
for (uptr i = 0; i < kPrefetch; i++)
PREFETCH(b->batch[i]);
- for (uptr i = 0; i < b->count; i++) {
- PREFETCH(b->batch[i + kPrefetch]);
+ for (uptr i = 0, count = b->count; i < count; i++) {
+ if (i + kPrefetch < count)
+ PREFETCH(b->batch[i + kPrefetch]);
cb.Recycle((Node*)b->batch[i]);
}
cb.Deallocate(b);
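
Aside: the old loop prefetched `batch[i + kPrefetch]` unconditionally, reading past the batch during the last kPrefetch iterations; the fix bounds the prefetch index by `count`. The corrected shape in isolation (`Recycle` is a hypothetical consumer; PREFETCH corresponds to __builtin_prefetch here):

#include <stddef.h>

void Recycle(void *p);  // hypothetical consumer of each element

void RecycleAll(void **batch, size_t count) {
  const size_t kPrefetch = 16;
  for (size_t i = 0; i < kPrefetch && i < count; i++)
    __builtin_prefetch(batch[i]);                 // warm the first window
  for (size_t i = 0; i < count; i++) {
    if (i + kPrefetch < count)
      __builtin_prefetch(batch[i + kPrefetch]);   // never past the end
    Recycle(batch[i]);
  }
}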
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 796d472a1eb..531f256c048 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -38,11 +38,6 @@ void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
top_frame_bp = 0;
}
-// Check if given pointer points into allocated stack area.
-static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
- return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
-}
-
// In GCC on ARM bp points to saved lr, not fp, so we should check the next
// cell in stack to be a saved frame pointer. GetCanonicFrame returns the
// pointer to saved frame pointer in any case.
@@ -60,8 +55,8 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
// Nope, this does not look right either. This means the frame after next does
// not have a valid frame pointer, but we can still extract the caller PC.
// Unfortunately, there is no way to decide between GCC and LLVM frame
- // layouts. Assume GCC.
- return bp_prev - 1;
+ // layouts. Assume LLVM.
+ return bp_prev;
#else
return (uhwptr*)bp;
#endif
@@ -69,6 +64,7 @@ static inline uhwptr *GetCanonicFrame(uptr bp,
void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
uptr stack_bottom, u32 max_depth) {
+ const uptr kPageSize = GetPageSizeCached();
CHECK_GE(max_depth, 2);
trace_buffer[0] = pc;
size = 1;
@@ -90,9 +86,16 @@ void BufferedStackTrace::FastUnwindStack(uptr pc, uptr bp, uptr stack_top,
!IsAligned((uptr)caller_frame, sizeof(uhwptr)))
break;
uhwptr pc1 = caller_frame[2];
+#elif defined(__s390__)
+ uhwptr pc1 = frame[14];
#else
uhwptr pc1 = frame[1];
#endif
+ // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
+ // x86_64) is invalid and stop unwinding here. If we're adding support for
+ // a platform where this isn't true, we need to reconsider this check.
+ if (pc1 < kPageSize)
+ break;
if (pc1 != pc) {
trace_buffer[size++] = (uptr) pc1;
}
@@ -116,7 +119,7 @@ void BufferedStackTrace::PopStackFrames(uptr count) {
uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
// Use threshold to find PC in stack trace, as PC we want to unwind from may
// slightly differ from return address in the actual unwinded stack trace.
- const int kPcThreshold = 304;
+ const int kPcThreshold = 350;
for (uptr i = 0; i < size; ++i) {
if (MatchPc(pc, trace[i], kPcThreshold))
return i;
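
Aside: FastUnwindStack walks the frame-pointer chain, and the new `pc1 < kPageSize` test treats any return address inside page zero as the end of the chain. The walk reduced to its skeleton, assuming the classic x86-64-style layout (frame[0] = caller's fp, frame[1] = return address):

#include <stddef.h>
#include <stdint.h>

struct Frame { Frame *caller; void *ret_pc; };  // fp-chain layout

size_t WalkStack(uintptr_t *out, size_t max, Frame *fp,
                 uintptr_t stack_lo, uintptr_t stack_hi) {
  const uintptr_t kPageSize = 4096;  // assume 4K pages for the sketch
  size_t n = 0;
  while (n < max &&
         (uintptr_t)fp > stack_lo && (uintptr_t)fp < stack_hi) {
    uintptr_t pc = (uintptr_t)fp->ret_pc;
    if (pc < kPageSize) break;       // page-zero "address": stop unwinding
    out[n++] = pc;
    fp = fp->caller;                 // follow the saved frame pointer
  }
  return n;
}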
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
index 7f22455ac9e..c59dbd55883 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h
@@ -27,7 +27,7 @@ static const u32 kStackTraceMax = 256;
// Fast unwind is the only option on Mac for now; we will need to
// revisit this macro when slow unwind works on Mac, see
-// https://code.google.com/p/address-sanitizer/issues/detail?id=137
+// https://github.com/google/sanitizers/issues/137
#if SANITIZER_MAC
# define SANITIZER_CAN_SLOW_UNWIND 0
#else
@@ -108,6 +108,11 @@ struct BufferedStackTrace : public StackTrace {
void operator=(const BufferedStackTrace &);
};
+// Check if given pointer points into allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+ return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
+}
+
} // namespace __sanitizer
// Use this macro if you want to print stack trace with the caller
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
index addf44f7327..ac3ee3a019c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_libcdep.cc
@@ -23,6 +23,8 @@ void StackTrace::Print() const {
return;
}
InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ InternalScopedString dedup_token(GetPageSizeCached());
+ int dedup_frames = common_flags()->dedup_token_length;
uptr frame_num = 0;
for (uptr i = 0; i < size && trace[i]; i++) {
// PCs in stack traces are actually the return addresses, that is,
@@ -36,11 +38,18 @@ void StackTrace::Print() const {
cur->info, common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
Printf("%s\n", frame_desc.data());
+ if (dedup_frames-- > 0) {
+ if (dedup_token.length())
+ dedup_token.append("--");
+ dedup_token.append(cur->info.function);
+ }
}
frames->ClearAll();
}
// Always print a trailing empty line after stack trace.
Printf("\n");
+ if (dedup_token.length())
+ Printf("DEDUP_TOKEN: %s\n", dedup_token.data());
}
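
Aside: the DEDUP_TOKEN line joins the function names of the first dedup_token_length frames with "--", giving crash-collection systems a stable bucketing key that ignores addresses and file paths. The construction, sketched with std::string in place of InternalScopedString:

#include <string>
#include <vector>

std::string DedupToken(const std::vector<std::string> &frames, size_t n) {
  std::string token;
  for (size_t i = 0; i < n && i < frames.size(); i++) {
    if (!token.empty()) token += "--";
    token += frames[i];
  }
  return token;  // e.g. "malloc--MyType::Init--main" for n == 3
}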
void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
@@ -72,3 +81,38 @@ void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
}
} // namespace __sanitizer
+using namespace __sanitizer;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
+ uptr out_buf_size) {
+ if (!out_buf_size) return;
+ pc = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ if (!frame) {
+ internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+ return;
+ }
+ InternalScopedString frame_desc(GetPageSizeCached());
+ RenderFrame(&frame_desc, fmt, 0, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, frame_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
+ char *out_buf, uptr out_buf_size) {
+ if (!out_buf_size) return;
+ out_buf[0] = 0;
+ DataInfo DI;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
+ InternalScopedString data_desc(GetPageSizeCached());
+ RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, data_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
+} // extern "C"
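
Aside: __sanitizer_symbolize_pc lets instrumented programs render any PC through the same frame formatter the runtime itself uses. A hedged usage sketch (the %p/%F/%L directives follow the stack-trace-format flag syntax; uintptr_t stands in for the internal uptr):

#include <stdint.h>

extern "C" void __sanitizer_symbolize_pc(uintptr_t pc, const char *fmt,
                                         char *out_buf,
                                         uintptr_t out_buf_size);

void ReportCaller() {
  char buf[256];
  __sanitizer_symbolize_pc((uintptr_t)__builtin_return_address(0),
                           "%p %F %L", buf, sizeof(buf));
  // buf now holds e.g. "0x4004f0 in main foo.cc:7", or "<can't symbolize>".
}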
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
index bcc9de78ec1..de78c7ac8d1 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.cc
@@ -114,6 +114,35 @@ void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
}
}
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix) {
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", DI->line);
+ break;
+ case 'g':
+ buffer->append("%s", DI->name);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
+ *p);
+ Die();
+ }
+ }
+}
+
void RenderSourceLocation(InternalScopedString *buffer, const char *file,
int line, int column, bool vs_style,
const char *strip_path_prefix) {
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
index a553568ea6f..6726f141f22 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace_printer.h
@@ -57,6 +57,13 @@ void RenderSourceLocation(InternalScopedString *buffer, const char *file,
void RenderModuleLocation(InternalScopedString *buffer, const char *module,
uptr offset, const char *strip_path_prefix);
+// Same as RenderFrame, but for data section (global variables).
+// Accepts %s, %l from above.
+// Also accepts:
+// %g - name of the global variable.
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix = "");
+
} // namespace __sanitizer
#endif // SANITIZER_STACKTRACE_PRINTER_H
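
Aside: RenderData is the data-section counterpart of RenderFrame and understands only %%, %s (file), %l (line) and %g (global name). An illustrative call from inside the runtime, assuming SymbolizeData() has already filled the DataInfo:

DataInfo di;  // di.name, di.file, di.line populated by SymbolizeData()
InternalScopedString out(GetPageSizeCached());
RenderData(&out, "%g at %s:%l", &di, common_flags()->strip_path_prefix);
// out.data() -> e.g. "my_global at foo.cc:42"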
diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
index c919e4f6e97..891386dc0ba 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
@@ -13,7 +13,8 @@
#include "sanitizer_platform.h"
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
- defined(__aarch64__))
+ defined(__aarch64__) || defined(__powerpc64__) || \
+ defined(__s390__))
#include "sanitizer_stoptheworld.h"
@@ -36,6 +37,9 @@
# include <asm/ptrace.h>
# endif
# include <sys/user.h> // for user_regs_struct
+# if SANITIZER_ANDROID && SANITIZER_MIPS
+# include <asm/reg.h> // for mips SP register in sys/user.h
+# endif
#endif
#include <sys/wait.h> // for signal-related stuff
@@ -73,10 +77,10 @@
// thread-local variables used by libc will be shared between the tracer task
// and the thread which spawned it.
-COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
-
namespace __sanitizer {
+COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
+
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
StopTheWorldCallback callback;
@@ -184,6 +188,7 @@ void ThreadSuspender::KillAllThreads() {
bool ThreadSuspender::SuspendAllThreads() {
ThreadLister thread_lister(pid_);
bool added_threads;
+ bool first_iteration = true;
do {
// Run through the directory entries once.
added_threads = false;
@@ -193,12 +198,13 @@ bool ThreadSuspender::SuspendAllThreads() {
added_threads = true;
tid = thread_lister.GetNextTID();
}
- if (thread_lister.error()) {
+ if (thread_lister.error() || (first_iteration && !added_threads)) {
// Detach threads and fail.
ResumeAllThreads();
return false;
}
thread_lister.Reset();
+ first_iteration = false;
} while (added_threads);
return true;
}
@@ -227,8 +233,8 @@ static void TracerThreadDieCallback() {
// Signal handler to wake up suspended threads when the tracer thread dies.
static void TracerThreadSignalHandler(int signum, void *siginfo, void *uctx) {
SignalContext ctx = SignalContext::Create(siginfo, uctx);
- VPrintf(1, "Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n",
- signum, ctx.addr, ctx.pc, ctx.sp);
+ Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
+ ctx.addr, ctx.pc, ctx.sp);
ThreadSuspender *inst = thread_suspender_instance;
if (inst) {
if (signum == SIGABRT)
@@ -465,13 +471,22 @@ typedef pt_regs regs_struct;
#elif defined(__mips__)
typedef struct user regs_struct;
-#define REG_SP regs[EF_REG29]
+# if SANITIZER_ANDROID
+# define REG_SP regs[EF_R29]
+# else
+# define REG_SP regs[EF_REG29]
+# endif
#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
#define ARCH_IOVEC_FOR_GETREGSET
+#elif defined(__s390__)
+typedef _user_regs_struct regs_struct;
+#define REG_SP gprs[15]
+#define ARCH_IOVEC_FOR_GETREGSET
+
#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
@@ -509,5 +524,6 @@ uptr SuspendedThreadsList::RegisterCount() {
}
} // namespace __sanitizer
-#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
- // || defined(__aarch64__)
+#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
+ // || defined(__aarch64__) || defined(__powerpc64__)
+ // || defined(__s390__)
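
Aside: SuspendAllThreads chases a moving target, since a traced process can spawn threads while it is being attached; hence the rescan-until-fixed-point loop, now with an early bail-out when the very first pass attaches nothing. Its shape, with names following the patch but simplified signatures:

bool SuspendAll(ThreadLister &lister, ThreadSuspender &suspender) {
  bool added_threads, first_iteration = true;
  do {
    added_threads = false;
    for (int tid = lister.GetNextTID(); tid >= 0; tid = lister.GetNextTID())
      if (suspender.SuspendThread(tid)) added_threads = true;  // new this pass
    if (lister.error() || (first_iteration && !added_threads))
      return false;  // can't list, or nothing at all to stop: detach and fail
    lister.Reset();
    first_iteration = false;
  } while (added_threads);  // repeat until a pass attaches nothing new
  return true;
}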
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
index 2b6a78155fd..bfdff59a35c 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.cc
@@ -125,7 +125,7 @@ void SuppressionContext::Parse(const char *str) {
Printf("%s: failed to parse suppressions\n", SanitizerToolName);
Die();
}
- Suppression s = {};
+ Suppression s;
s.type = suppression_types_[type];
s.templ = (char*)InternalAlloc(end2 - line + 1);
internal_memcpy(s.templ, line, end2 - line);
diff --git a/libsanitizer/sanitizer_common/sanitizer_suppressions.h b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
index efec926476b..ed6d7baae84 100644
--- a/libsanitizer/sanitizer_common/sanitizer_suppressions.h
+++ b/libsanitizer/sanitizer_common/sanitizer_suppressions.h
@@ -18,6 +18,7 @@
namespace __sanitizer {
struct Suppression {
+ Suppression() { internal_memset(this, 0, sizeof(*this)); }
const char *type;
char *templ;
atomic_uint32_t hit_count;
@@ -40,7 +41,7 @@ class SuppressionContext {
void GetMatched(InternalMmapVector<Suppression *> *matched);
private:
- static const int kMaxSuppressionTypes = 16;
+ static const int kMaxSuppressionTypes = 32;
const char **const suppression_types_;
const int suppression_types_num_;
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
index 0e58d5fed6e..3557415aeab 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.cc
@@ -58,6 +58,7 @@ DataInfo::DataInfo() {
void DataInfo::Clear() {
InternalFree(module);
+ InternalFree(file);
InternalFree(name);
internal_memset(this, 0, sizeof(DataInfo));
}
@@ -94,7 +95,7 @@ const char *Symbolizer::ModuleNameOwner::GetOwnedCopy(const char *str) {
}
Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
- : module_names_(&mu_), n_modules_(0), modules_fresh_(false), tools_(tools),
+ : module_names_(&mu_), modules_(), modules_fresh_(false), tools_(tools),
start_hook_(0), end_hook_(0) {}
Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
index 0a443a70115..2b90b42e2ba 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer.h
@@ -63,6 +63,8 @@ struct DataInfo {
// (de)allocated using sanitizer internal allocator.
char *module;
uptr module_offset;
+ char *file;
+ uptr line;
char *name;
uptr start;
uptr size;
@@ -78,6 +80,7 @@ class Symbolizer final {
/// Initialize and return platform-specific implementation of symbolizer
/// (if it wasn't already initialized).
static Symbolizer *GetOrInit();
+ static void LateInitialize();
// Returns a list of symbolized frames for a given address (containing
// all inlined functions, if necessary).
SymbolizedStack *SymbolizePC(uptr address);
@@ -111,6 +114,8 @@ class Symbolizer final {
void AddHooks(StartSymbolizationHook start_hook,
EndSymbolizationHook end_hook);
+ const LoadedModule *FindModuleForAddress(uptr address);
+
private:
// GetModuleNameAndOffsetForPC has to return a string to the caller.
// Since the corresponding module might get unloaded later, we should create
@@ -137,9 +142,7 @@ class Symbolizer final {
bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
uptr *module_offset);
- LoadedModule *FindModuleForAddress(uptr address);
- LoadedModule modules_[kMaxNumberOfModules];
- uptr n_modules_;
+ ListOfModules modules_;
// If stale, need to reload the modules before looking up addresses.
bool modules_fresh_;
@@ -155,7 +158,6 @@ class Symbolizer final {
// always synchronized.
BlockingMutex mu_;
- typedef IntrusiveList<SymbolizerTool>::Iterator Iterator;
IntrusiveList<SymbolizerTool> tools_;
explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);
@@ -173,6 +175,10 @@ class Symbolizer final {
};
};
+#ifdef SANITIZER_WINDOWS
+void InitializeDbgHelpIfNeeded();
+#endif
+
} // namespace __sanitizer
#endif // SANITIZER_SYMBOLIZER_H
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h
index a87964b4636..119cb688463 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_internal.h
@@ -26,7 +26,7 @@ const char *ExtractUptr(const char *str, const char *delims, uptr *result);
const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
char **result);
-const char *DemangleCXXABI(const char *name);
+const char *DemangleSwiftAndCXX(const char *name);
// SymbolizerTool is an interface that is implemented by individual "tools"
// that can perform symbolication (external llvm-symbolizer, libbacktrace,
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
index 4264b5e3ddc..45eb11a4d3b 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cc
@@ -67,10 +67,9 @@ SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
return res;
// Always fill data about module name and offset.
res->info.FillModuleInfo(module_name, module_offset);
- for (auto iter = Iterator(&tools_); iter.hasNext();) {
- auto *tool = iter.next();
+ for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
- if (tool->SymbolizePC(addr, res)) {
+ if (tool.SymbolizePC(addr, res)) {
return res;
}
}
@@ -86,10 +85,9 @@ bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
info->Clear();
info->module = internal_strdup(module_name);
info->module_offset = module_offset;
- for (auto iter = Iterator(&tools_); iter.hasNext();) {
- auto *tool = iter.next();
+ for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
- if (tool->SymbolizeData(addr, info)) {
+ if (tool.SymbolizeData(addr, info)) {
return true;
}
}
@@ -111,19 +109,17 @@ bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
void Symbolizer::Flush() {
BlockingMutexLock l(&mu_);
- for (auto iter = Iterator(&tools_); iter.hasNext();) {
- auto *tool = iter.next();
+ for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
- tool->Flush();
+ tool.Flush();
}
}
const char *Symbolizer::Demangle(const char *name) {
BlockingMutexLock l(&mu_);
- for (auto iter = Iterator(&tools_); iter.hasNext();) {
- auto *tool = iter.next();
+ for (auto &tool : tools_) {
SymbolizerScope sym_scope(this);
- if (const char *demangled = tool->Demangle(name))
+ if (const char *demangled = tool.Demangle(name))
return demangled;
}
return PlatformDemangle(name);
@@ -137,27 +133,23 @@ void Symbolizer::PrepareForSandboxing() {
bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
const char **module_name,
uptr *module_offset) {
- LoadedModule *module = FindModuleForAddress(address);
- if (module == 0)
+ const LoadedModule *module = FindModuleForAddress(address);
+ if (module == nullptr)
return false;
*module_name = module->full_name();
*module_offset = address - module->base_address();
return true;
}
-LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
+const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
bool modules_were_reloaded = false;
if (!modules_fresh_) {
- for (uptr i = 0; i < n_modules_; i++)
- modules_[i].clear();
- n_modules_ =
- GetListOfModules(modules_, kMaxNumberOfModules, /* filter */ nullptr);
- CHECK_GT(n_modules_, 0);
- CHECK_LT(n_modules_, kMaxNumberOfModules);
+ modules_.init();
+ RAW_CHECK(modules_.size() > 0);
modules_fresh_ = true;
modules_were_reloaded = true;
}
- for (uptr i = 0; i < n_modules_; i++) {
+ for (uptr i = 0; i < modules_.size(); i++) {
if (modules_[i].containsAddress(address)) {
return &modules_[i];
}
@@ -211,10 +203,18 @@ class LLVMSymbolizerProcess : public SymbolizerProcess {
const char* const kSymbolizerArch = "--default-arch=x86_64";
#elif defined(__i386__)
const char* const kSymbolizerArch = "--default-arch=i386";
-#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)
+#elif defined(__aarch64__)
+ const char* const kSymbolizerArch = "--default-arch=arm64";
+#elif defined(__arm__)
+ const char* const kSymbolizerArch = "--default-arch=arm";
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
const char* const kSymbolizerArch = "--default-arch=powerpc64";
-#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
const char* const kSymbolizerArch = "--default-arch=powerpc64le";
+#elif defined(__s390x__)
+ const char* const kSymbolizerArch = "--default-arch=s390x";
+#elif defined(__s390__)
+ const char* const kSymbolizerArch = "--default-arch=s390";
#else
const char* const kSymbolizerArch = "--default-arch=unknown";
#endif
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
index e65976c18d0..249ccdf8377 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_mac.cc
@@ -17,8 +17,6 @@
#include "sanitizer_mac.h"
#include "sanitizer_symbolizer_mac.h"
-namespace __sanitizer {
-
#include <dlfcn.h>
#include <errno.h>
#include <stdlib.h>
@@ -26,11 +24,14 @@ namespace __sanitizer {
#include <unistd.h>
#include <util.h>
+namespace __sanitizer {
+
bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
Dl_info info;
int result = dladdr((const void *)addr, &info);
if (!result) return false;
- const char *demangled = DemangleCXXABI(info.dli_sname);
+ const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
+ if (!demangled) return false;
stack->info.function = internal_strdup(demangled);
return true;
}
@@ -39,7 +40,7 @@ bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
Dl_info info;
int result = dladdr((const void *)addr, &info);
if (!result) return false;
- const char *demangled = DemangleCXXABI(info.dli_sname);
+ const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
datainfo->name = internal_strdup(demangled);
datainfo->start = (uptr)info.dli_saddr;
return true;
@@ -77,23 +78,6 @@ class AtosSymbolizerProcess : public SymbolizerProcess {
char pid_str_[16];
};
-static const char *kAtosErrorMessages[] = {
- "atos cannot examine process",
- "unable to get permission to examine process",
- "An admin user name and password is required",
- "could not load inserted library",
- "architecture mismatch between analysis process",
-};
-
-static bool IsAtosErrorMessage(const char *str) {
- for (uptr i = 0; i < ARRAY_SIZE(kAtosErrorMessages); i++) {
- if (internal_strstr(str, kAtosErrorMessages[i])) {
- return true;
- }
- }
- return false;
-}
-
static bool ParseCommandOutput(const char *str, uptr addr, char **out_name,
char **out_module, char **out_file, uptr *line,
uptr *start_address) {
@@ -110,15 +94,15 @@ static bool ParseCommandOutput(const char *str, uptr addr, char **out_name,
// 0xdeadbeef (in library.dylib)
// 0xdeadbeef
- if (IsAtosErrorMessage(trim)) {
- Report("atos returned an error: %s\n", trim);
+ const char *rest = trim;
+ char *symbol_name;
+ rest = ExtractTokenUpToDelimiter(rest, " (in ", &symbol_name);
+ if (rest[0] == '\0') {
+ InternalFree(symbol_name);
InternalFree(trim);
return false;
}
- const char *rest = trim;
- char *symbol_name;
- rest = ExtractTokenUpToDelimiter(rest, " (in ", &symbol_name);
if (internal_strncmp(symbol_name, "0x", 2) != 0)
*out_name = symbol_name;
else
@@ -149,6 +133,7 @@ AtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator)
bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
if (!process_) return false;
+ if (addr == 0) return false;
char command[32];
internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
const char *buf = process_->SendCommand(command);
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
index e4ff525d6b0..3fcd7d04880 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
@@ -24,7 +24,9 @@
#include "sanitizer_symbolizer_libbacktrace.h"
#include "sanitizer_symbolizer_mac.h"
+#include <dlfcn.h> // for dlsym()
#include <errno.h>
+#include <stdint.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>
@@ -59,6 +61,44 @@ const char *DemangleCXXABI(const char *name) {
return name;
}
+// As of now, there are no headers for the Swift runtime. Once they are
+// present, we will link against them weakly, since the Swift runtime is not
+// required to be linked in.
+typedef char *(*swift_demangle_ft)(const char *mangledName,
+ size_t mangledNameLength, char *outputBuffer,
+ size_t *outputBufferSize, uint32_t flags);
+static swift_demangle_ft swift_demangle_f;
+
+// This must not happen lazily at symbolication time, because dlsym uses
+// malloc and thread-local storage, both of which are unsafe to use during
+// symbolication.
+static void InitializeSwiftDemangler() {
+ swift_demangle_f = (swift_demangle_ft)dlsym(RTLD_DEFAULT, "swift_demangle");
+}
+
+// Attempts to demangle a Swift name. The demangler will return nullptr if a
+// non-Swift name is passed in.
+const char *DemangleSwift(const char *name) {
+ if (!name) return nullptr;
+
+ // Check if we are dealing with a Swift mangled name first.
+ if (name[0] != '_' || name[1] != 'T') {
+ return nullptr;
+ }
+
+ if (swift_demangle_f)
+ return swift_demangle_f(name, internal_strlen(name), 0, 0, 0);
+
+ return nullptr;
+}
+
+const char *DemangleSwiftAndCXX(const char *name) {
+ if (!name) return nullptr;
+ if (const char *swift_demangled_name = DemangleSwift(name))
+ return swift_demangled_name;
+ return DemangleCXXABI(name);
+}
+
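+
The dlsym-based lookup above is a common pattern for an optional runtime dependency: resolve the symbol once, eagerly, then call through the function pointer only if it was found. A minimal standalone sketch of the same pattern, assuming only POSIX dlfcn; the mangled name is a made-up placeholder, not a real Swift symbol:

// optional_demangle.cc -- sketch of binding an optional runtime symbol.
// Build: c++ optional_demangle.cc -ldl (the -ldl is unneeded on macOS).
#include <dlfcn.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

typedef char *(*swift_demangle_ft)(const char *mangledName,
                                   size_t mangledNameLength,
                                   char *outputBuffer,
                                   size_t *outputBufferSize, uint32_t flags);

int main() {
  // Resolve once, up front; dlsym may allocate, so doing this lazily at
  // symbolication time would be unsafe inside a sanitizer runtime.
  swift_demangle_ft demangle =
      (swift_demangle_ft)dlsym(RTLD_DEFAULT, "swift_demangle");
  const char *name = "_TFC3foo3bar3bazfT_T_";  // hypothetical mangled name
  if (demangle) {
    if (char *pretty = demangle(name, strlen(name), nullptr, nullptr, 0))
      printf("%s\n", pretty);
  } else {
    printf("Swift runtime not loaded; keeping %s as-is\n", name);
  }
}
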
bool SymbolizerProcess::StartSymbolizerSubprocess() {
if (!FileExists(path_)) {
if (!reported_invalid_path_) {
@@ -72,8 +112,15 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
if (use_forkpty_) {
#if SANITIZER_MAC
fd_t fd = kInvalidFd;
+
+ // forkpty redirects stdout and stderr into a single stream, so we would
+ // receive error messages as standard replies. To avoid that, let's dup
+ // stderr and restore it in the child.
+ int saved_stderr = dup(STDERR_FILENO);
+ CHECK_GE(saved_stderr, 0);
+
// Use forkpty to disable buffering in the new terminal.
- pid = forkpty(&fd, 0, 0, 0);
+ pid = internal_forkpty(&fd);
if (pid == -1) {
// forkpty() failed.
Report("WARNING: failed to fork external symbolizer (errno: %d)\n",
@@ -81,6 +128,11 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
return false;
} else if (pid == 0) {
// Child subprocess.
+
+ // Restore stderr.
+ CHECK_GE(dup2(saved_stderr, STDERR_FILENO), 0);
+ close(saved_stderr);
+
const char *argv[kArgVMax];
GetArgV(path_, argv);
execv(path_, const_cast<char **>(&argv[0]));
@@ -90,6 +142,8 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
// Continue execution in parent process.
input_fd_ = output_fd_ = fd;
+ close(saved_stderr);
+
// Disable echo in the new terminal, disable CR.
struct termios termflags;
tcgetattr(fd, &termflags);
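A runnable sketch of the saved_stderr technique used above, assuming a POSIX system with forkpty (declared in <pty.h> on Linux, where it needs -lutil; in <util.h> on macOS). forkpty points all three standard streams of the child at the pty slave, so stderr has to be duplicated before the fork and restored in the child:

// forkpty_stderr.cc -- keep the child's stderr off the pty.
#include <pty.h>      // forkpty; use <util.h> on macOS
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int saved_stderr = dup(STDERR_FILENO);  // duplicate before forking
  int master = -1;
  pid_t pid = forkpty(&master, nullptr, nullptr, nullptr);
  if (pid < 0) { perror("forkpty"); return 1; }
  if (pid == 0) {
    // Child: stdout still goes to the pty, but stderr is restored, so
    // diagnostics do not get mixed into the parent's reply stream.
    dup2(saved_stderr, STDERR_FILENO);
    close(saved_stderr);
    execlp("sh", "sh", "-c", "echo reply; echo oops >&2", (char *)nullptr);
    _exit(1);
  }
  close(saved_stderr);  // parent no longer needs the duplicate
  char buf[128];
  ssize_t n = read(master, buf, sizeof(buf) - 1);
  if (n > 0) { buf[n] = '\0'; printf("pty reply: %s", buf); }
  return 0;
}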
@@ -135,47 +189,23 @@ bool SymbolizerProcess::StartSymbolizerSubprocess() {
CHECK(infd);
CHECK(outfd);
- // Real fork() may call user callbacks registered with pthread_atfork().
- pid = internal_fork();
- if (pid == -1) {
- // Fork() failed.
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ pid = StartSubprocess(path_, argv, /* stdin */ outfd[0],
+ /* stdout */ infd[1]);
+ if (pid < 0) {
internal_close(infd[0]);
- internal_close(infd[1]);
- internal_close(outfd[0]);
internal_close(outfd[1]);
- Report("WARNING: failed to fork external symbolizer "
- " (errno: %d)\n", errno);
return false;
- } else if (pid == 0) {
- // Child subprocess.
- internal_close(STDOUT_FILENO);
- internal_close(STDIN_FILENO);
- internal_dup2(outfd[0], STDIN_FILENO);
- internal_dup2(infd[1], STDOUT_FILENO);
- internal_close(outfd[0]);
- internal_close(outfd[1]);
- internal_close(infd[0]);
- internal_close(infd[1]);
- for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--)
- internal_close(fd);
- const char *argv[kArgVMax];
- GetArgV(path_, argv);
- execv(path_, const_cast<char **>(&argv[0]));
- internal__exit(1);
}
- // Continue execution in parent process.
- internal_close(outfd[0]);
- internal_close(infd[1]);
input_fd_ = infd[0];
output_fd_ = outfd[1];
}
// Check that symbolizer subprocess started successfully.
- int pid_status;
SleepForMillis(kSymbolizerStartupTimeMillis);
- int exited_pid = waitpid(pid, &pid_status, WNOHANG);
- if (exited_pid != 0) {
+ if (!IsProcessRunning(pid)) {
// Either waitpid failed, or child has already exited.
Report("WARNING: external symbolizer didn't start up correctly!\n");
return false;
@@ -372,7 +402,7 @@ class InternalSymbolizer : public SymbolizerTool {
#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
const char *Symbolizer::PlatformDemangle(const char *name) {
- return DemangleCXXABI(name);
+ return DemangleSwiftAndCXX(name);
}
void Symbolizer::PlatformPrepareForSandboxing() {}
@@ -431,7 +461,9 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Symbolizer is disabled.\n");
return;
}
- if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+ if (IsReportingOOM()) {
+ VReport(2, "Cannot use internal symbolizer: out of memory\n");
+ } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
VReport(2, "Using internal symbolizer.\n");
list->push_back(tool);
return;
@@ -450,10 +482,6 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
VReport(2, "Using dladdr symbolizer.\n");
list->push_back(new(*allocator) DlAddrSymbolizer());
#endif // SANITIZER_MAC
-
- if (list->size() == 0) {
- Report("WARNING: no internal or external symbolizer found.\n");
- }
}
Symbolizer *Symbolizer::PlatformInit() {
@@ -463,6 +491,11 @@ Symbolizer *Symbolizer::PlatformInit() {
return new(symbolizer_allocator_) Symbolizer(list);
}
+void Symbolizer::LateInitialize() {
+ Symbolizer::GetOrInit();
+ InitializeSwiftDemangler();
+}
+
} // namespace __sanitizer
#endif // SANITIZER_POSIX
diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
index dadb0eaaf6e..95fda7ec42e 100644
--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_win.cc
@@ -40,6 +40,8 @@ bool TrySymInitialize() {
// FIXME: We don't call SymCleanup() on exit yet - should we?
}
+} // namespace
+
// Initializes DbgHelp library, if it's not yet initialized. Calls to this
// function should be synchronized with respect to other calls to DbgHelp API
// (e.g. from WinSymbolizerTool).
@@ -95,8 +97,6 @@ void InitializeDbgHelpIfNeeded() {
}
}
-} // namespace
-
bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
InitializeDbgHelpIfNeeded();
@@ -277,6 +277,10 @@ Symbolizer *Symbolizer::PlatformInit() {
return new(symbolizer_allocator_) Symbolizer(list);
}
+void Symbolizer::LateInitialize() {
+ Symbolizer::GetOrInit();
+}
+
} // namespace __sanitizer
#endif // _WIN32
diff --git a/libsanitizer/sanitizer_common/sanitizer_termination.cc b/libsanitizer/sanitizer_common/sanitizer_termination.cc
new file mode 100644
index 00000000000..6d7335112b3
--- /dev/null
+++ b/libsanitizer/sanitizer_common/sanitizer_termination.cc
@@ -0,0 +1,84 @@
+//===-- sanitizer_termination.cc --------------------------------*- C++ -*-===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains the Sanitizer termination functions CheckFailed and Die,
+/// and the callback functionalities associated with them.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+static const int kMaxNumOfInternalDieCallbacks = 5;
+static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
+
+bool AddDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == nullptr) {
+ InternalDieCallbacks[i] = callback;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool RemoveDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == callback) {
+ internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
+ sizeof(InternalDieCallbacks[0]) *
+ (kMaxNumOfInternalDieCallbacks - i - 1));
+ InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
+ return true;
+ }
+ }
+ return false;
+}
+
+static DieCallbackType UserDieCallback;
+void SetUserDieCallback(DieCallbackType callback) {
+ UserDieCallback = callback;
+}
+
+void NORETURN Die() {
+ if (UserDieCallback)
+ UserDieCallback();
+ for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
+ if (InternalDieCallbacks[i])
+ InternalDieCallbacks[i]();
+ }
+ if (common_flags()->abort_on_error)
+ Abort();
+ internal__exit(common_flags()->exitcode);
+}
+
+static CheckFailedCallbackType CheckFailedCallback;
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {
+ CheckFailedCallback = callback;
+}
+
+const int kSecondsToSleepWhenRecursiveCheckFailed = 2;
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) > 10) {
+ SleepForSeconds(kSecondsToSleepWhenRecursiveCheckFailed);
+ Trap();
+ }
+
+ if (CheckFailedCallback) {
+ CheckFailedCallback(file, line, cond, v1, v2);
+ }
+ Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+ v1, v2);
+ Die();
+}
+
+} // namespace __sanitizer
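The fixed-size callback array above deliberately avoids any allocation on the death path, and Die() runs the internal callbacks newest-first. A self-contained sketch of the same registry pattern; the names here are illustrative, not the sanitizer API:

// die_callbacks.cc -- fixed-slot LIFO callback registry, no allocation.
#include <cstdio>

typedef void (*DieCallbackType)();
static const int kMaxCallbacks = 5;
static DieCallbackType callbacks[kMaxCallbacks];

bool AddDieCallback(DieCallbackType cb) {
  for (int i = 0; i < kMaxCallbacks; i++)
    if (!callbacks[i]) { callbacks[i] = cb; return true; }
  return false;  // registry full
}

void RunDieCallbacks() {
  // Newest first, mirroring the reverse iteration in Die() above.
  for (int i = kMaxCallbacks - 1; i >= 0; i--)
    if (callbacks[i]) callbacks[i]();
}

static void FlushLogs() { puts("flushing logs"); }
static void PrintStats() { puts("printing stats"); }

int main() {
  AddDieCallback(FlushLogs);
  AddDieCallback(PrintStats);
  RunDieCallbacks();  // prints stats, then flushes logs
}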
diff --git a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
index bfa610443c1..c865d2cad84 100644
--- a/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_thread_registry.cc
@@ -129,7 +129,7 @@ u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
tctx = context_factory_(tid);
threads_[tid] = tctx;
} else {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
SanitizerToolName, max_threads_);
#else
@@ -275,6 +275,8 @@ void ThreadRegistry::StartThread(u32 tid, uptr os_id, void *arg) {
}
void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
+ if (tctx->tid == 0)
+ return; // Don't reuse the main thread. It's a special snowflake.
dead_threads_.push_back(tctx);
if (dead_threads_.size() <= thread_quarantine_size_)
return;
diff --git a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
index 7c6ef4f9028..229225d95dd 100644
--- a/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_tls_get_addr.cc
@@ -76,7 +76,7 @@ void DTLS_Destroy() {
DTLS_Deallocate(dtls.dtv, s);
}
-#if defined(__powerpc64__)
+#if defined(__powerpc64__) || defined(__mips__)
// This is glibc's TLS_DTV_OFFSET:
// "Dynamic thread vector pointers point 0x8000 past the start of each
// TLS block."
diff --git a/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
index 408c21c913b..eb1c133e4f4 100644
--- a/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
@@ -46,6 +46,11 @@ unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder() {
+ if (AndroidGetApiLevel() >= ANDROID_LOLLIPOP_MR1) return;
+
+ // Pre-lollipop Android cannot unwind through signal handler frames with
+ // the libgcc unwinder, but it ships a libcorkscrew.so library with the
+ // necessary workarounds.

void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
if (!p) {
VReport(1,
@@ -101,6 +106,11 @@ _Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
UnwindTraceArg *arg = (UnwindTraceArg*)param;
CHECK_LT(arg->stack->size, arg->max_depth);
uptr pc = Unwind_GetIP(ctx);
+ const uptr kPageSize = GetPageSizeCached();
+ // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
+ // x86_64) is invalid and stop unwinding here. If we're adding support for
+ // a platform where this isn't true, we need to reconsider this check.
+ if (pc < kPageSize) return UNWIND_STOP;
arg->stack->trace_buffer[arg->stack->size++] = pc;
if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
return UNWIND_CONTINUE;
diff --git a/libsanitizer/sanitizer_common/sanitizer_win.cc b/libsanitizer/sanitizer_common/sanitizer_win.cc
index 0c0a29cea44..785883ce767 100644
--- a/libsanitizer/sanitizer_common/sanitizer_win.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_win.cc
@@ -25,7 +25,9 @@
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
namespace __sanitizer {
@@ -33,13 +35,15 @@ namespace __sanitizer {
// --------------------- sanitizer_common.h
uptr GetPageSize() {
- // FIXME: there is an API for getting the system page size (GetSystemInfo or
- // GetNativeSystemInfo), but if we use it here we get test failures elsewhere.
- return 1U << 14;
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
}
uptr GetMmapGranularity() {
- return 1U << 16; // FIXME: is this configurable?
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwAllocationGranularity;
}
uptr GetMaxVirtualAddress() {
@@ -81,10 +85,11 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
}
#endif // #if !SANITIZER_GO
-void *MmapOrDie(uptr size, const char *mem_type) {
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0)
- ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
+ ReportMmapFailureAndDie(size, mem_type, "allocate",
+ GetLastError(), raw_report);
return rv;
}
@@ -92,20 +97,90 @@ void UnmapOrDie(void *addr, uptr size) {
if (!size || !addr)
return;
- if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
- Report("ERROR: %s failed to "
- "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
- SanitizerToolName, size, size, addr, GetLastError());
- CHECK("unable to unmap" && 0);
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
+
+ // MEM_RELEASE can only be used to unmap whole regions previously mapped with
+ // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
+ // fails try MEM_DECOMMIT.
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+ if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+ Report("ERROR: %s failed to "
+ "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
+ SanitizerToolName, size, size, addr, GetLastError());
+ CHECK("unable to unmap" && 0);
+ }
}
}
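
The two-step VirtualFree above reflects a Win32 rule: MEM_RELEASE must be given the exact base address returned by VirtualAlloc and a size of zero, while MEM_DECOMMIT may target an arbitrary sub-range of a reservation. A small Windows-only sketch of the distinction:

// virtualfree_demo.cc -- MEM_RELEASE vs MEM_DECOMMIT (Windows only).
#include <windows.h>
#include <cstdio>

int main() {
  const SIZE_T size = 1 << 20;
  void *p = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
  if (!p) return 1;

  // Decommit just the first 64K: those pages lose their backing store,
  // but the address range stays reserved.
  if (!VirtualFree(p, 64 * 1024, MEM_DECOMMIT))
    printf("MEM_DECOMMIT failed: %lu\n", GetLastError());

  // Release the whole region: size must be 0 and p must be the base
  // address of the original allocation, or the call fails.
  if (!VirtualFree(p, 0, MEM_RELEASE))
    printf("MEM_RELEASE failed: %lu\n", GetLastError());
  return 0;
}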
+// We want to map a chunk of address space aligned to 'alignment'.
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ // Windows will align our allocations to at least 64K.
+ alignment = Max(alignment, GetMmapGranularity());
+
+ uptr mapped_addr =
+ (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (!mapped_addr)
+ ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+
+ // If we got it right on the first try, return. Otherwise, unmap it and go to
+ // the slow path.
+ if (IsAligned(mapped_addr, alignment))
+ return (void*)mapped_addr;
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // If we didn't get an aligned address, overallocate, find an aligned address,
+ // unmap, and try to allocate at that aligned address.
+ int retries = 0;
+ const int kMaxRetries = 10;
+ for (; retries < kMaxRetries &&
+ (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
+ retries++) {
+ // Overallocate size + alignment bytes.
+ mapped_addr =
+ (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
+ if (!mapped_addr)
+ ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
+ GetLastError());
+
+ // Find the aligned address.
+ uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
+
+ // Free the overallocation.
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // Attempt to allocate exactly the number of bytes we need at the aligned
+ // address. This may fail for a number of reasons, in which case we continue
+ // the loop.
+ mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ }
+
+ // Fail if we can't make this work quickly.
+ if (retries == kMaxRetries && mapped_addr == 0)
+ ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());
+
+ return (void *)mapped_addr;
+}
+
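The retry loop exists because VirtualFree cannot release part of a reservation: the over-allocation has to be unmapped in full and the aligned address re-requested, and another thread may grab it in between, hence kMaxRetries. On POSIX the same goal is simpler, since munmap can trim the head and tail of a mapping in place. A sketch of that variant for contrast, assuming a power-of-two alignment:

// mmap_aligned.cc -- aligned mapping by over-allocating and trimming (POSIX).
#include <sys/mman.h>
#include <cstddef>
#include <cstdint>
#include <cstdio>

void *MmapAligned(size_t size, size_t alignment) {
  // Over-map so that an aligned sub-range of 'size' bytes must exist.
  size_t total = size + alignment;
  void *raw = mmap(nullptr, total, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return nullptr;

  uintptr_t base = (uintptr_t)raw;
  uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
  size_t head = aligned - base;
  size_t tail = total - head - size;

  // Unlike VirtualFree, munmap may shave pieces off a mapping.
  if (head) munmap(raw, head);
  if (tail) munmap((void *)(aligned + size), tail);
  return (void *)aligned;
}

int main() {
  void *p = MmapAligned(1 << 20, 1 << 16);
  printf("aligned mapping at %p\n", p);
}
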
void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
// FIXME: is this really "NoReserve"? On Win32 this does not matter much,
// but on Win64 it does.
- (void)name; // unsupported
- void *p = VirtualAlloc((LPVOID)fixed_addr, size,
- MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ (void)name; // unsupported
+#if !SANITIZER_GO && SANITIZER_WINDOWS64
+ // On asan/Windows64, using MEM_COMMIT here would result in error
+ // 1455: ERROR_COMMITMENT_LIMIT.
+ // ASan instead uses an exception handler to commit pages on demand.
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
+#else
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
+ PAGE_READWRITE);
+#endif
if (p == 0)
Report("ERROR: %s failed to "
"allocate %p (%zd) bytes at %p (error code: %d)\n",
@@ -113,8 +188,18 @@ void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
return p;
}
+// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
+// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
- return MmapFixedNoReserve(fixed_addr, size);
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
+ }
+ return p;
}
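
MmapFixedOrDie now commits pages inside an existing reservation instead of creating a new mapping, which is the canonical reserve-then-commit idiom on Windows: grab a large contiguous address range up front with MEM_RESERVE, then commit pages on demand. A minimal sketch:

// reserve_commit.cc -- two-phase address space use on Windows.
#include <windows.h>
#include <cstdio>

int main() {
  const SIZE_T reserved = 1ULL << 30;  // 1 GiB of address space, no memory yet
  char *base = (char *)VirtualAlloc(nullptr, reserved, MEM_RESERVE,
                                    PAGE_NOACCESS);
  if (!base) return 1;

  // Commit a single 64K chunk somewhere in the middle; only now does the
  // system commit charge grow.
  char *chunk = base + (100 * 64 * 1024);
  if (!VirtualAlloc(chunk, 64 * 1024, MEM_COMMIT, PAGE_READWRITE))
    return 1;
  chunk[0] = 42;  // touching an uncommitted page would fault instead
  printf("committed chunk at %p inside reservation %p\n", chunk, base);
  return 0;
}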
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
@@ -122,10 +207,10 @@ void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type);
}
-void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
(void)name; // unsupported
void *res = VirtualAlloc((LPVOID)fixed_addr, size,
- MEM_RESERVE | MEM_COMMIT, PAGE_NOACCESS);
+ MEM_RESERVE, PAGE_NOACCESS);
if (res == 0)
Report("WARNING: %s failed to "
"mprotect %p (%zd) bytes at %p (error code: %d)\n",
@@ -133,19 +218,28 @@ void *MmapNoAccess(uptr fixed_addr, uptr size, const char *name) {
return res;
}
+void *MmapNoAccess(uptr size) {
+ void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes (error code: %d)\n",
+ SanitizerToolName, size, size, GetLastError());
+ return res;
+}
+
bool MprotectNoAccess(uptr addr, uptr size) {
DWORD old_protection;
return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
-void FlushUnneededShadowMemory(uptr addr, uptr size) {
+void ReleaseMemoryToOS(uptr addr, uptr size) {
// This is almost useless on 32-bits.
// FIXME: add madvise-analog when we move to 64-bits.
}
void NoHugePagesInRegion(uptr addr, uptr size) {
- // FIXME: probably similar to FlushUnneededShadowMemory.
+ // FIXME: probably similar to ReleaseMemoryToOS.
}
void DontDumpShadowMemory(uptr addr, uptr length) {
@@ -153,6 +247,26 @@ void DontDumpShadowMemory(uptr addr, uptr length) {
// FIXME: add madvise-analog when we move to 64-bits.
}
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
+ uptr address = 0;
+ while (true) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return 0;
+
+ if (info.State == MEM_FREE) {
+ uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
+ alignment);
+ if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
+ return shadow_address;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ }
+ return 0;
+}
+
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
MEMORY_BASIC_INFORMATION mbi;
CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
@@ -218,7 +332,7 @@ struct ModuleInfo {
uptr end_address;
};
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
if (l->base_address < r->base_address)
@@ -228,18 +342,18 @@ int CompareModulesBase(const void *pl, const void *pr) {
#endif
} // namespace
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void DumpProcessMap() {
Report("Dumping process modules:\n");
- InternalScopedBuffer<LoadedModule> modules(kMaxNumberOfModules);
- uptr num_modules =
- GetListOfModules(modules.data(), kMaxNumberOfModules, nullptr);
+ ListOfModules modules;
+ modules.init();
+ uptr num_modules = modules.size();
InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
for (size_t i = 0; i < num_modules; ++i) {
module_infos[i].filepath = modules[i].full_name();
- module_infos[i].base_address = modules[i].base_address();
- module_infos[i].end_address = modules[i].ranges().next()->end;
+ module_infos[i].base_address = modules[i].ranges().front()->beg;
+ module_infos[i].end_address = modules[i].ranges().back()->end;
}
qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
CompareModulesBase);
@@ -314,6 +428,7 @@ void Abort() {
internal__exit(3);
}
+#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
@@ -366,9 +481,8 @@ static uptr GetPreferredBase(const char *modname) {
return (uptr)pe_header->ImageBase;
}
-#ifndef SANITIZER_GO
-uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
- string_predicate_t filter) {
+void ListOfModules::init() {
+ clear();
HANDLE cur_process = GetCurrentProcess();
// Query the list of modules. Start by assuming there are no more than 256
@@ -390,10 +504,8 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
}
// |num_modules| is the number of modules actually present,
- // |count| is the number of modules we return.
- size_t nun_modules = bytes_required / sizeof(HMODULE),
- count = 0;
- for (size_t i = 0; i < nun_modules && count < max_modules; ++i) {
+ size_t num_modules = bytes_required / sizeof(HMODULE);
+ for (size_t i = 0; i < num_modules; ++i) {
HMODULE handle = hmodules[i];
MODULEINFO mi;
if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
@@ -411,9 +523,6 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
&module_name[0], kMaxPathLength, NULL, NULL);
module_name[module_name_len] = '\0';
- if (filter && !filter(module_name))
- continue;
-
uptr base_address = (uptr)mi.lpBaseOfDll;
uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
@@ -424,15 +533,13 @@ uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
uptr preferred_base = GetPreferredBase(&module_name[0]);
uptr adjusted_base = base_address - preferred_base;
- LoadedModule *cur_module = &modules[count];
- cur_module->set(module_name, adjusted_base);
+ LoadedModule cur_module;
+ cur_module.set(module_name, adjusted_base);
// We add the whole module as one single address range.
- cur_module->addAddressRange(base_address, end_address, /*executable*/ true);
- count++;
+ cur_module.addAddressRange(base_address, end_address, /*executable*/ true);
+ modules_.push_back(cur_module);
}
UnmapOrDie(hmodules, modules_buffer_size);
-
- return count;
};
// We can't use atexit() directly at __asan_init time as the CRT is not fully
@@ -459,14 +566,15 @@ __declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
+ // FIXME: Use the wide variants to handle Unicode filenames.
fd_t res;
if (mode == RdOnly) {
- res = CreateFile(filename, GENERIC_READ,
- FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
- nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+ res = CreateFileA(filename, GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
} else if (mode == WrOnly) {
- res = CreateFile(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
- FILE_ATTRIBUTE_NORMAL, nullptr);
+ res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, nullptr);
} else {
UNIMPLEMENTED();
}
@@ -613,7 +721,7 @@ void InitTlsSize() {
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
uptr *tls_addr, uptr *tls_size) {
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
*stk_addr = 0;
*stk_size = 0;
*tls_addr = 0;
@@ -634,7 +742,7 @@ void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
// FIXME: CaptureStackBackTrace might be too slow for us.
// FIXME: Compare with StackWalk64.
// FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
- size = CaptureStackBackTrace(2, Min(max_depth, kStackTraceMax),
+ size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),
(void**)trace, 0);
if (size == 0)
return;
@@ -649,6 +757,9 @@ void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
CONTEXT ctx = *(CONTEXT *)context;
STACKFRAME64 stack_frame;
memset(&stack_frame, 0, sizeof(stack_frame));
+
+ InitializeDbgHelpIfNeeded();
+
size = 0;
#if defined(_WIN64)
int machine_type = IMAGE_FILE_MACHINE_AMD64;
@@ -697,7 +808,7 @@ void InstallDeadlySignalHandlers(SignalHandlerType handler) {
// FIXME: Decide what to do on Windows.
}
-bool IsDeadlySignal(int signum) {
+bool IsHandledDeadlySignal(int signum) {
// FIXME: Decide what to do on Windows.
return false;
}
@@ -728,8 +839,8 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
}
SignalContext SignalContext::Create(void *siginfo, void *context) {
- EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD*)siginfo;
- CONTEXT *context_record = (CONTEXT*)context;
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ CONTEXT *context_record = (CONTEXT *)context;
uptr pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
@@ -741,7 +852,19 @@ SignalContext SignalContext::Create(void *siginfo, void *context) {
#endif
uptr access_addr = exception_record->ExceptionInformation[1];
- return SignalContext(context, access_addr, pc, sp, bp);
+ // The contents of this array are documented at
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
+ // The first element indicates read as 0, write as 1, or execute as 8. The
+ // second element is the faulting address.
+ WriteFlag write_flag = SignalContext::UNKNOWN;
+ switch (exception_record->ExceptionInformation[0]) {
+ case 0: write_flag = SignalContext::READ; break;
+ case 1: write_flag = SignalContext::WRITE; break;
+ case 8: write_flag = SignalContext::UNKNOWN; break;
+ }
+ bool is_memory_access = write_flag != SignalContext::UNKNOWN;
+ return SignalContext(context, access_addr, pc, sp, bp, is_memory_access,
+ write_flag);
}
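
The switch above decodes the EXCEPTION_ACCESS_VIOLATION parameters: ExceptionInformation[0] is 0 for a read, 1 for a write, 8 for a DEP execute fault, and ExceptionInformation[1] is the inaccessible address. A small vectored-handler sketch showing the same decoding (Windows only, deliberately crashes):

// av_decode.cc -- classify an access violation (Windows only).
#include <windows.h>
#include <cstdio>

static LONG CALLBACK Handler(EXCEPTION_POINTERS *info) {
  EXCEPTION_RECORD *rec = info->ExceptionRecord;
  if (rec->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
    ULONG_PTR kind = rec->ExceptionInformation[0];  // 0 / 1 / 8
    ULONG_PTR addr = rec->ExceptionInformation[1];  // faulting address
    printf("%s of address %p\n",
           kind == 0 ? "READ" : kind == 1 ? "WRITE" : "EXEC/UNKNOWN",
           (void *)addr);
  }
  ExitProcess(1);  // do not resume; this is only a demo
  return EXCEPTION_CONTINUE_SEARCH;  // unreachable
}

int main() {
  AddVectoredExceptionHandler(1, Handler);
  *(volatile int *)0x42 = 1;  // deliberate write to an unmapped address
}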
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
@@ -759,6 +882,49 @@ void CheckVMASize() {
// Do nothing.
}
+void MaybeReexec() {
+ // No need to re-exec on Windows.
+}
+
+char **GetArgv() {
+ // FIXME: Actually implement this function.
+ return 0;
+}
+
+pid_t StartSubprocess(const char *program, const char *const argv[],
+ fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
+ // FIXME: implement on this platform
+ // Should be implemented based on
+ // SymbolizerProcess::StartSymbolizerSubprocess
+ // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
+ return -1;
+}
+
+bool IsProcessRunning(pid_t pid) {
+ // FIXME: implement on this platform.
+ return false;
+}
+
+int WaitForProcess(pid_t pid) { return -1; }
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
} // namespace __sanitizer
+#if !SANITIZER_GO
+// Workaround to implement weak hooks on Windows. COFF doesn't directly support
+// weak symbols, but it does support /alternatename, which is similar. If the
+// user does not override the hook, we will use this default definition instead
+// of null.
+extern "C" void __sanitizer_print_memory_profile(int top_percent) {}
+
+#ifdef _WIN64
+#pragma comment(linker, "/alternatename:__sanitizer_print_memory_profile=__sanitizer_default_print_memory_profile") // NOLINT
+#else
+#pragma comment(linker, "/alternatename:___sanitizer_print_memory_profile=___sanitizer_default_print_memory_profile") // NOLINT
+#endif
+#endif
+
#endif // _WIN32
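
A standalone illustration of the /alternatename trick, assuming MSVC on x64 (on x86, C symbols carry a leading underscore, hence the second pragma above); my_hook and my_default_hook are made-up names, not a real API:

// weak_hook.cc -- COFF "weak" default via /alternatename (MSVC, x64).
#include <cstdio>

// Declared here, but possibly never defined by the user.
extern "C" void my_hook(int value);

// Fallback the linker substitutes when no definition of my_hook exists.
extern "C" void my_default_hook(int value) {
  printf("default hook: %d\n", value);
}

#pragma comment(linker, "/alternatename:my_hook=my_default_hook")

int main() {
  my_hook(7);  // resolves to my_default_hook unless a real my_hook is linked
}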
diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am
index 9833de20bd8..7a0c2971143 100644
--- a/libsanitizer/tsan/Makefile.am
+++ b/libsanitizer/tsan/Makefile.am
@@ -10,13 +10,16 @@ AM_CXXFLAGS += -std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
+nodist_toolexeclib_HEADERS = libtsan_preinit.o
tsan_files = \
tsan_clock.cc \
+ tsan_debugging.cc \
tsan_fd.cc \
tsan_flags.cc \
tsan_ignoreset.cc \
tsan_interceptors.cc \
+ tsan_interceptors_mac.cc \
tsan_interface_ann.cc \
tsan_interface_atomic.cc \
tsan_interface.cc \
@@ -35,6 +38,7 @@ tsan_files = \
tsan_report.cc \
tsan_rtl.cc \
tsan_rtl_mutex.cc \
+ tsan_rtl_proc.cc \
tsan_rtl_report.cc \
tsan_rtl_thread.cc \
tsan_stack_trace.cc \
@@ -44,7 +48,7 @@ tsan_files = \
tsan_sync.cc
libtsan_la_SOURCES = $(tsan_files)
-EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S
+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S
libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
libtsan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS)
if LIBBACKTRACE_SUPPORTED
@@ -54,6 +58,9 @@ endif
libtsan_la_LIBADD += $(LIBSTDCXX_RAW_CXX_LDFLAGS)
libtsan_la_LDFLAGS = -version-info `grep -v '^\#' $(srcdir)/libtool-version` $(link_libtsan)
+libtsan_preinit.o: tsan_preinit.o
+ cp $< $@
+
# Work around what appears to be a GNU make bug handling MAKEFLAGS
# values defined in terms of make variables, as is the case for CC and
# friends when we are called from the top level Makefile.
diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in
index de8de7c6674..4d4ec47becc 100644
--- a/libsanitizer/tsan/Makefile.in
+++ b/libsanitizer/tsan/Makefile.in
@@ -15,6 +15,7 @@
@SET_MAKE@
+
VPATH = @srcdir@
am__make_dryrun = \
{ \
@@ -101,20 +102,22 @@ am__uninstall_files_from_dir = { \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
-am__installdirs = "$(DESTDIR)$(toolexeclibdir)"
+am__installdirs = "$(DESTDIR)$(toolexeclibdir)" \
+ "$(DESTDIR)$(toolexeclibdir)"
LTLIBRARIES = $(toolexeclib_LTLIBRARIES)
am__DEPENDENCIES_1 =
-am__objects_1 = tsan_clock.lo tsan_fd.lo tsan_flags.lo \
- tsan_ignoreset.lo tsan_interceptors.lo tsan_interface_ann.lo \
+am__objects_1 = tsan_clock.lo tsan_debugging.lo tsan_fd.lo \
+ tsan_flags.lo tsan_ignoreset.lo tsan_interceptors.lo \
+ tsan_interceptors_mac.lo tsan_interface_ann.lo \
tsan_interface_atomic.lo tsan_interface.lo \
tsan_interface_java.lo tsan_libdispatch_mac.lo \
tsan_malloc_mac.lo tsan_md5.lo tsan_mman.lo tsan_mutex.lo \
tsan_mutexset.lo tsan_new_delete.lo tsan_platform_linux.lo \
tsan_platform_mac.lo tsan_platform_posix.lo \
tsan_platform_windows.lo tsan_report.lo tsan_rtl.lo \
- tsan_rtl_mutex.lo tsan_rtl_report.lo tsan_rtl_thread.lo \
- tsan_stack_trace.lo tsan_stat.lo tsan_suppressions.lo \
- tsan_symbolize.lo tsan_sync.lo
+ tsan_rtl_mutex.lo tsan_rtl_proc.lo tsan_rtl_report.lo \
+ tsan_rtl_thread.lo tsan_stack_trace.lo tsan_stat.lo \
+ tsan_suppressions.lo tsan_symbolize.lo tsan_sync.lo
am_libtsan_la_OBJECTS = $(am__objects_1)
libtsan_la_OBJECTS = $(am_libtsan_la_OBJECTS)
libtsan_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
@@ -153,6 +156,7 @@ am__can_run_installinfo = \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
+HEADERS = $(nodist_toolexeclib_HEADERS)
ETAGS = etags
CTAGS = ctags
ACLOCAL = @ACLOCAL@
@@ -225,6 +229,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
@@ -309,12 +314,15 @@ AM_CXXFLAGS = -Wall -W -Wno-unused-parameter -Wwrite-strings -pedantic \
-std=gnu++11
ACLOCAL_AMFLAGS = -I m4
toolexeclib_LTLIBRARIES = libtsan.la
+nodist_toolexeclib_HEADERS = libtsan_preinit.o
tsan_files = \
tsan_clock.cc \
+ tsan_debugging.cc \
tsan_fd.cc \
tsan_flags.cc \
tsan_ignoreset.cc \
tsan_interceptors.cc \
+ tsan_interceptors_mac.cc \
tsan_interface_ann.cc \
tsan_interface_atomic.cc \
tsan_interface.cc \
@@ -333,6 +341,7 @@ tsan_files = \
tsan_report.cc \
tsan_rtl.cc \
tsan_rtl_mutex.cc \
+ tsan_rtl_proc.cc \
tsan_rtl_report.cc \
tsan_rtl_thread.cc \
tsan_stack_trace.cc \
@@ -342,7 +351,7 @@ tsan_files = \
tsan_sync.cc
libtsan_la_SOURCES = $(tsan_files)
-EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S
+EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S
libtsan_la_LIBADD = \
$(top_builddir)/sanitizer_common/libsanitizer_common.la \
$(top_builddir)/interception/libinterception.la \
@@ -469,10 +478,12 @@ distclean-compile:
-rm -f *.tab.c
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_clock.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_debugging.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_fd.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_flags.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_ignoreset.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interceptors.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interceptors_mac.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_ann.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_interface_atomic.Plo@am__quote@
@@ -492,7 +503,10 @@ distclean-compile:
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_aarch64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_amd64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mips64.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_mutex.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_ppc64.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_proc.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_report.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_rtl_thread.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tsan_stack_trace.Plo@am__quote@
@@ -548,6 +562,27 @@ mostlyclean-libtool:
clean-libtool:
-rm -rf .libs _libs
+install-nodist_toolexeclibHEADERS: $(nodist_toolexeclib_HEADERS)
+ @$(NORMAL_INSTALL)
+ @list='$(nodist_toolexeclib_HEADERS)'; test -n "$(toolexeclibdir)" || list=; \
+ if test -n "$$list"; then \
+ echo " $(MKDIR_P) '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(MKDIR_P) "$(DESTDIR)$(toolexeclibdir)" || exit 1; \
+ fi; \
+ for p in $$list; do \
+ if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
+ echo "$$d$$p"; \
+ done | $(am__base_list) | \
+ while read files; do \
+ echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(toolexeclibdir)'"; \
+ $(INSTALL_HEADER) $$files "$(DESTDIR)$(toolexeclibdir)" || exit $$?; \
+ done
+
+uninstall-nodist_toolexeclibHEADERS:
+ @$(NORMAL_UNINSTALL)
+ @list='$(nodist_toolexeclib_HEADERS)'; test -n "$(toolexeclibdir)" || list=; \
+ files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \
+ dir='$(DESTDIR)$(toolexeclibdir)'; $(am__uninstall_files_from_dir)
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
@@ -602,9 +637,9 @@ distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
check-am: all-am
check: check-am
-all-am: Makefile $(LTLIBRARIES)
+all-am: Makefile $(LTLIBRARIES) $(HEADERS)
installdirs:
- for dir in "$(DESTDIR)$(toolexeclibdir)"; do \
+ for dir in "$(DESTDIR)$(toolexeclibdir)" "$(DESTDIR)$(toolexeclibdir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
done
install: install-am
@@ -666,7 +701,8 @@ install-dvi: install-dvi-am
install-dvi-am:
-install-exec-am: install-toolexeclibLTLIBRARIES
+install-exec-am: install-nodist_toolexeclibHEADERS \
+ install-toolexeclibLTLIBRARIES
install-html: install-html-am
@@ -706,7 +742,8 @@ ps: ps-am
ps-am:
-uninstall-am: uninstall-toolexeclibLTLIBRARIES
+uninstall-am: uninstall-nodist_toolexeclibHEADERS \
+ uninstall-toolexeclibLTLIBRARIES
.MAKE: install-am install-strip
@@ -717,13 +754,18 @@ uninstall-am: uninstall-toolexeclibLTLIBRARIES
install-am install-data install-data-am install-dvi \
install-dvi-am install-exec install-exec-am install-html \
install-html-am install-info install-info-am install-man \
- install-pdf install-pdf-am install-ps install-ps-am \
- install-strip install-toolexeclibLTLIBRARIES installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-compile \
- mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
- tags uninstall uninstall-am uninstall-toolexeclibLTLIBRARIES
-
+ install-nodist_toolexeclibHEADERS install-pdf install-pdf-am \
+ install-ps install-ps-am install-strip \
+ install-toolexeclibLTLIBRARIES installcheck installcheck-am \
+ installdirs maintainer-clean maintainer-clean-generic \
+ mostlyclean mostlyclean-compile mostlyclean-generic \
+ mostlyclean-libtool pdf pdf-am ps ps-am tags uninstall \
+ uninstall-am uninstall-nodist_toolexeclibHEADERS \
+ uninstall-toolexeclibLTLIBRARIES
+
+
+libtsan_preinit.o: tsan_preinit.o
+ cp $< $@
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
diff --git a/libsanitizer/tsan/tsan_clock.cc b/libsanitizer/tsan/tsan_clock.cc
index 037afc83fc6..23f9228a672 100644
--- a/libsanitizer/tsan/tsan_clock.cc
+++ b/libsanitizer/tsan/tsan_clock.cc
@@ -80,7 +80,7 @@
// We don't have ThreadState in these methods, so this is an ugly hack that
// works only in C++.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
#else
# define CPP_STAT_INC(typ) (void)0
diff --git a/libsanitizer/tsan/tsan_debugging.cc b/libsanitizer/tsan/tsan_debugging.cc
new file mode 100644
index 00000000000..d26d4828b15
--- /dev/null
+++ b/libsanitizer/tsan/tsan_debugging.cc
@@ -0,0 +1,160 @@
+//===-- tsan_debugging.cc -------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// TSan debugging API implementation.
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan;
+
+static const char *ReportTypeDescription(ReportType typ) {
+ if (typ == ReportTypeRace) return "data-race";
+ if (typ == ReportTypeVptrRace) return "data-race-vptr";
+ if (typ == ReportTypeUseAfterFree) return "heap-use-after-free";
+ if (typ == ReportTypeVptrUseAfterFree) return "heap-use-after-free-vptr";
+ if (typ == ReportTypeThreadLeak) return "thread-leak";
+ if (typ == ReportTypeMutexDestroyLocked) return "locked-mutex-destroy";
+ if (typ == ReportTypeMutexDoubleLock) return "mutex-double-lock";
+ if (typ == ReportTypeMutexInvalidAccess) return "mutex-invalid-access";
+ if (typ == ReportTypeMutexBadUnlock) return "mutex-bad-unlock";
+ if (typ == ReportTypeMutexBadReadLock) return "mutex-bad-read-lock";
+ if (typ == ReportTypeMutexBadReadUnlock) return "mutex-bad-read-unlock";
+ if (typ == ReportTypeSignalUnsafe) return "signal-unsafe-call";
+ if (typ == ReportTypeErrnoInSignal) return "errno-in-signal-handler";
+ if (typ == ReportTypeDeadlock) return "lock-order-inversion";
+ return "";
+}
+
+static const char *ReportLocationTypeDescription(ReportLocationType typ) {
+ if (typ == ReportLocationGlobal) return "global";
+ if (typ == ReportLocationHeap) return "heap";
+ if (typ == ReportLocationStack) return "stack";
+ if (typ == ReportLocationTLS) return "tls";
+ if (typ == ReportLocationFD) return "fd";
+ return "";
+}
+
+static void CopyTrace(SymbolizedStack *first_frame, void **trace,
+ uptr trace_size) {
+ uptr i = 0;
+ for (SymbolizedStack *frame = first_frame; frame != nullptr;
+ frame = frame->next) {
+ trace[i++] = (void *)frame->info.address;
+ if (i >= trace_size) break;
+ }
+}
+
+// Meant to be called by the debugger.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report() {
+ return const_cast<ReportDesc*>(cur_thread()->current_report);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *description = ReportTypeDescription(rep->typ);
+ *count = rep->count;
+ *stack_count = rep->stacks.Size();
+ *mop_count = rep->mops.Size();
+ *loc_count = rep->locs.Size();
+ *mutex_count = rep->mutexes.Size();
+ *thread_count = rep->threads.Size();
+ *unique_tid_count = rep->unique_tids.Size();
+ if (rep->sleep) CopyTrace(rep->sleep->frames, sleep_trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->stacks.Size());
+ ReportStack *stack = rep->stacks[idx];
+ if (stack) CopyTrace(stack->frames, trace, trace_size);
+ return stack ? 1 : 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mops.Size());
+ ReportMop *mop = rep->mops[idx];
+ *tid = mop->tid;
+ *addr = (void *)mop->addr;
+ *size = mop->size;
+ *write = mop->write ? 1 : 0;
+ *atomic = mop->atomic ? 1 : 0;
+ if (mop->stack) CopyTrace(mop->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *type = ReportLocationTypeDescription(loc->type);
+ *addr = (void *)loc->global.start;
+ *start = loc->heap_chunk_start;
+ *size = loc->heap_chunk_size;
+ *tid = loc->tid;
+ *fd = loc->fd;
+ *suppressable = loc->suppressable;
+ if (loc->stack) CopyTrace(loc->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mutexes.Size());
+ ReportMutex *mutex = rep->mutexes[idx];
+ *mutex_id = mutex->id;
+ *addr = (void *)mutex->addr;
+ *destroyed = mutex->destroyed;
+ if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->threads.Size());
+ ReportThread *thread = rep->threads[idx];
+ *tid = thread->id;
+ *os_id = thread->os_id;
+ *running = thread->running;
+ *name = thread->name;
+ *parent_tid = thread->parent_tid;
+ if (thread->stack) CopyTrace(thread->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->unique_tids.Size());
+ *tid = rep->unique_tids[idx];
+ return 1;
+}
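A hedged sketch of how a debugger script or test might consume this API: it is only meaningful inside a live TSan-instrumented process that is stopped on a report (otherwise __tsan_get_current_report has nothing to return). The declarations mirror the definitions above, with uptr approximated as unsigned long for the sketch:

// inspect_report.cc -- sketch of consuming the TSan debugging API.
// Link into a TSan-instrumented binary; these symbols live in libtsan.
#include <cstdio>

extern "C" {
void *__tsan_get_current_report();
int __tsan_get_report_data(void *report, const char **description, int *count,
                           int *stack_count, int *mop_count, int *loc_count,
                           int *mutex_count, int *thread_count,
                           int *unique_tid_count, void **sleep_trace,
                           unsigned long trace_size);
}

void InspectCurrentReport() {
  void *rep = __tsan_get_current_report();
  if (!rep) return;  // only valid while a report is being produced
  const char *desc;
  int count, stacks, mops, locs, mutexes, threads, utids;
  void *sleep[16] = {};
  __tsan_get_report_data(rep, &desc, &count, &stacks, &mops, &locs, &mutexes,
                         &threads, &utids, sleep, 16);
  printf("report: %s, %d mop(s), %d stack(s)\n", desc, mops, stacks);
}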
diff --git a/libsanitizer/tsan/tsan_defs.h b/libsanitizer/tsan/tsan_defs.h
index 259b30bee5d..e540526caa2 100644
--- a/libsanitizer/tsan/tsan_defs.h
+++ b/libsanitizer/tsan/tsan_defs.h
@@ -27,27 +27,18 @@
#endif
#ifndef TSAN_CONTAINS_UBSAN
-# define TSAN_CONTAINS_UBSAN (CAN_SANITIZE_UB && !defined(SANITIZER_GO))
+# if CAN_SANITIZE_UB && !SANITIZER_GO
+# define TSAN_CONTAINS_UBSAN 1
+# else
+# define TSAN_CONTAINS_UBSAN 0
+# endif
#endif
namespace __tsan {
-#ifdef SANITIZER_GO
-const bool kGoMode = true;
-const bool kCppMode = false;
-const char *const kTsanOptionsEnv = "GORACE";
-// Go linker does not support weak symbols.
-#define CPP_WEAK
-#else
-const bool kGoMode = false;
-const bool kCppMode = true;
-const char *const kTsanOptionsEnv = "TSAN_OPTIONS";
-#define CPP_WEAK WEAK
-#endif
-
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
@@ -146,6 +137,7 @@ struct MD5Hash {
MD5Hash md5_hash(const void *data, uptr size);
+struct Processor;
struct ThreadState;
class ThreadContext;
struct Context;
diff --git a/libsanitizer/tsan/tsan_dense_alloc.h b/libsanitizer/tsan/tsan_dense_alloc.h
index 651c112c78a..780ff6f4c8e 100644
--- a/libsanitizer/tsan/tsan_dense_alloc.h
+++ b/libsanitizer/tsan/tsan_dense_alloc.h
@@ -106,7 +106,7 @@ class DenseSlabAlloc {
// Reserve 0 as invalid index.
IndexT start = fillpos_ == 0 ? 1 : 0;
for (IndexT i = start; i < kL2Size; i++) {
- new(batch + i) T();
+ new(batch + i) T;
*(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
}
*(IndexT*)(batch + kL2Size - 1) = 0;
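The change from 'new (batch + i) T()' to 'new (batch + i) T' swaps value-initialization for default-initialization: for a trivially constructible T the former zeroes the object while the latter leaves it indeterminate, which is fine here because the free-list index is written into the object immediately afterwards, so the zeroing was wasted work. A tiny illustration of the difference, with a made-up Node type:

// init_forms.cc -- default- vs value-initialization with placement new.
#include <new>
#include <cstdio>

struct Node { int next; };  // trivially constructible

int main() {
  alignas(Node) unsigned char raw1[sizeof(Node)], raw2[sizeof(Node)];
  Node *a = new (raw1) Node;    // default-init: a->next is indeterminate
  Node *b = new (raw2) Node();  // value-init: b->next == 0
  // Reading a->next before assigning would be UB; slab allocators overwrite
  // it right away, as tsan_dense_alloc.h does with the free-list index.
  a->next = 7;
  printf("%d %d\n", a->next, b->next);
}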
diff --git a/libsanitizer/tsan/tsan_flags.cc b/libsanitizer/tsan/tsan_flags.cc
index 4cec0ac09f4..5064a0fe653 100644
--- a/libsanitizer/tsan/tsan_flags.cc
+++ b/libsanitizer/tsan/tsan_flags.cc
@@ -27,8 +27,8 @@ Flags *flags() {
#ifdef TSAN_EXTERNAL_HOOKS
extern "C" const char* __tsan_default_options();
#else
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE
-const char *WEAK __tsan_default_options() {
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_options() {
return "";
}
#endif
@@ -59,7 +59,7 @@ void InitializeFlags(Flags *f, const char *env) {
CommonFlags cf;
cf.CopyFrom(*common_flags());
cf.allow_addr2line = true;
- if (kGoMode) {
+ if (SANITIZER_GO) {
// Does not work as expected for Go: runtime handles SIGABRT and crashes.
cf.abort_on_error = false;
// Go does not have mutexes.
@@ -69,6 +69,7 @@ void InitializeFlags(Flags *f, const char *env) {
cf.print_suppressions = false;
cf.stack_trace_format = " #%n %f %S %M";
cf.exitcode = 66;
+ cf.intercept_tls_get_addr = true;
OverrideCommonFlags(cf);
}
@@ -106,7 +107,7 @@ void InitializeFlags(Flags *f, const char *env) {
f->report_signal_unsafe = false;
}
- SetVerbosity(common_flags()->verbosity);
+ InitializeCommonFlags();
if (Verbosity()) ReportUnrecognizedFlags();
diff --git a/libsanitizer/tsan/tsan_flags.inc b/libsanitizer/tsan/tsan_flags.inc
index 822e560b622..78f5e80fd5c 100644
--- a/libsanitizer/tsan/tsan_flags.inc
+++ b/libsanitizer/tsan/tsan_flags.inc
@@ -59,8 +59,9 @@ TSAN_FLAG(bool, stop_on_start, false,
"Stops on start until __tsan_resume() is called (for debugging).")
TSAN_FLAG(bool, running_on_valgrind, false,
"Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use a smaller history.
TSAN_FLAG(
- int, history_size, kGoMode ? 1 : 3, // There are a lot of goroutines in Go.
+ int, history_size, SANITIZER_GO ? 1 : 3,
"Per-thread history size, controls how many previous memory accesses "
"are remembered per thread. Possible values are [0..7]. "
"history_size=0 amounts to 32K memory accesses. Each next value doubles "
@@ -74,3 +75,7 @@ TSAN_FLAG(int, io_sync, 1,
TSAN_FLAG(bool, die_after_fork, true,
"Die after multi-threaded fork if the child creates new threads.")
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_interceptors_accesses, false,
+ "Ignore reads and writes from all interceptors.")
+TSAN_FLAG(bool, shared_ptr_interceptor, true,
+ "Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
diff --git a/libsanitizer/tsan/tsan_interceptors.cc b/libsanitizer/tsan/tsan_interceptors.cc
index 0c71c008d70..bf5f2d5b66c 100644
--- a/libsanitizer/tsan/tsan_interceptors.cc
+++ b/libsanitizer/tsan/tsan_interceptors.cc
@@ -17,6 +17,7 @@
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
@@ -38,14 +39,9 @@ using namespace __tsan; // NOLINT
#define stderr __stderrp
#endif
-#if SANITIZER_FREEBSD
-#define __libc_realloc __realloc
-#define __libc_calloc __calloc
-#elif SANITIZER_MAC
-#define __libc_malloc REAL(malloc)
-#define __libc_realloc REAL(realloc)
-#define __libc_calloc REAL(calloc)
-#define __libc_free REAL(free)
+#if SANITIZER_ANDROID
+#define __errno_location __errno
+#define mallopt(a, b)
#endif
#if SANITIZER_LINUX || SANITIZER_FREEBSD
@@ -77,9 +73,9 @@ struct ucontext_t {
};
#endif
-#if defined(__x86_64__) || defined(__mips__)
+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
#endif
@@ -90,10 +86,6 @@ extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
-extern "C" int pthread_sigmask(int how, const __sanitizer_sigset_t *set,
- __sanitizer_sigset_t *oldset);
-// REAL(sigfillset) defined in common interceptors.
-DECLARE_REAL(int, sigfillset, __sanitizer_sigset_t *set)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
@@ -101,19 +93,22 @@ extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int *__errno_location();
extern "C" int fileno_unlocked(void *stream);
-extern "C" void *__libc_calloc(uptr size, uptr n);
-extern "C" void *__libc_realloc(void *ptr, uptr size);
extern "C" int dirfd(void *dirp);
-#if !SANITIZER_FREEBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
extern "C" int mallopt(int param, int value);
#endif
extern __sanitizer_FILE *stdout, *stderr;
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+#else
+const int PTHREAD_MUTEX_RECURSIVE = 2;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
+#endif
const int EINVAL = 22;
const int EBUSY = 16;
const int EOWNERDEAD = 130;
-#if !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
@@ -122,7 +117,7 @@ const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
-#ifdef __mips__
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
@@ -147,6 +142,17 @@ typedef long long_t; // NOLINT
typedef void (*sighandler_t)(int sig);
typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
+#if SANITIZER_ANDROID
+struct sigaction_t {
+ u32 sa_flags;
+ union {
+ sighandler_t sa_handler;
+ sigactionhandler_t sa_sigaction;
+ };
+ __sanitizer_sigset_t sa_mask;
+ void (*sa_restorer)();
+};
+#else
struct sigaction_t {
#ifdef __mips__
u32 sa_flags;
@@ -158,6 +164,9 @@ struct sigaction_t {
#if SANITIZER_FREEBSD
int sa_flags;
__sanitizer_sigset_t sa_mask;
+#elif SANITIZER_MAC
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
#else
__sanitizer_sigset_t sa_mask;
#ifndef __mips__
@@ -166,11 +175,12 @@ struct sigaction_t {
void (*sa_restorer)();
#endif
};
+#endif
const sighandler_t SIG_DFL = (sighandler_t)0;
const sighandler_t SIG_IGN = (sighandler_t)1;
const sighandler_t SIG_ERR = (sighandler_t)-1;
-#if SANITIZER_FREEBSD
+#if SANITIZER_FREEBSD || SANITIZER_MAC
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
@@ -199,6 +209,9 @@ struct ThreadSignalContext {
atomic_uintptr_t in_blocking_func;
atomic_uintptr_t have_pending_signals;
SignalDesc pending_signals[kSigCount];
+ // emptyset and oldset are too big for the stack.
+ __sanitizer_sigset_t emptyset;
+ __sanitizer_sigset_t oldset;
};
// The object is 64-byte aligned, because we want hot data to be located in
@@ -231,26 +244,33 @@ static ThreadSignalContext *SigCtx(ThreadState *thr) {
return ctx;
}
+#if !SANITIZER_MAC
static unsigned g_thread_finalize_key;
+#endif
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
, pc_(pc)
, in_ignored_lib_(false) {
- if (!thr_->ignore_interceptors) {
- Initialize(thr);
+ Initialize(thr);
+ if (!thr_->is_inited)
+ return;
+ if (!thr_->ignore_interceptors)
FuncEntry(thr, pc);
- }
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
ThreadIgnoreBegin(thr_, pc_);
}
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
}
ScopedInterceptor::~ScopedInterceptor() {
+ if (!thr_->is_inited)
+ return;
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
if (in_ignored_lib_) {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
@@ -262,6 +282,22 @@ ScopedInterceptor::~ScopedInterceptor() {
}
}
+void ScopedInterceptor::UserCallbackStart() {
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
+ if (in_ignored_lib_) {
+ thr_->in_ignored_lib = false;
+ ThreadIgnoreEnd(thr_, pc_);
+ }
+}
+
+void ScopedInterceptor::UserCallbackEnd() {
+ if (in_ignored_lib_) {
+ thr_->in_ignored_lib = true;
+ ThreadIgnoreBegin(thr_, pc_);
+ }
+ if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
+}
+
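UserCallbackStart/End above temporarily undo the interceptor's ignore state so that memory accesses made by user-supplied callback code are visible to the race detector again. A reduced sketch of the pattern, with a made-up depth counter standing in for ThreadIgnoreBegin/End:

// ignore_scope.cc -- RAII ignore scope with an escape hatch for callbacks.
#include <cstdio>

static int ignore_depth = 0;  // stand-in for per-thread ignore counters
static void IgnoreBegin() { ++ignore_depth; }
static void IgnoreEnd() { --ignore_depth; }

struct ScopedIgnore {
  ScopedIgnore() { IgnoreBegin(); }
  ~ScopedIgnore() { IgnoreEnd(); }
  // Re-enable instrumentation while running code the user provided.
  void UserCallbackStart() { IgnoreEnd(); }
  void UserCallbackEnd() { IgnoreBegin(); }
};

static void user_callback() {
  printf("inside callback, ignore_depth=%d\n", ignore_depth);  // prints 0
}

int main() {
  ScopedIgnore scope;
  printf("inside interceptor, ignore_depth=%d\n", ignore_depth);  // prints 1
  scope.UserCallbackStart();
  user_callback();
  scope.UserCallbackEnd();
}
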
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
@@ -340,12 +376,13 @@ static void at_exit_wrapper(void *arg) {
Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
((void(*)(void *arg))ctx->f)(ctx->arg);
- __libc_free(ctx);
+ InternalFree(ctx);
}
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
void *arg, void *dso);
+#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
if (cur_thread()->in_symbolizer)
return 0;
@@ -354,6 +391,7 @@ TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
SCOPED_INTERCEPTOR_RAW(atexit, f);
return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
+#endif
TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
if (cur_thread()->in_symbolizer)
@@ -364,7 +402,7 @@ TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
void *arg, void *dso) {
- AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
ctx->f = f;
ctx->arg = arg;
Release(thr, pc, (uptr)ctx);
@@ -383,14 +421,14 @@ static void on_exit_wrapper(int status, void *arg) {
Acquire(thr, pc, (uptr)arg);
AtExitCtx *ctx = (AtExitCtx*)arg;
((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
- __libc_free(ctx);
+ InternalFree(ctx);
}
TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
if (cur_thread()->in_symbolizer)
return 0;
SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
- AtExitCtx *ctx = (AtExitCtx*)__libc_malloc(sizeof(AtExitCtx));
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
ctx->f = (void(*)())f;
ctx->arg = arg;
Release(thr, pc, (uptr)ctx);
@@ -409,7 +447,7 @@ static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
JmpBuf *buf = &thr->jmp_bufs[i];
if (buf->sp <= sp) {
uptr sz = thr->jmp_bufs.Size();
- thr->jmp_bufs[i] = thr->jmp_bufs[sz - 1];
+ internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
thr->jmp_bufs.PopBack();
i--;
}
@@ -436,15 +474,19 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
}
static void LongJmp(ThreadState *thr, uptr *env) {
-#if SANITIZER_FREEBSD
+#ifdef __powerpc__
+ uptr mangled_sp = env[0];
+#elif SANITIZER_FREEBSD || SANITIZER_MAC
uptr mangled_sp = env[2];
#elif defined(SANITIZER_LINUX)
# ifdef __aarch64__
uptr mangled_sp = env[13];
+# elif defined(__mips64)
+ uptr mangled_sp = env[1];
# else
uptr mangled_sp = env[6];
# endif
-#endif // SANITIZER_FREEBSD
+#endif
// Find the saved buf by mangled_sp.
for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
JmpBuf *buf = &thr->jmp_bufs[i];
@@ -474,6 +516,11 @@ extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
SetJmp(cur_thread(), sp, mangled_sp);
}
+#if SANITIZER_MAC
+TSAN_INTERCEPTOR(int, setjmp, void *env);
+TSAN_INTERCEPTOR(int, _setjmp, void *env);
+TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
+#else // SANITIZER_MAC
// Not called. Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor_setjmp(void *env);
@@ -512,10 +559,14 @@ DEFINE_REAL(int, setjmp, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp, void *env)
DEFINE_REAL(int, __sigsetjmp, void *env)
+#endif // SANITIZER_MAC
TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
+  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
+  // bad things will happen: we will jump over the ScopedInterceptor dtor and
+  // can leave thr->in_ignored_lib set.
{
- SCOPED_TSAN_INTERCEPTOR(longjmp, env, val);
+ SCOPED_INTERCEPTOR_RAW(longjmp, env, val);
}
LongJmp(cur_thread(), env);
REAL(longjmp)(env, val);
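The mangled stack pointer extracted in LongJmp is the key that ties a longjmp back to the jmp_buf recorded by __tsan_setjmp; the per-platform env indices merely locate where each libc stores it. The lookup itself is a linear scan over the thread's recorded buffers, sketched here with simplified fields (the real JmpBuf lives in tsan's runtime headers):

// Sketch of the buf lookup performed by LongJmp (simplified; JmpBuf is the
// runtime's record of a __tsan_setjmp call).
JmpBuf *FindJmpBuf(ThreadState *thr, uptr mangled_sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->mangled_sp == mangled_sp)
      return buf;   // restore shadow stack / ignore counters from here
  }
  return nullptr;   // no match: the jmp_buf was never seen by __tsan_setjmp
}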
@@ -523,7 +574,7 @@ TSAN_INTERCEPTOR(void, longjmp, uptr *env, int val) {
TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
{
- SCOPED_TSAN_INTERCEPTOR(siglongjmp, env, val);
+ SCOPED_INTERCEPTOR_RAW(siglongjmp, env, val);
}
LongJmp(cur_thread(), env);
REAL(siglongjmp)(env, val);
@@ -532,7 +583,7 @@ TSAN_INTERCEPTOR(void, siglongjmp, uptr *env, int val) {
#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
if (cur_thread()->in_symbolizer)
- return __libc_malloc(size);
+ return InternalAlloc(size);
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(malloc, size);
@@ -549,7 +600,7 @@ TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
if (cur_thread()->in_symbolizer)
- return __libc_calloc(size, n);
+ return InternalCalloc(size, n);
void *p = 0;
{
SCOPED_INTERCEPTOR_RAW(calloc, size, n);
@@ -561,7 +612,7 @@ TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
if (cur_thread()->in_symbolizer)
- return __libc_realloc(p, size);
+ return InternalRealloc(p, size);
if (p)
invoke_free_hook(p);
{
@@ -576,7 +627,7 @@ TSAN_INTERCEPTOR(void, free, void *p) {
if (p == 0)
return;
if (cur_thread()->in_symbolizer)
- return __libc_free(p);
+ return InternalFree(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(free, p);
user_free(thr, pc, p);
@@ -586,7 +637,7 @@ TSAN_INTERCEPTOR(void, cfree, void *p) {
if (p == 0)
return;
if (cur_thread()->in_symbolizer)
- return __libc_free(p);
+ return InternalFree(p);
invoke_free_hook(p);
SCOPED_INTERCEPTOR_RAW(cfree, p);
user_free(thr, pc, p);
@@ -598,69 +649,6 @@ TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
}
#endif
-TSAN_INTERCEPTOR(uptr, strlen, const char *s) {
- SCOPED_TSAN_INTERCEPTOR(strlen, s);
- uptr len = internal_strlen(s);
- MemoryAccessRange(thr, pc, (uptr)s, len + 1, false);
- return len;
-}
-
-TSAN_INTERCEPTOR(void*, memset, void *dst, int v, uptr size) {
- // On FreeBSD we get here from libthr internals on thread initialization.
- if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- SCOPED_TSAN_INTERCEPTOR(memset, dst, v, size);
- MemoryAccessRange(thr, pc, (uptr)dst, size, true);
- }
- return internal_memset(dst, v, size);
-}
-
-TSAN_INTERCEPTOR(void*, memcpy, void *dst, const void *src, uptr size) {
- // On FreeBSD we get here from libthr internals on thread initialization.
- if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- SCOPED_TSAN_INTERCEPTOR(memcpy, dst, src, size);
- MemoryAccessRange(thr, pc, (uptr)dst, size, true);
- MemoryAccessRange(thr, pc, (uptr)src, size, false);
- }
- // On OS X, calling internal_memcpy here will cause memory corruptions,
- // because memcpy and memmove are actually aliases of the same implementation.
- // We need to use internal_memmove here.
- return internal_memmove(dst, src, size);
-}
-
-TSAN_INTERCEPTOR(void*, memmove, void *dst, void *src, uptr n) {
- if (!COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) {
- SCOPED_TSAN_INTERCEPTOR(memmove, dst, src, n);
- MemoryAccessRange(thr, pc, (uptr)dst, n, true);
- MemoryAccessRange(thr, pc, (uptr)src, n, false);
- }
- return REAL(memmove)(dst, src, n);
-}
-
-TSAN_INTERCEPTOR(char*, strchr, char *s, int c) {
- SCOPED_TSAN_INTERCEPTOR(strchr, s, c);
- char *res = REAL(strchr)(s, c);
- uptr len = internal_strlen(s);
- uptr n = res ? (char*)res - (char*)s + 1 : len + 1;
- READ_STRING_OF_LEN(thr, pc, s, len, n);
- return res;
-}
-
-#if !SANITIZER_MAC
-TSAN_INTERCEPTOR(char*, strchrnul, char *s, int c) {
- SCOPED_TSAN_INTERCEPTOR(strchrnul, s, c);
- char *res = REAL(strchrnul)(s, c);
- uptr len = (char*)res - (char*)s + 1;
- READ_STRING(thr, pc, s, len);
- return res;
-}
-#endif
-
-TSAN_INTERCEPTOR(char*, strrchr, char *s, int c) {
- SCOPED_TSAN_INTERCEPTOR(strrchr, s, c);
- MemoryAccessRange(thr, pc, (uptr)s, internal_strlen(s) + 1, false);
- return REAL(strrchr)(s, c);
-}
-
TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
uptr srclen = internal_strlen(src);
@@ -706,7 +694,11 @@ TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
if (res != MAP_FAILED) {
if (fd > 0)
FdAccess(thr, pc, fd);
- MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)res, sz);
}
return res;
}
@@ -721,7 +713,11 @@ TSAN_INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags,
if (res != MAP_FAILED) {
if (fd > 0)
FdAccess(thr, pc, fd);
- MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)res, sz);
}
return res;
}
@@ -735,7 +731,8 @@ TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
if (sz != 0) {
    // If sz == 0, munmap will return EINVAL and won't unmap any memory.
DontNeedShadowFor((uptr)addr, sz);
- ctx->metamap.ResetRange(thr, pc, (uptr)addr, (uptr)sz);
+ ScopedGlobalProcessor sgp;
+ ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
}
int res = REAL(munmap)(addr, sz);
return res;
@@ -782,8 +779,25 @@ TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
}
#endif
+// __cxa_guard_acquire and friends need to be intercepted in a special way -
+// regular interceptors will break statically-linked libstdc++. Linux
+// interceptors are defined as weak functions (so that they don't cause link
+// errors when the user defines them as well), which means they silently
+// auto-disable themselves when such a symbol is already present in the binary.
+// If we link libstdc++ statically, it brings its own __cxa_guard_acquire,
+// which would silently replace our interceptor. That's why on Linux we simply
+// export these interceptors with INTERFACE_ATTRIBUTE.
+// On OS X, we don't support static linking, so we just use a regular
+// interceptor.
+#if SANITIZER_MAC
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+#else
+#define STDCXX_INTERCEPTOR(rettype, name, ...) \
+ extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
+#endif
+
// Used in thread-safe function static initialization.
-extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) {
+STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
for (;;) {
u32 cmp = atomic_load(g, memory_order_acquire);
@@ -799,13 +813,13 @@ extern "C" int INTERFACE_ATTRIBUTE __cxa_guard_acquire(atomic_uint32_t *g) {
}
}
-extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_release(atomic_uint32_t *g) {
+STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
Release(thr, pc, (uptr)g);
atomic_store(g, 1, memory_order_release);
}
-extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) {
+STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
atomic_store(g, 0, memory_order_relaxed);
}
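For context, the guard triple implements C++ thread-safe function-local statics, which is why acquire/release annotations on the guard word are sufficient to model the synchronization. A standalone model of the protocol, assuming a simplified spinning loser (the real __cxa_guard_acquire blocks and also handles the exception/abort path):

#include <atomic>

// Minimal model of the guard protocol (sketch): 0 = uninit, 1 = done,
// 2 = initialization in progress.
static std::atomic<unsigned> guard{0};
static int value;

static void init_once() {
  unsigned s = guard.load(std::memory_order_acquire);   // fast path
  while (s != 1) {
    unsigned expected = 0;
    if (guard.compare_exchange_weak(expected, 2, std::memory_order_acquire)) {
      value = 42;                                 // run the initializer once
      guard.store(1, std::memory_order_release);  // ~ __cxa_guard_release
      return;
    }
    s = guard.load(std::memory_order_acquire);    // loser waits for 'done'
  }
}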
@@ -813,16 +827,21 @@ extern "C" void INTERFACE_ATTRIBUTE __cxa_guard_abort(atomic_uint32_t *g) {
namespace __tsan {
void DestroyThreadState() {
ThreadState *thr = cur_thread();
+ Processor *proc = thr->proc();
ThreadFinish(thr);
+ ProcUnwire(proc, thr);
+ ProcDestroy(proc);
ThreadSignalContext *sctx = thr->signal_ctx;
if (sctx) {
thr->signal_ctx = 0;
UnmapOrDie(sctx, sizeof(*sctx));
}
+ DTLS_Destroy();
cur_thread_finalize();
}
} // namespace __tsan
+#if !SANITIZER_MAC
static void thread_finalize(void *v) {
uptr iter = (uptr)v;
if (iter > 1) {
@@ -834,6 +853,7 @@ static void thread_finalize(void *v) {
}
DestroyThreadState();
}
+#endif
struct ThreadParam {
@@ -851,6 +871,7 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
ThreadState *thr = cur_thread();
// Thread-local state is not initialized yet.
ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_MAC
ThreadIgnoreBegin(thr, 0);
if (pthread_setspecific(g_thread_finalize_key,
(void *)GetPthreadDestructorIterations())) {
@@ -858,8 +879,11 @@ extern "C" void *__tsan_thread_start_func(void *arg) {
Die();
}
ThreadIgnoreEnd(thr, 0);
+#endif
while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
internal_sched_yield();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
ThreadStart(thr, tid, GetTid());
atomic_store(&p->tid, 0, memory_order_release);
}
@@ -1017,12 +1041,12 @@ INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
return REAL(pthread_cond_init)(cond, a);
}
-INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
- void *cond = init_cond(c);
- SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
+ int (*fn)(void *c, void *m, void *abstime), void *c,
+ void *m, void *t) {
MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
MutexUnlock(thr, pc, (uptr)m);
- CondMutexUnlockCtx arg = {&si, thr, pc, m};
+ CondMutexUnlockCtx arg = {si, thr, pc, m};
int res = 0;
// This ensures that we handle mutex lock even in case of pthread_cancel.
// See test/tsan/cond_cancel.cc.
@@ -1030,36 +1054,38 @@ INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
// Enable signal delivery while the thread is blocked.
BlockingCall bc(thr);
res = call_pthread_cancel_with_cleanup(
- (int(*)(void *c, void *m, void *abstime))REAL(pthread_cond_wait),
- cond, m, 0, (void(*)(void *arg))cond_mutex_unlock, &arg);
+ fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
}
- if (res == errno_EOWNERDEAD)
- MutexRepair(thr, pc, (uptr)m);
+ if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
MutexLock(thr, pc, (uptr)m);
return res;
}
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
+ pthread_cond_wait),
+ cond, m, 0);
+}
+
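Factoring the three wait variants through cond_wait keeps the cancellation handling in one place: call_pthread_cancel_with_cleanup ensures the MutexLock annotation still happens if the thread is cancelled mid-wait, since pthreads re-acquires the mutex before cleanup handlers run. For reference, the underlying POSIX idiom the helper mirrors (a sketch, not tsan code):

#include <pthread.h>
#include <stdio.h>

// The cleanup handler runs only if the blocking call is cancelled mid-wait.
static void on_cancel(void *arg) {
  fprintf(stderr, "cancelled while waiting on %p\n", arg);
}

static void wait_with_cleanup(pthread_cond_t *c, pthread_mutex_t *m) {
  pthread_cleanup_push(on_cancel, m);  // registered for the cancellation path
  pthread_cond_wait(c, m);             // cancellation point; pthreads re-locks
                                       // m before handlers run
  pthread_cleanup_pop(0);              // 0 = don't run the handler normally
}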
INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
- MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
- MutexUnlock(thr, pc, (uptr)m);
- CondMutexUnlockCtx arg = {&si, thr, pc, m};
- int res = 0;
- // This ensures that we handle mutex lock even in case of pthread_cancel.
- // See test/tsan/cond_cancel.cc.
- {
- BlockingCall bc(thr);
- res = call_pthread_cancel_with_cleanup(
- REAL(pthread_cond_timedwait), cond, m, abstime,
- (void(*)(void *arg))cond_mutex_unlock, &arg);
- }
- if (res == errno_EOWNERDEAD)
- MutexRepair(thr, pc, (uptr)m);
- MutexLock(thr, pc, (uptr)m);
- return res;
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
+ abstime);
}
+#if SANITIZER_MAC
+INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
+ void *reltime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
+ m, reltime);
+}
+#endif
+
INTERCEPTOR(int, pthread_cond_signal, void *c) {
void *cond = init_cond(c);
SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
@@ -1316,97 +1342,7 @@ TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
return 0;
}
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__xstat, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___XSTAT TSAN_INTERCEPT(__xstat)
-#else
-#define TSAN_MAYBE_INTERCEPT___XSTAT
-#endif
-
-TSAN_INTERCEPTOR(int, stat, const char *path, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC
- SCOPED_TSAN_INTERCEPTOR(stat, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(stat)(path, buf);
-#else
- SCOPED_TSAN_INTERCEPTOR(__xstat, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat)(0, path, buf);
-#endif
-}
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__xstat64, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat64)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___XSTAT64 TSAN_INTERCEPT(__xstat64)
-#else
-#define TSAN_MAYBE_INTERCEPT___XSTAT64
-#endif
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, stat64, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__xstat64, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__xstat64)(0, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT_STAT64 TSAN_INTERCEPT(stat64)
-#else
-#define TSAN_MAYBE_INTERCEPT_STAT64
-#endif
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__lxstat, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___LXSTAT TSAN_INTERCEPT(__lxstat)
-#else
-#define TSAN_MAYBE_INTERCEPT___LXSTAT
-#endif
-
-TSAN_INTERCEPTOR(int, lstat, const char *path, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC
- SCOPED_TSAN_INTERCEPTOR(lstat, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(lstat)(path, buf);
-#else
- SCOPED_TSAN_INTERCEPTOR(__lxstat, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat)(0, path, buf);
-#endif
-}
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__lxstat64, version, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat64)(version, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT___LXSTAT64 TSAN_INTERCEPT(__lxstat64)
-#else
-#define TSAN_MAYBE_INTERCEPT___LXSTAT64
-#endif
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, lstat64, const char *path, void *buf) {
- SCOPED_TSAN_INTERCEPTOR(__lxstat64, 0, path, buf);
- READ_STRING(thr, pc, path, 0);
- return REAL(__lxstat64)(0, path, buf);
-}
-#define TSAN_MAYBE_INTERCEPT_LSTAT64 TSAN_INTERCEPT(lstat64)
-#else
-#define TSAN_MAYBE_INTERCEPT_LSTAT64
-#endif
-
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
if (fd > 0)
@@ -1419,7 +1355,7 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID
SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
@@ -1432,7 +1368,7 @@ TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
#endif
}
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
if (fd > 0)
@@ -1444,7 +1380,7 @@ TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
if (fd > 0)
@@ -1623,32 +1559,6 @@ TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
return res;
}
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, epoll_create, int size) {
- SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
- int fd = REAL(epoll_create)(size);
- if (fd >= 0)
- FdPollCreate(thr, pc, fd);
- return fd;
-}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE TSAN_INTERCEPT(epoll_create)
-#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE
-#endif
-
-#if SANITIZER_LINUX
-TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
- SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
- int fd = REAL(epoll_create1)(flags);
- if (fd >= 0)
- FdPollCreate(thr, pc, fd);
- return fd;
-}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1 TSAN_INTERCEPT(epoll_create1)
-#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1
-#endif
-
TSAN_INTERCEPTOR(int, close, int fd) {
SCOPED_TSAN_INTERCEPTOR(close, fd);
if (fd >= 0)
@@ -1669,7 +1579,7 @@ TSAN_INTERCEPTOR(int, __close, int fd) {
#endif
// glibc guts
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
int fds[64];
@@ -1703,37 +1613,6 @@ TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
}
#endif
-TSAN_INTERCEPTOR(long_t, send, int fd, void *buf, long_t len, int flags) {
- SCOPED_TSAN_INTERCEPTOR(send, fd, buf, len, flags);
- if (fd >= 0) {
- FdAccess(thr, pc, fd);
- FdRelease(thr, pc, fd);
- }
- int res = REAL(send)(fd, buf, len, flags);
- return res;
-}
-
-TSAN_INTERCEPTOR(long_t, sendmsg, int fd, void *msg, int flags) {
- SCOPED_TSAN_INTERCEPTOR(sendmsg, fd, msg, flags);
- if (fd >= 0) {
- FdAccess(thr, pc, fd);
- FdRelease(thr, pc, fd);
- }
- int res = REAL(sendmsg)(fd, msg, flags);
- return res;
-}
-
-TSAN_INTERCEPTOR(long_t, recv, int fd, void *buf, long_t len, int flags) {
- SCOPED_TSAN_INTERCEPTOR(recv, fd, buf, len, flags);
- if (fd >= 0)
- FdAccess(thr, pc, fd);
- int res = REAL(recv)(fd, buf, len, flags);
- if (res >= 0 && fd >= 0) {
- FdAcquire(thr, pc, fd);
- }
- return res;
-}
-
TSAN_INTERCEPTOR(int, unlink, char *path) {
SCOPED_TSAN_INTERCEPTOR(unlink, path);
Release(thr, pc, File2addr(path));
@@ -1814,12 +1693,30 @@ TSAN_INTERCEPTOR(int, rmdir, char *path) {
TSAN_INTERCEPTOR(int, closedir, void *dirp) {
SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
- int fd = dirfd(dirp);
- FdClose(thr, pc, fd);
+ if (dirp) {
+ int fd = dirfd(dirp);
+ FdClose(thr, pc, fd);
+ }
return REAL(closedir)(dirp);
}
#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+ int fd = REAL(epoll_create)(size);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+ int fd = REAL(epoll_create1)(flags);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
if (epfd >= 0)
@@ -1831,12 +1728,7 @@ TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
int res = REAL(epoll_ctl)(epfd, op, fd, ev);
return res;
}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL TSAN_INTERCEPT(epoll_ctl)
-#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_CTL
-#endif
-#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
if (epfd >= 0)
@@ -1846,17 +1738,72 @@ TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
FdAcquire(thr, pc, epfd);
return res;
}
-#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT TSAN_INTERCEPT(epoll_wait)
+
+TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
+ void *sigmask) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+#define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait)
#else
-#define TSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif
+// The following functions are intercepted merely to process pending signals.
+// If the program blocks signal X, we must deliver the signal before the
+// function returns. Similarly, if the program unblocks a signal (or returns
+// from sigsuspend), it's better to deliver the signal straight away.
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigblock, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
+ return REAL(sigblock)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
+ return REAL(sigsetmask)(mask);
+}
+
+TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ return REAL(pthread_sigmask)(how, set, oldset);
+}
+
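None of the four wrappers changes behavior; they exist only so the interceptor machinery runs right after the signal mask changes and can flush anything tsan queued while the thread was inside the runtime. The queue-then-deliver scheme in miniature (all names here are hypothetical stand-ins for the SignalDesc/ProcessPendingSignals machinery in this file):

// Miniature of the deferred-delivery scheme (sketch).
struct SignalDescLite { bool armed; int sig; };

void OnAsyncSignal(SignalDescLite *slot, int sig) {
  // Running the user handler inside the runtime is unsafe; just record it.
  slot->armed = true;
  slot->sig = sig;
}

void OnInterceptorExit(SignalDescLite *slot) {
  // E.g. right after sigsuspend/pthread_sigmask return: now it is safe to
  // deliver synchronously.
  if (slot->armed) {
    slot->armed = false;
    // CallUserSignalHandler(..., slot->sig, ...);  // see below
  }
}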
namespace __tsan {
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
bool sigact, int sig, my_siginfo_t *info, void *uctx) {
if (acquire)
Acquire(thr, 0, (uptr)&sigactions[sig]);
+  // Signals are generally asynchronous, so if we receive a signal when
+  // ignores are enabled we should disable ignores. This is critical for sync
+  // and interceptors, because otherwise we can miss synchronization and
+  // report false races.
+ int ignore_reads_and_writes = thr->ignore_reads_and_writes;
+ int ignore_interceptors = thr->ignore_interceptors;
+ int ignore_sync = thr->ignore_sync;
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = 0;
+ thr->fast_state.ClearIgnoreBit();
+ thr->ignore_interceptors = 0;
+ thr->ignore_sync = 0;
+ }
// Ensure that the handler does not spoil errno.
const int saved_errno = errno;
errno = 99;
@@ -1872,6 +1819,13 @@ static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
else
((sighandler_t)pc)(sig);
}
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = ignore_reads_and_writes;
+ if (ignore_reads_and_writes)
+ thr->fast_state.SetIgnoreBit();
+ thr->ignore_interceptors = ignore_interceptors;
+ thr->ignore_sync = ignore_sync;
+ }
// We do not detect errno spoiling for SIGTERM,
// because some SIGTERM handlers do spoil errno but reraise SIGTERM,
// tsan reports false positive in such case.
@@ -1901,10 +1855,9 @@ void ProcessPendingSignals(ThreadState *thr) {
return;
atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
- // These are too big for stack.
- static THREADLOCAL __sanitizer_sigset_t emptyset, oldset;
- CHECK_EQ(0, REAL(sigfillset)(&emptyset));
- CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &emptyset, &oldset));
+ internal_sigfillset(&sctx->emptyset);
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ CHECK_EQ(res, 0);
for (int sig = 0; sig < kSigCount; sig++) {
SignalDesc *signal = &sctx->pending_signals[sig];
if (signal->armed) {
@@ -1913,7 +1866,8 @@ void ProcessPendingSignals(ThreadState *thr) {
&signal->siginfo, &signal->ctx);
}
}
- CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &oldset, 0));
+ res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ CHECK_EQ(res, 0);
atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}
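Delivery itself runs with every signal blocked, so a nested asynchronous signal cannot re-enter the loop; the new code keeps the oversized sigset_t buffers in the per-thread ThreadSignalContext instead of THREADLOCAL storage. The bracket is plain POSIX, shown standalone (a sketch; tsan uses internal_sigfillset and REAL(pthread_sigmask) to avoid recursing into its own interceptors):

#include <pthread.h>
#include <signal.h>

static void deliver_all_blocked(void (*deliver)(void)) {
  sigset_t all, old;
  sigfillset(&all);                             // block every signal
  pthread_sigmask(SIG_SETMASK, &all, &old);     // enter the critical window
  deliver();                                    // run queued user handlers
  pthread_sigmask(SIG_SETMASK, &old, nullptr);  // restore the caller's mask
}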
@@ -1943,13 +1897,8 @@ void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
(sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
- // We ignore interceptors in blocking functions,
- // temporary enbled them again while we are calling user function.
- int const i = thr->ignore_interceptors;
- thr->ignore_interceptors = 0;
atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
- thr->ignore_interceptors = i;
atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
} else {
// Be very conservative with when we do acquire in this case.
@@ -1987,7 +1936,10 @@ static void rtl_sigaction(int sig, my_siginfo_t *info, void *ctx) {
}
TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
- SCOPED_TSAN_INTERCEPTOR(sigaction, sig, act, old);
+ // Note: if we call REAL(sigaction) directly for any reason without proxying
+ // the signal handler through rtl_sigaction, very bad things will happen.
+ // The handler will run synchronously and corrupt tsan per-thread state.
+ SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
if (old)
internal_memcpy(old, &sigactions[sig], sizeof(*old));
if (act == 0)
@@ -2002,12 +1954,12 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
sigactions[sig].sa_flags = *(volatile int*)&act->sa_flags;
internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
sizeof(sigactions[sig].sa_mask));
-#if !SANITIZER_FREEBSD
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
sigactions[sig].sa_restorer = act->sa_restorer;
#endif
sigaction_t newact;
internal_memcpy(&newact, act, sizeof(newact));
- REAL(sigfillset)(&newact.sa_mask);
+ internal_sigfillset(&newact.sa_mask);
if (act->sa_handler != SIG_IGN && act->sa_handler != SIG_DFL) {
if (newact.sa_flags & SA_SIGINFO)
newact.sa_sigaction = rtl_sigaction;
@@ -2022,7 +1974,7 @@ TSAN_INTERCEPTOR(int, sigaction, int sig, sigaction_t *act, sigaction_t *old) {
TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
sigaction_t act;
act.sa_handler = h;
- REAL(memset)(&act.sa_mask, -1, sizeof(act.sa_mask));
+ internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
act.sa_flags = 0;
sigaction_t old;
int res = sigaction(sig, &act, &old);
@@ -2031,11 +1983,6 @@ TSAN_INTERCEPTOR(sighandler_t, signal, int sig, sighandler_t h) {
return old.sa_handler;
}
-TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
- SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
- return REAL(sigsuspend)(mask);
-}
-
TSAN_INTERCEPTOR(int, raise, int sig) {
SCOPED_TSAN_INTERCEPTOR(raise, sig);
ThreadSignalContext *sctx = SigCtx(thr);
@@ -2103,7 +2050,13 @@ TSAN_INTERCEPTOR(int, fork, int fake) {
return REAL(fork)(fake);
SCOPED_INTERCEPTOR_RAW(fork, fake);
ForkBefore(thr, pc);
- int pid = REAL(fork)(fake);
+ int pid;
+ {
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ ScopedIgnoreInterceptors ignore;
+ pid = REAL(fork)(fake);
+ }
if (pid == 0) {
// child
ForkChildAfter(thr, pc);
@@ -2135,7 +2088,7 @@ TSAN_INTERCEPTOR(int, vfork, int fake) {
return WRAP(fork)(fake);
}
-#if !SANITIZER_MAC
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
void *data);
struct dl_iterate_phdr_data {
@@ -2218,23 +2171,15 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
#undef SANITIZER_INTERCEPT_FGETPWENT
#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
-// __tls_get_addr can be called with mis-aligned stack due to:
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
-// There are two potential issues:
-// 1. Sanitizer code contains a MOVDQA spill (it does not seem to be the case
-// right now). or 2. ProcessPendingSignal calls user handler which contains
-// MOVDQA spill (this happens right now).
-// Since the interceptor only initializes memory for msan, the simplest solution
-// is to disable the interceptor in tsan (other sanitizers do not call
-// signal handlers from COMMON_INTERCEPTOR_ENTER).
-// As __tls_get_addr has been intercepted in the past, to avoid breaking
-// libtsan ABI, keep it around, but just call the real function.
+// We define our own.
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#define NEED_TLS_GET_ADDR
#endif
#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ INTERCEPT_FUNCTION_VER(name, ver)
#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
@@ -2321,6 +2266,10 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
+ MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
@@ -2335,6 +2284,12 @@ static void HandleRecvmsg(ThreadState *thr, uptr pc,
*begin = *end = 0; \
}
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
+
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#define TSAN_SYSCALL() \
@@ -2357,7 +2312,7 @@ struct ScopedSyscall {
}
};
-#if !SANITIZER_MAC
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
TSAN_SYSCALL();
MemoryAccessRange(thr, pc, p, s, write);
@@ -2452,8 +2407,27 @@ static void syscall_post_fork(uptr pc, int pid) {
#include "sanitizer_common/sanitizer_common_syscalls.inc"
#ifdef NEED_TLS_GET_ADDR
+// Define our own interceptor instead of sanitizer_common's for three reasons:
+// 1. It must not process pending signals.
+//    Signal handlers may contain MOVDQA instructions (see below).
+// 2. It must be as simple as possible so as not to contain MOVDQA itself.
+// 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
+//    which is empty for tsan (meant only for msan).
+// Note: __tls_get_addr can be called with a mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// So the interceptor must work with a mis-aligned stack and, in particular,
+// must not execute MOVDQA with stack addresses.
TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
- return REAL(__tls_get_addr)(arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ ThreadState *thr = cur_thread();
+ if (!thr)
+ return res;
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, thr->tls_size);
+ if (!dtv)
+ return res;
+ // New DTLS block has been allocated.
+ MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+ return res;
}
#endif
@@ -2468,7 +2442,7 @@ static void finalize(void *arg) {
Die();
}
-#if !SANITIZER_MAC
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
static void unreachable() {
Report("FATAL: ThreadSanitizer: unreachable called\n");
Die();
@@ -2517,13 +2491,6 @@ void InitializeInterceptors() {
TSAN_MAYBE_INTERCEPT_PVALLOC;
TSAN_INTERCEPT(posix_memalign);
- TSAN_INTERCEPT(strlen);
- TSAN_INTERCEPT(memset);
- TSAN_INTERCEPT(memcpy);
- TSAN_INTERCEPT(memmove);
- TSAN_INTERCEPT(strchr);
- TSAN_INTERCEPT(strchrnul);
- TSAN_INTERCEPT(strrchr);
TSAN_INTERCEPT(strcpy); // NOLINT
TSAN_INTERCEPT(strncpy);
TSAN_INTERCEPT(strdup);
@@ -2566,14 +2533,6 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(pthread_once);
- TSAN_INTERCEPT(stat);
- TSAN_MAYBE_INTERCEPT___XSTAT;
- TSAN_MAYBE_INTERCEPT_STAT64;
- TSAN_MAYBE_INTERCEPT___XSTAT64;
- TSAN_INTERCEPT(lstat);
- TSAN_MAYBE_INTERCEPT___LXSTAT;
- TSAN_MAYBE_INTERCEPT_LSTAT64;
- TSAN_MAYBE_INTERCEPT___LXSTAT64;
TSAN_INTERCEPT(fstat);
TSAN_MAYBE_INTERCEPT___FXSTAT;
TSAN_MAYBE_INTERCEPT_FSTAT64;
@@ -2594,18 +2553,13 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(connect);
TSAN_INTERCEPT(bind);
TSAN_INTERCEPT(listen);
- TSAN_MAYBE_INTERCEPT_EPOLL_CREATE;
- TSAN_MAYBE_INTERCEPT_EPOLL_CREATE1;
+ TSAN_MAYBE_INTERCEPT_EPOLL;
TSAN_INTERCEPT(close);
TSAN_MAYBE_INTERCEPT___CLOSE;
TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
TSAN_INTERCEPT(pipe);
TSAN_INTERCEPT(pipe2);
- TSAN_INTERCEPT(send);
- TSAN_INTERCEPT(sendmsg);
- TSAN_INTERCEPT(recv);
-
TSAN_INTERCEPT(unlink);
TSAN_INTERCEPT(tmpfile);
TSAN_MAYBE_INTERCEPT_TMPFILE64;
@@ -2616,12 +2570,12 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(rmdir);
TSAN_INTERCEPT(closedir);
- TSAN_MAYBE_INTERCEPT_EPOLL_CTL;
- TSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
-
TSAN_INTERCEPT(sigaction);
TSAN_INTERCEPT(signal);
TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(sigblock);
+ TSAN_INTERCEPT(sigsetmask);
+ TSAN_INTERCEPT(pthread_sigmask);
TSAN_INTERCEPT(raise);
TSAN_INTERCEPT(kill);
TSAN_INTERCEPT(pthread_kill);
@@ -2633,7 +2587,9 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(fork);
TSAN_INTERCEPT(vfork);
+#if !SANITIZER_ANDROID
TSAN_INTERCEPT(dl_iterate_phdr);
+#endif
TSAN_INTERCEPT(on_exit);
TSAN_INTERCEPT(__cxa_atexit);
TSAN_INTERCEPT(_exit);
@@ -2642,7 +2598,7 @@ void InitializeInterceptors() {
TSAN_INTERCEPT(__tls_get_addr);
#endif
-#if !SANITIZER_MAC
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
// Need to setup it, because interceptors check that the function is resolved.
// But atexit is emitted directly into the module, so can't be resolved.
REAL(atexit) = (int(*)(void(*)()))unreachable;
@@ -2653,12 +2609,50 @@ void InitializeInterceptors() {
Die();
}
+#if !SANITIZER_MAC
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Printf("ThreadSanitizer: failed to create thread key\n");
Die();
}
+#endif
FdInit();
}
} // namespace __tsan
+
+// Invisible barrier for tests.
+// There were several unsuccessful iterations of this functionality:
+// 1. Initially it was implemented in user code using
+//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
+//    MacOS, and futexes are Linux-specific in this regard.
+// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
+//    "as-if synchronized via sleep" messages in reports, which failed some
+//    output tests.
+// 3. Then we switched to atomics+sched_yield. But this produced tons of
+//    tsan-visible events, which led to "failed to restore stack trace"
+//    failures.
+// Note that the no_sanitize_thread attribute does not turn off atomic
+// interception, so attaching it to a function defined in user code does not
+// help. That's why we now have what we have.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
+ if (count >= (1 << 8)) {
+ Printf("barrier_init: count is too large (%d)\n", count);
+ Die();
+ }
+  // The low 8 bits hold the thread count; the remaining bits count entered
+  // threads.
+ *barrier = count;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_testonly_barrier_wait(u64 *barrier) {
+ unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
+ unsigned old_epoch = (old >> 8) / (old & 0xff);
+ for (;;) {
+ unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
+ unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
+ if (cur_epoch != old_epoch)
+ return;
+ internal_sched_yield();
+ }
+}
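Usage, for reference: a test declares one u64 barrier, initializes it once with the participant count, and every thread calls wait; the rendezvous produces no tsan-visible events. A minimal skeleton, assuming two participants as most tests use:

// Hypothetical test skeleton using the invisible barrier.
#include <pthread.h>

extern "C" void __tsan_testonly_barrier_init(unsigned long long *b,
                                             unsigned count);
extern "C" void __tsan_testonly_barrier_wait(unsigned long long *b);

unsigned long long barrier;

void *Thread(void *arg) {
  __tsan_testonly_barrier_wait(&barrier);  // rendezvous, invisible to tsan
  // ... racy accesses under test go here ...
  return nullptr;
}

int main() {
  __tsan_testonly_barrier_init(&barrier, 2);  // two participants
  pthread_t t;
  pthread_create(&t, nullptr, Thread, nullptr);
  __tsan_testonly_barrier_wait(&barrier);
  pthread_join(t, nullptr);
}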
diff --git a/libsanitizer/tsan/tsan_interceptors.h b/libsanitizer/tsan/tsan_interceptors.h
index ed68eb96bf8..97fa5085a78 100644
--- a/libsanitizer/tsan/tsan_interceptors.h
+++ b/libsanitizer/tsan/tsan_interceptors.h
@@ -8,6 +8,8 @@ class ScopedInterceptor {
public:
ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
~ScopedInterceptor();
+ void UserCallbackStart();
+ void UserCallbackEnd();
private:
ThreadState *const thr_;
const uptr pc_;
@@ -30,18 +32,16 @@ class ScopedInterceptor {
Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
Die(); \
} \
- if (thr->ignore_interceptors || thr->in_ignored_lib) \
+ if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
return REAL(func)(__VA_ARGS__); \
/**/
-#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
+ si.UserCallbackStart();
-#if SANITIZER_FREEBSD
-#define __libc_free __free
-#define __libc_malloc __malloc
-#endif
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
+ si.UserCallbackEnd();
-extern "C" void __libc_free(void *ptr);
-extern "C" void *__libc_malloc(uptr size);
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
#endif // TSAN_INTERCEPTORS_H
diff --git a/libsanitizer/tsan/tsan_interceptors_mac.cc b/libsanitizer/tsan/tsan_interceptors_mac.cc
new file mode 100644
index 00000000000..eaf866d6c75
--- /dev/null
+++ b/libsanitizer/tsan/tsan_interceptors_mac.cc
@@ -0,0 +1,357 @@
+//===-- tsan_interceptors_mac.cc ------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific interceptors.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+
+#include <libkern/OSAtomic.h>
+#include <xpc/xpc.h>
+
+typedef long long_t; // NOLINT
+
+namespace __tsan {
+
+// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
+// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
+// aliases of each other, and we cannot have different interceptors for them
+// because they're actually the same function. Thus, we have to stay
+// conservative and treat the non-barrier versions as mo_acq_rel.
+static const morder kMacOrderBarrier = mo_acq_rel;
+static const morder kMacOrderNonBarrier = mo_acq_rel;
+
+#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
+ m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderBarrier)
+
+#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
+ m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_X)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_1)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
+ OSATOMIC_INTERCEPTOR_MINUS_1)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
+ OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+
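Expanding one instantiation makes the mapping concrete; the 32-bit non-barrier OSAtomicAdd becomes, roughly (whitespace added, not a literal preprocessor dump, and relying on the a32/kMacOrderNonBarrier definitions above):

// Rough expansion of one generated interceptor.
TSAN_INTERCEPTOR(int32_t, OSAtomicAdd32, int32_t x, volatile int32_t *ptr) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicAdd32, x, ptr);
  // fetch_add returns the old value; OSAtomicAdd32 returns the new one,
  // hence the trailing "+ x" in OSATOMIC_INTERCEPTOR_PLUS_X.
  return __tsan_atomic32_fetch_add((volatile a32 *)ptr, x,
                                   kMacOrderNonBarrier) + x;
}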
+#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
+ TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderNonBarrier, kMacOrderNonBarrier); \
+ } \
+ \
+ TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
+ t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderBarrier, kMacOrderNonBarrier); \
+ }
+
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
+ long_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
+ void *)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
+ int32_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
+ int64_t)
+
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
+ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
+ char *byte_ptr = ((char *)ptr) + (n >> 3); \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
+ char orig_byte = op((a8 *)byte_ptr, mask, mo); \
+ return orig_byte & bit; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
+ true)
+
+TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
+ void *item = REAL(OSAtomicDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
+#if !SANITIZER_IOS
+
+TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicFifoEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
+ void *item = REAL(OSAtomicFifoDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+#endif
+
+TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockLock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
+ REAL(OSSpinLockLock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockTry)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
+ bool result = REAL(OSSpinLockTry)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockUnlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(OSSpinLockUnlock)(lock);
+}
+
+TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_lock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
+ REAL(os_lock_lock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_trylock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
+ bool result = REAL(os_lock_trylock)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_unlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(os_lock_unlock)(lock);
+}
+
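The ordering of annotation and real call in the lock interceptors above is the generic recipe: Acquire only after the real lock is held, Release before the real unlock, so everything inside the critical section is ordered for tsan exactly as it is for the hardware. As a reusable skeleton (a sketch; Acquire/Release are the tsan-internal annotations used throughout this file, and the callables stand in for the real primitive):

template <class LockFn, class Addr>
void AnnotatedLock(ThreadState *thr, uptr pc, Addr *l, LockFn real_lock) {
  real_lock(l);               // block until the lock is actually held
  Acquire(thr, pc, (uptr)l);  // then observe the previous holder's writes
}

template <class UnlockFn, class Addr>
void AnnotatedUnlock(ThreadState *thr, uptr pc, Addr *l, UnlockFn real_unlock) {
  Release(thr, pc, (uptr)l);  // publish our writes to the next acquirer
  real_unlock(l);             // only then actually release
}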
+TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
+ xpc_connection_t connection, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
+ handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_set_event_handler)(connection, new_handler);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
+ Release(thr, pc, (uptr)connection);
+ dispatch_block_t new_barrier = ^() {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ barrier();
+ };
+ REAL(xpc_connection_send_barrier)(connection, new_barrier);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
+ xpc_connection_t connection, xpc_object_t message,
+ dispatch_queue_t replyq, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
+ message, replyq, handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_send_message_with_reply)
+ (connection, message, replyq, new_handler);
+}
+
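All three XPC wrappers share one scheme: Release on the connection before handing the block or message to libxpc, then Acquire on the connection inside the wrapped block before user code runs, giving the handler a happens-before edge from the sender's state. A sketch of the user-visible effect (shared_config is a hypothetical example variable):

// Without the Release/Acquire pair, tsan cannot see that the XPC runtime
// orders the setup before the handler, and this would be reported as a race.
#include <xpc/xpc.h>

int shared_config;  // written before send, read in the handler

void Setup(xpc_connection_t conn) {
  shared_config = 42;                  // happens-before the barrier block
  xpc_connection_send_barrier(conn, ^{
    int v = shared_config;             // ordered via the interceptor's
    (void)v;                           // Release(conn) / Acquire(conn)
  });
}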
+// On macOS, libc++ is always linked dynamically, so intercepting works the
+// usual way.
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+
+namespace {
+struct fake_shared_weak_count {
+ volatile a64 shared_owners;
+ volatile a64 shared_weak_owners;
+ virtual void _unused_0x0() = 0;
+ virtual void _unused_0x8() = 0;
+ virtual void on_zero_shared() = 0;
+ virtual void _unused_0x18() = 0;
+ virtual void on_zero_shared_weak() = 0;
+};
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// Shared and weak pointers in C++ maintain reference counts via atomics in
+// libc++.dylib, which are TSan-invisible, and this leads to false positives in
+// destructor code. This interceptor re-implements the whole function so that
+// the mo_acq_rel semantics of the atomic decrement are visible.
+//
+// Unfortunately, this interceptor cannot simply Acquire/Release some sync
+// object and call the original function, because it would have a race between
+// the sync and the destruction of the object. Calling both under a lock will
+// not work because the destructor can invoke this interceptor again (and even
+// in a different thread, so recursive locks don't help).
+STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
+ o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
+ 0) {
+ Acquire(thr, pc, (uptr)&o->shared_weak_owners);
+ o->on_zero_shared_weak();
+ }
+ }
+}
+
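The false positive this removes is easy to reproduce: two threads drop the last two references, and whichever one hits zero runs the destructor, reading memory the other thread wrote, with the only ordering coming from libc++'s internal atomic decrement. For instance (reported as a race without this interceptor, or with the shared_ptr_interceptor flag disabled):

#include <memory>
#include <thread>

int g_sink;
struct Node {
  int payload = 0;
  ~Node() { g_sink = payload; }  // destructor reads the other thread's write
};

int main() {
  auto p = std::make_shared<Node>();
  std::thread t([q = p]() mutable {
    q->payload = 1;  // write, then drop one reference
    q.reset();
  });
  p.reset();         // whichever reset is last runs ~Node()
  t.join();
}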
+namespace {
+struct call_once_callback_args {
+ void (*orig_func)(void *arg);
+ void *orig_arg;
+ void *flag;
+};
+
+void call_once_callback_wrapper(void *arg) {
+ call_once_callback_args *new_args = (call_once_callback_args *)arg;
+ new_args->orig_func(new_args->orig_arg);
+ __tsan_release(new_args->flag);
+}
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __call_once(volatile unsigned long&, void*, void(*)(void*));
+// C++11 call_once is implemented via an internal function __call_once which is
+// inside libc++.dylib, and the atomic release store inside it is thus
+// TSan-invisible. To avoid false positives, this interceptor wraps the callback
+// function and performs an explicit Release after the user code has run.
+STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
+ void *arg, void (*func)(void *arg)) {
+ call_once_callback_args new_args = {func, arg, flag};
+ REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
+ call_once_callback_wrapper);
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
diff --git a/libsanitizer/tsan/tsan_interface.h b/libsanitizer/tsan/tsan_interface.h
index 4ff6fa1831e..066dde6f543 100644
--- a/libsanitizer/tsan/tsan_interface.h
+++ b/libsanitizer/tsan/tsan_interface.h
@@ -15,6 +15,7 @@
#define TSAN_INTERFACE_H
#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::uptr;
// This header should NOT include any other headers.
// All functions in this header are extern "C" and start with __tsan_.
@@ -23,6 +24,8 @@
extern "C" {
#endif
+#if !SANITIZER_GO
+
// This function should be called at the very beginning of the process,
// before any instrumented code is executed and before any call to malloc.
SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
@@ -73,8 +76,296 @@ void __tsan_read_range(void *addr, unsigned long size); // NOLINT
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+// The user may provide a function that will be called right when TSan detects
+// an error. The argument 'report' is an opaque pointer that can be used to
+// gather additional information using other TSan report API functions.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_report(void *report);
+
+// If TSan is currently reporting a detected issue on the current thread,
+// returns an opaque pointer to the current report. Otherwise returns NULL.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report();
+
+// Returns a report's description (issue type), the number of duplicate issues
+// found, the sizes of the report's arrays (stack traces, memory operations,
+// locations, mutexes, threads, unique thread IDs), and the stack trace of a
+// sleep() call if one was involved in the issue.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size);
+
+// Returns information about stack traces included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size);
+
+// Returns information about memory operations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size);
+
+// Returns information about locations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size);
+
+// Returns information about mutexes included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size);
+
+// Returns information about threads included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, uptr *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size);
+
+// Returns information about unique thread IDs included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+
+#endif // SANITIZER_GO
+
#ifdef __cplusplus
} // extern "C"
#endif
+namespace __tsan {
+
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
+
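Since the enum is ABI-frozen to libc++'s memory_order numbering, the correspondence can be restated as a compile-time check (an illustration, not part of the header):

#include <atomic>

// Sanity check for the ABI note above.
static_assert(
    static_cast<int>(std::memory_order_relaxed) == 0 /* mo_relaxed */ &&
    static_cast<int>(std::memory_order_consume) == 1 /* mo_consume */ &&
    static_cast<int>(std::memory_order_acquire) == 2 /* mo_acquire */ &&
    static_cast<int>(std::memory_order_release) == 3 /* mo_release */ &&
    static_cast<int>(std::memory_order_acq_rel) == 4 /* mo_acq_rel */ &&
    static_cast<int>(std::memory_order_seq_cst) == 5 /* mo_seq_cst */,
    "morder must match std::memory_order numbering");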
+struct ThreadState;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+} // extern "C"
+
+} // namespace __tsan
+
#endif // TSAN_INTERFACE_H
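
For illustration, a minimal sketch of how these entry points are reached, assuming the public compiler-rt header <sanitizer/tsan_interface_atomic.h>; load_counter is a hypothetical test function, not part of this patch. Compiler instrumentation rewrites atomic operations into calls such as:

// Hypothetical, standalone illustration. __tsan_memory_order_relaxed in the
// public header corresponds to mo_relaxed in the morder enum declared above.
#include <sanitizer/tsan_interface_atomic.h>

int load_counter(const volatile int *counter) {
  // What a relaxed atomic load of *counter conceptually lowers to under
  // -fsanitize=thread (the compiler emits this call during instrumentation).
  return __tsan_atomic32_load(counter, __tsan_memory_order_relaxed);
}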
diff --git a/libsanitizer/tsan/tsan_interface_atomic.cc b/libsanitizer/tsan/tsan_interface_atomic.cc
index 0ded1aa1099..5c5c34f3b87 100644
--- a/libsanitizer/tsan/tsan_interface_atomic.cc
+++ b/libsanitizer/tsan/tsan_interface_atomic.cc
@@ -21,39 +21,16 @@
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
+#include "tsan_interface.h"
#include "tsan_rtl.h"
using namespace __tsan; // NOLINT
-// These should match declarations from public tsan_interface_atomic.h header.
-typedef unsigned char a8;
-typedef unsigned short a16; // NOLINT
-typedef unsigned int a32;
-typedef unsigned long long a64; // NOLINT
-#if !defined(SANITIZER_GO) && (defined(__SIZEOF_INT128__) \
- || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
-__extension__ typedef __int128 a128;
-# define __TSAN_HAS_INT128 1
-#else
-# define __TSAN_HAS_INT128 0
-#endif
-
-#if !defined(SANITIZER_GO) && __TSAN_HAS_INT128
+#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif
-// Part of ABI, do not change.
-// http://llvm.org/viewvc/llvm-project/libcxx/trunk/include/atomic?view=markup
-typedef enum {
- mo_relaxed,
- mo_consume,
- mo_acquire,
- mo_release,
- mo_acq_rel,
- mo_seq_cst
-} morder;
-
static bool IsLoadOrder(morder mo) {
return mo == mo_relaxed || mo == mo_consume
|| mo == mo_acquire || mo == mo_seq_cst;
@@ -123,7 +100,7 @@ template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
-#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !defined(SANITIZER_GO) \
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
&& __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
SpinMutexLock lock(&mutex128);
@@ -197,7 +174,7 @@ static int SizeLog() {
// this leads to false negatives only in very obscure cases.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}
@@ -233,7 +210,7 @@ static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
return atomic_load(to_atomic(a), to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
SpinMutexLock lock(&mutex128);
return *a;
@@ -263,7 +240,7 @@ static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
atomic_store(to_atomic(a), v, to_mo(mo));
}
-#if __TSAN_HAS_INT128 && !defined(SANITIZER_GO)
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
SpinMutexLock lock(&mutex128);
*a = v;
@@ -455,7 +432,7 @@ static T AtomicCAS(ThreadState *thr, uptr pc,
return c;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
__sync_synchronize();
}
@@ -467,7 +444,7 @@ static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
#endif
// Interface functions follow.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// C/C++
@@ -866,7 +843,7 @@ void __tsan_atomic_signal_fence(morder mo) {
}
} // extern "C"
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
// Go
@@ -949,4 +926,4 @@ void __tsan_go_atomic64_compare_exchange(
*(bool*)(a+24) = (cur == cmp);
}
} // extern "C"
-#endif // #ifndef SANITIZER_GO
+#endif // #if !SANITIZER_GO
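
The #ifdef-to-#if conversions in this file depend on SANITIZER_GO now being defined to a value (0 or 1) by the build system, rather than being merely defined or left undefined. A minimal sketch of the difference (hypothetical, not part of the patch):

#define SANITIZER_GO 0   // the build always defines the macro to 0 or 1

#if !SANITIZER_GO        // tests the value: this branch is compiled when 0
// C/C++-only runtime code
#endif

#ifndef SANITIZER_GO     // old spelling: tests definedness only, so the
// definition above would wrongly hide this branch
#endif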
diff --git a/libsanitizer/tsan/tsan_interface_java.cc b/libsanitizer/tsan/tsan_interface_java.cc
index 934ccc3ef9c..d1638f2aa9d 100644
--- a/libsanitizer/tsan/tsan_interface_java.cc
+++ b/libsanitizer/tsan/tsan_interface_java.cc
@@ -109,7 +109,7 @@ void __tsan_java_free(jptr ptr, jptr size) {
CHECK_GE(ptr, jctx->heap_begin);
CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
- ctx->metamap.FreeRange(thr, pc, ptr, size);
+ ctx->metamap.FreeRange(thr->proc(), ptr, size);
}
void __tsan_java_move(jptr src, jptr dst, jptr size) {
diff --git a/libsanitizer/tsan/tsan_libdispatch_mac.cc b/libsanitizer/tsan/tsan_libdispatch_mac.cc
index 5b39665d5d2..10c70a831c6 100644
--- a/libsanitizer/tsan/tsan_libdispatch_mac.cc
+++ b/libsanitizer/tsan/tsan_libdispatch_mac.cc
@@ -19,11 +19,205 @@
#include "tsan_platform.h"
#include "tsan_rtl.h"
+#include <Block.h>
#include <dispatch/dispatch.h>
#include <pthread.h>
+typedef long long_t; // NOLINT
+
namespace __tsan {
+typedef struct {
+ dispatch_queue_t queue;
+ void *orig_context;
+ dispatch_function_t orig_work;
+ bool free_context_in_callback;
+ bool submitted_synchronously;
+ bool is_barrier_block;
+ uptr non_queue_sync_object;
+} tsan_block_context_t;
+
+// The offsets of different fields of the dispatch_queue_t structure, exported
+// by libdispatch.dylib.
+extern "C" struct dispatch_queue_offsets_s {
+ const uint16_t dqo_version;
+ const uint16_t dqo_label;
+ const uint16_t dqo_label_size;
+ const uint16_t dqo_flags;
+ const uint16_t dqo_flags_size;
+ const uint16_t dqo_serialnum;
+ const uint16_t dqo_serialnum_size;
+ const uint16_t dqo_width;
+ const uint16_t dqo_width_size;
+ const uint16_t dqo_running;
+ const uint16_t dqo_running_size;
+ const uint16_t dqo_suspend_cnt;
+ const uint16_t dqo_suspend_cnt_size;
+ const uint16_t dqo_target_queue;
+ const uint16_t dqo_target_queue_size;
+ const uint16_t dqo_priority;
+ const uint16_t dqo_priority_size;
+} dispatch_queue_offsets;
+
+static bool IsQueueSerial(dispatch_queue_t q) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_width_size, 2);
+ uptr width = *(uint16_t *)(((uptr)q) + dispatch_queue_offsets.dqo_width);
+ CHECK_NE(width, 0);
+ return width == 1;
+}
+
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
+ dispatch_queue_t target_queue =
+ *(dispatch_queue_t *)(((uptr)source) +
+ dispatch_queue_offsets.dqo_target_queue);
+ CHECK_NE(target_queue, 0);
+ return target_queue;
+}
+
+static tsan_block_context_t *AllocContext(ThreadState *thr, uptr pc,
+ dispatch_queue_t queue,
+ void *orig_context,
+ dispatch_function_t orig_work) {
+ tsan_block_context_t *new_context =
+ (tsan_block_context_t *)user_alloc(thr, pc, sizeof(tsan_block_context_t));
+ new_context->queue = queue;
+ new_context->orig_context = orig_context;
+ new_context->orig_work = orig_work;
+ new_context->free_context_in_callback = true;
+ new_context->submitted_synchronously = false;
+ new_context->is_barrier_block = false;
+ return new_context;
+}
+
+static void dispatch_callback_wrap(void *param) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
+ tsan_block_context_t *context = (tsan_block_context_t *)param;
+ bool is_queue_serial = context->queue && IsQueueSerial(context->queue);
+ uptr sync_ptr = (uptr)context->queue ?: context->non_queue_sync_object;
+
+ uptr serial_sync = (uptr)sync_ptr;
+ uptr concurrent_sync = ((uptr)sync_ptr) + sizeof(uptr);
+ uptr submit_sync = (uptr)context;
+ bool serial_task = context->is_barrier_block || is_queue_serial;
+
+ Acquire(thr, pc, submit_sync);
+ Acquire(thr, pc, serial_sync);
+ if (serial_task) Acquire(thr, pc, concurrent_sync);
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ context->orig_work(context->orig_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+
+ Release(thr, pc, serial_task ? serial_sync : concurrent_sync);
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ if (context->free_context_in_callback) user_free(thr, pc, context);
+}
+
+static void invoke_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
+}
+
+static void invoke_and_release_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
+ Block_release(block);
+}
+
+#define DISPATCH_INTERCEPT_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ dispatch_block_t heap_block = Block_copy(block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ tsan_block_context_t *new_context = \
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block); \
+ new_context->is_barrier_block = barrier; \
+ Release(thr, pc, (uptr)new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ dispatch_block_t heap_block = Block_copy(block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ tsan_block_context_t new_context = { \
+ q, heap_block, &invoke_and_release_block, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
+ }
+
+#define DISPATCH_INTERCEPT_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ tsan_block_context_t *new_context = \
+ AllocContext(thr, pc, q, context, work); \
+ new_context->is_barrier_block = barrier; \
+ Release(thr, pc, (uptr)new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ tsan_block_context_t new_context = { \
+ q, context, work, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
+ }
+
+// We wrap dispatch_async, dispatch_sync and friends: each wrapper allocates a
+// new context that is used to synchronize (we release the context before
+// submitting, and the callback acquires it before executing the original
+// callback).
+DISPATCH_INTERCEPT_B(dispatch_async, false)
+DISPATCH_INTERCEPT_B(dispatch_barrier_async, true)
+DISPATCH_INTERCEPT_F(dispatch_async_f, false)
+DISPATCH_INTERCEPT_F(dispatch_barrier_async_f, true)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_sync, false)
+DISPATCH_INTERCEPT_SYNC_B(dispatch_barrier_sync, true)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_sync_f, false)
+DISPATCH_INTERCEPT_SYNC_F(dispatch_barrier_sync_f, true)
+
+TSAN_INTERCEPTOR(void, dispatch_after, dispatch_time_t when,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after, when, queue, block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ tsan_block_context_t *new_context =
+ AllocContext(thr, pc, queue, heap_block, &invoke_and_release_block);
+ Release(thr, pc, (uptr)new_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_after_f)(when, queue, new_context, dispatch_callback_wrap);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after_f, when, queue, context, work);
+ WRAP(dispatch_after)(when, queue, ^(void) {
+ work(context);
+ });
+}
+
// GCD's dispatch_once implementation has a fast path that contains a racy read
// and it's inlined into user's code. Furthermore, this fast path doesn't
// establish proper happens-before relations between the initialization and
@@ -39,12 +233,14 @@ namespace __tsan {
#undef dispatch_once
TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
dispatch_block_t block) {
- SCOPED_TSAN_INTERCEPTOR(dispatch_once, predicate, block);
+ SCOPED_INTERCEPTOR_RAW(dispatch_once, predicate, block);
atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
u32 v = atomic_load(a, memory_order_acquire);
if (v == 0 &&
atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) {
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
block();
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
Release(thr, pc, (uptr)a);
atomic_store(a, 2, memory_order_release);
} else {
@@ -59,10 +255,423 @@ TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
#undef dispatch_once_f
TSAN_INTERCEPTOR(void, dispatch_once_f, dispatch_once_t *predicate,
void *context, dispatch_function_t function) {
- SCOPED_TSAN_INTERCEPTOR(dispatch_once_f, predicate, context, function);
+ SCOPED_INTERCEPTOR_RAW(dispatch_once_f, predicate, context, function);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
WRAP(dispatch_once)(predicate, ^(void) {
function(context);
});
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_semaphore_signal,
+ dispatch_semaphore_t dsema) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_signal, dsema);
+ Release(thr, pc, (uptr)dsema);
+ return REAL(dispatch_semaphore_signal)(dsema);
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_semaphore_wait, dispatch_semaphore_t dsema,
+ dispatch_time_t timeout) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_wait, dsema, timeout);
+ long_t result = REAL(dispatch_semaphore_wait)(dsema, timeout);
+ if (result == 0) Acquire(thr, pc, (uptr)dsema);
+ return result;
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_group_wait, dispatch_group_t group,
+ dispatch_time_t timeout) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_wait, group, timeout);
+ long_t result = REAL(dispatch_group_wait)(group, timeout);
+ if (result == 0) Acquire(thr, pc, (uptr)group);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_leave, dispatch_group_t group) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_leave, group);
+ // Acquired in the group notification callback in dispatch_group_notify[_f].
+ Release(thr, pc, (uptr)group);
+ REAL(dispatch_group_leave)(group);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_async, dispatch_group_t group,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_async, group, queue, block);
+ dispatch_retain(group);
+ dispatch_group_enter(group);
+ __block dispatch_block_t block_copy = (dispatch_block_t)_Block_copy(block);
+ WRAP(dispatch_async)(queue, ^(void) {
+ block_copy();
+ _Block_release(block_copy);
+ WRAP(dispatch_group_leave)(group);
+ dispatch_release(group);
+ });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_async_f, group, queue, context, work);
+ dispatch_retain(group);
+ dispatch_group_enter(group);
+ WRAP(dispatch_async)(queue, ^(void) {
+ work(context);
+ WRAP(dispatch_group_leave)(group);
+ dispatch_release(group);
+ });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_notify, dispatch_group_t group,
+ dispatch_queue_t q, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify, group, q, block);
+
+ // Retain the group so it is still available in the callback (otherwise
+ // it could already be destroyed). Released in the callback.
+ dispatch_retain(group);
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(^(void) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_read_callback);
+ // Released when leaving the group (dispatch_group_leave).
+ Acquire(thr, pc, (uptr)group);
+ }
+ dispatch_release(group);
+ block();
+ });
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ tsan_block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ new_context->is_barrier_block = true;
+ Release(thr, pc, (uptr)new_context);
+ REAL(dispatch_group_notify_f)(group, q, new_context, dispatch_callback_wrap);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_notify_f, dispatch_group_t group,
+ dispatch_queue_t q, void *context, dispatch_function_t work) {
+ WRAP(dispatch_group_notify)(group, q, ^(void) { work(context); });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block tsan_block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0 };
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_event_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler_f, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_event_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block tsan_block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_cancel_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_cancel_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block tsan_block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_registration_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_registration_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply, size_t iterations,
+ dispatch_queue_t queue, void (^block)(size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply, iterations, queue, block);
+
+ void *parent_to_child_sync = nullptr;
+ uptr parent_to_child_sync_uptr = (uptr)&parent_to_child_sync;
+ void *child_to_parent_sync = nullptr;
+ uptr child_to_parent_sync_uptr = (uptr)&child_to_parent_sync;
+
+ Release(thr, pc, parent_to_child_sync_uptr);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_apply);
+ Acquire(thr, pc, parent_to_child_sync_uptr);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ block(iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, child_to_parent_sync_uptr);
+ };
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_apply)(iterations, queue, new_block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Acquire(thr, pc, child_to_parent_sync_uptr);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply_f, size_t iterations,
+ dispatch_queue_t queue, void *context,
+ void (*work)(void *, size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply_f, iterations, queue, context, work);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ work(context, iteration);
+ };
+ WRAP(dispatch_apply)(iterations, queue, new_block);
+}
+
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, long_t sz)
+
+TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
+ size_t size, dispatch_queue_t q, dispatch_block_t destructor) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_data_create, buffer, size, q, destructor);
+ if ((q == nullptr) || (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT))
+ return REAL(dispatch_data_create)(buffer, size, q, destructor);
+
+ if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE)
+ destructor = ^(void) { WRAP(free)((void *)buffer); };
+ else if (destructor == DISPATCH_DATA_DESTRUCTOR_MUNMAP)
+ destructor = ^(void) { WRAP(munmap)((void *)buffer, size); };
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(destructor);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ tsan_block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ uptr submit_sync = (uptr)new_context;
+ Release(thr, pc, submit_sync);
+ return REAL(dispatch_data_create)(buffer, size, q, ^(void) {
+ dispatch_callback_wrap(new_context);
+ });
+}
+
+typedef void (^fd_handler_t)(dispatch_data_t data, int error);
+typedef void (^cleanup_handler_t)(int error);
+
+TSAN_INTERCEPTOR(void, dispatch_read, dispatch_fd_t fd, size_t length,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_read, fd, length, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_read)(fd, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_write, dispatch_fd_t fd, dispatch_data_t data,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_write, fd, data, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_write)(fd, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_read, dispatch_io_t channel, off_t offset,
+ size_t length, dispatch_queue_t q, dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_read, channel, offset, length, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_read)(channel, offset, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_write, dispatch_io_t channel, off_t offset,
+ dispatch_data_t data, dispatch_queue_t q,
+ dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_write, channel, offset, data, q, h);
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_write)(channel, offset, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_barrier, dispatch_io_t channel,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_barrier, channel, barrier);
+ __block tsan_block_context_t new_context = {
+ nullptr, nullptr, &invoke_block, false, false, false, 0};
+ new_context.non_queue_sync_object = (uptr)channel;
+ new_context.is_barrier_block = true;
+ dispatch_block_t new_block = Block_copy(^(void) {
+ new_context.orig_context = ^(void) {
+ barrier();
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_barrier)(channel, new_block);
+ Block_release(new_block);
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create, dispatch_io_type_t type,
+ dispatch_fd_t fd, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create, type, fd, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create)(type, fd, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_path,
+ dispatch_io_type_t type, const char *path, int oflag,
+ mode_t mode, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_path, type, path, oflag, mode,
+ q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel =
+ REAL(dispatch_io_create_with_path)(type, path, oflag, mode, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_io,
+ dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t q,
+ cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_io, type, io, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block tsan_block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create_with_io)(type, io, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_close, dispatch_io_t channel,
+ dispatch_io_close_flags_t flags) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_close, channel, flags);
+ Release(thr, pc, (uptr)channel); // Acquire() in dispatch_io_create[_*].
+ return REAL(dispatch_io_close)(channel, flags);
}
} // namespace __tsan
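
For illustration, hypothetical user code (not part of this patch) showing the happens-before edge these interceptors establish: the Release on the context before submission pairs with the Acquire in dispatch_callback_wrap, so data written before dispatch_async is visible inside the block.

#include <dispatch/dispatch.h>

// Hypothetical example; publish() is not part of the runtime.
void publish(int *data, dispatch_queue_t q) {
  *data = 42;        // written before the submit...
  dispatch_async(q, ^{
    int v = *data;   // ...and read here without a reported race,
    (void)v;         // thanks to the Release/Acquire pair above
  });
}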
diff --git a/libsanitizer/tsan/tsan_malloc_mac.cc b/libsanitizer/tsan/tsan_malloc_mac.cc
index 97090773ca7..cc1031a584f 100644
--- a/libsanitizer/tsan/tsan_malloc_mac.cc
+++ b/libsanitizer/tsan/tsan_malloc_mac.cc
@@ -25,41 +25,32 @@ using namespace __tsan;
#define COMMON_MALLOC_MEMALIGN(alignment, size) \
void *p = \
user_alloc(cur_thread(), StackTrace::GetCurrentPc(), size, alignment)
-#define COMMON_MALLOC_MALLOC(size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(malloc)(size); \
- SCOPED_INTERCEPTOR_RAW(malloc, size); \
+#define COMMON_MALLOC_MALLOC(size) \
+ if (cur_thread()->in_symbolizer) return InternalAlloc(size); \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
void *p = user_alloc(thr, pc, size)
-#define COMMON_MALLOC_REALLOC(ptr, size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(realloc)(ptr, size); \
- SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ if (cur_thread()->in_symbolizer) return InternalRealloc(ptr, size); \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
void *p = user_realloc(thr, pc, ptr, size)
-#define COMMON_MALLOC_CALLOC(count, size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(calloc)(count, size); \
- SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+#define COMMON_MALLOC_CALLOC(count, size) \
+ if (cur_thread()->in_symbolizer) return InternalCalloc(count, size); \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
void *p = user_calloc(thr, pc, size, count)
-#define COMMON_MALLOC_VALLOC(size) \
- if (cur_thread()->in_symbolizer) \
- return REAL(valloc)(size); \
- SCOPED_INTERCEPTOR_RAW(valloc, size); \
+#define COMMON_MALLOC_VALLOC(size) \
+ if (cur_thread()->in_symbolizer) \
+ return InternalAlloc(size, nullptr, GetPageSizeCached()); \
+ SCOPED_INTERCEPTOR_RAW(valloc, size); \
void *p = user_alloc(thr, pc, size, GetPageSizeCached())
-#define COMMON_MALLOC_FREE(ptr) \
- if (cur_thread()->in_symbolizer) \
- return REAL(free)(ptr); \
- SCOPED_INTERCEPTOR_RAW(free, ptr); \
+#define COMMON_MALLOC_FREE(ptr) \
+ if (cur_thread()->in_symbolizer) return InternalFree(ptr); \
+ SCOPED_INTERCEPTOR_RAW(free, ptr); \
user_free(thr, pc, ptr)
-#define COMMON_MALLOC_SIZE(ptr) \
- uptr size = user_alloc_usable_size(ptr);
+#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
#define COMMON_MALLOC_FILL_STATS(zone, stats)
#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
(void)zone_name; \
Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
-#define COMMON_MALLOC_IGNORE_INVALID_FREE false
-#define COMMON_MALLOC_REPORT_FREE_UNALLOCATED(ptr, zone_ptr, zone_name) \
- (void)zone_name; \
- Report("free_common(%p) -- attempting to free unallocated memory.\n", ptr);
#define COMMON_MALLOC_NAMESPACE __tsan
#include "sanitizer_common/sanitizer_malloc_mac.inc"
diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc
index c8a5a6a264a..152c2de28d8 100644
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc
@@ -17,12 +17,14 @@
#include "tsan_flags.h"
// May be overridden by the front-end.
-extern "C" void WEAK __sanitizer_malloc_hook(void *ptr, uptr size) {
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
(void)ptr;
(void)size;
}
-extern "C" void WEAK __sanitizer_free_hook(void *ptr) {
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_free_hook(void *ptr) {
(void)ptr;
}
@@ -50,7 +52,7 @@ struct MapUnmapCallback {
diff = p + size - RoundDown(p + size, kPageSize);
if (diff != 0)
size -= diff;
- FlushUnneededShadowMemory((uptr)MemToMeta(p), size / kMetaRatio);
+ ReleaseMemoryToOS((uptr)MemToMeta(p), size / kMetaRatio);
}
};
@@ -59,18 +61,69 @@ Allocator *allocator() {
return reinterpret_cast<Allocator*>(&allocator_placeholder);
}
+struct GlobalProc {
+ Mutex mtx;
+ Processor *proc;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
+ , proc(ProcCreate()) {
+ }
+};
+
+static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+GlobalProc *global_proc() {
+ return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
+}
+
+ScopedGlobalProcessor::ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc())
+ return;
+ // If we don't have a proc, use the global one.
+ // There are currently only two known cases where this path is triggered:
+ // __interceptor_free
+ // __nptl_deallocate_tsd
+ // start_thread
+ // clone
+ // and:
+ // ResetRange
+ // __interceptor_munmap
+ // __deallocate_stack
+ // start_thread
+ // clone
+ // Ideally, we destroy thread state (and unwire proc) when a thread actually
+ // exits (i.e. when we join/wait for it). Then we would not need the global proc.
+ gp->mtx.Lock();
+ ProcWire(gp->proc, thr);
+}
+
+ScopedGlobalProcessor::~ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc() != gp->proc)
+ return;
+ ProcUnwire(gp->proc, thr);
+ gp->mtx.Unlock();
+}
+
void InitializeAllocator() {
allocator()->Init(common_flags()->allocator_may_return_null);
}
-void AllocatorThreadStart(ThreadState *thr) {
- allocator()->InitCache(&thr->alloc_cache);
- internal_allocator()->InitCache(&thr->internal_alloc_cache);
+void InitializeAllocatorLate() {
+ new(global_proc()) GlobalProc();
+}
+
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
}
-void AllocatorThreadFinish(ThreadState *thr) {
- allocator()->DestroyCache(&thr->alloc_cache);
- internal_allocator()->DestroyCache(&thr->internal_alloc_cache);
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
}
void AllocatorPrintStats() {
@@ -93,8 +146,8 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
- return allocator()->ReturnNullOrDie();
- void *p = allocator()->Allocate(&thr->alloc_cache, sz, align);
+ return allocator()->ReturnNullOrDieOnBadRequest();
+ void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
if (p == 0)
return 0;
if (ctx && ctx->initialized)
@@ -106,7 +159,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
if (CallocShouldReturnNullDueToOverflow(size, n))
- return allocator()->ReturnNullOrDie();
+ return allocator()->ReturnNullOrDieOnBadRequest();
void *p = user_alloc(thr, pc, n * size);
if (p)
internal_memset(p, 0, n * size);
@@ -114,9 +167,10 @@ void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
}
void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ ScopedGlobalProcessor sgp;
if (ctx && ctx->initialized)
OnUserFree(thr, pc, (uptr)p, true);
- allocator()->Deallocate(&thr->alloc_cache, p);
+ allocator()->Deallocate(&thr->proc()->alloc_cache, p);
if (signal)
SignalUnsafeCall(thr, pc);
}
@@ -132,27 +186,23 @@ void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
CHECK_NE(p, (void*)0);
- uptr sz = ctx->metamap.FreeBlock(thr, pc, p);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
if (write && thr->ignore_reads_and_writes == 0)
MemoryRangeFreed(thr, pc, (uptr)p, sz);
}
void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
- void *p2 = 0;
// FIXME: Handle "shrinking" more efficiently,
// it seems that some software actually does this.
- if (sz) {
- p2 = user_alloc(thr, pc, sz);
- if (p2 == 0)
- return 0;
- if (p) {
- uptr oldsz = user_alloc_usable_size(p);
- internal_memcpy(p2, p, min(oldsz, sz));
- }
- }
- if (p)
+ void *p2 = user_alloc(thr, pc, sz);
+ if (p2 == 0)
+ return 0;
+ if (p) {
+ uptr oldsz = user_alloc_usable_size(p);
+ internal_memcpy(p2, p, min(oldsz, sz));
user_free(thr, pc, p);
+ }
return p2;
}
@@ -160,7 +210,11 @@ uptr user_alloc_usable_size(const void *p) {
if (p == 0)
return 0;
MBlock *b = ctx->metamap.GetBlock((uptr)p);
- return b ? b->siz : 0;
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
}
void invoke_malloc_hook(void *ptr, uptr size) {
@@ -168,6 +222,7 @@ void invoke_malloc_hook(void *ptr, uptr size) {
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__sanitizer_malloc_hook(ptr, size);
+ RunMallocHooks(ptr, size);
}
void invoke_free_hook(void *ptr) {
@@ -175,6 +230,7 @@ void invoke_free_hook(void *ptr) {
if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
return;
__sanitizer_free_hook(ptr);
+ RunFreeHooks(ptr);
}
void *internal_alloc(MBlockType typ, uptr sz) {
@@ -183,7 +239,7 @@ void *internal_alloc(MBlockType typ, uptr sz) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- return InternalAlloc(sz, &thr->internal_alloc_cache);
+ return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
}
void internal_free(void *p) {
@@ -192,7 +248,7 @@ void internal_free(void *p) {
thr->nomalloc = 0; // CHECK calls internal_malloc().
CHECK(0);
}
- InternalFree(p, &thr->internal_alloc_cache);
+ InternalFree(p, &thr->proc()->internal_alloc_cache);
}
} // namespace __tsan
@@ -234,8 +290,8 @@ uptr __sanitizer_get_allocated_size(const void *p) {
void __tsan_on_thread_idle() {
ThreadState *thr = cur_thread();
- allocator()->SwallowCache(&thr->alloc_cache);
- internal_allocator()->SwallowCache(&thr->internal_alloc_cache);
- ctx->metamap.OnThreadIdle(thr);
+ allocator()->SwallowCache(&thr->proc()->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc());
}
} // extern "C"
diff --git a/libsanitizer/tsan/tsan_mman.h b/libsanitizer/tsan/tsan_mman.h
index a7c91fae5e1..d49c9a9e9f2 100644
--- a/libsanitizer/tsan/tsan_mman.h
+++ b/libsanitizer/tsan/tsan_mman.h
@@ -18,9 +18,10 @@ namespace __tsan {
const uptr kDefaultAlignment = 16;
void InitializeAllocator();
+void InitializeAllocatorLate();
void ReplaceSystemMalloc();
-void AllocatorThreadStart(ThreadState *thr);
-void AllocatorThreadFinish(ThreadState *thr);
+void AllocatorProcStart(Processor *proc);
+void AllocatorProcFinish(Processor *proc);
void AllocatorPrintStats();
// For user allocations.
diff --git a/libsanitizer/tsan/tsan_mutex.cc b/libsanitizer/tsan/tsan_mutex.cc
index 3d3f6cc82b9..9b105cf201b 100644
--- a/libsanitizer/tsan/tsan_mutex.cc
+++ b/libsanitizer/tsan/tsan_mutex.cc
@@ -41,6 +41,7 @@ static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
/*11 MutexTypeDDetector*/ {},
/*12 MutexTypeFired*/ {MutexTypeLeaf},
/*13 MutexTypeRacy*/ {MutexTypeLeaf},
+ /*14 MutexTypeGlobalProc*/ {},
};
static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
diff --git a/libsanitizer/tsan/tsan_mutex.h b/libsanitizer/tsan/tsan_mutex.h
index 9960eee9bc8..bd1000f0b29 100644
--- a/libsanitizer/tsan/tsan_mutex.h
+++ b/libsanitizer/tsan/tsan_mutex.h
@@ -32,6 +32,7 @@ enum MutexType {
MutexTypeDDetector,
MutexTypeFired,
MutexTypeRacy,
+ MutexTypeGlobalProc,
// This must be the last.
MutexTypeCount
diff --git a/libsanitizer/tsan/tsan_mutexset.h b/libsanitizer/tsan/tsan_mutexset.h
index d5406ae9f12..b2c60b9a826 100644
--- a/libsanitizer/tsan/tsan_mutexset.h
+++ b/libsanitizer/tsan/tsan_mutexset.h
@@ -41,7 +41,7 @@ class MutexSet {
}
private:
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr size_;
Desc descs_[kMaxSize];
#endif
@@ -53,7 +53,7 @@ class MutexSet {
// Go does not have mutexes, so do not spend memory and time.
// (Go sync.Mutex is actually a semaphore -- can be unlocked
// in a different goroutine).
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
MutexSet::MutexSet() {}
void MutexSet::Add(u64 id, bool write, u64 epoch) {}
void MutexSet::Del(u64 id, bool write) {}
diff --git a/libsanitizer/tsan/tsan_new_delete.cc b/libsanitizer/tsan/tsan_new_delete.cc
index ba8474e0714..606cdd659f4 100644
--- a/libsanitizer/tsan/tsan_new_delete.cc
+++ b/libsanitizer/tsan/tsan_new_delete.cc
@@ -21,14 +21,10 @@ struct nothrow_t {};
DECLARE_REAL(void *, malloc, uptr size)
DECLARE_REAL(void, free, void *ptr)
-#if SANITIZER_MAC
-#define __libc_malloc REAL(malloc)
-#define __libc_free REAL(free)
-#endif
#define OPERATOR_NEW_BODY(mangled_name) \
if (cur_thread()->in_symbolizer) \
- return __libc_malloc(size); \
+ return InternalAlloc(size); \
void *p = 0; \
{ \
SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
@@ -64,7 +60,7 @@ void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
#define OPERATOR_DELETE_BODY(mangled_name) \
if (ptr == 0) return; \
if (cur_thread()->in_symbolizer) \
- return __libc_free(ptr); \
+ return InternalFree(ptr); \
invoke_free_hook(ptr); \
SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
user_free(thr, pc, ptr);
diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h
index f34f577ec03..368edc21ce4 100644
--- a/libsanitizer/tsan/tsan_platform.h
+++ b/libsanitizer/tsan/tsan_platform.h
@@ -22,38 +22,46 @@
namespace __tsan {
-#if !defined(SANITIZER_GO)
+#if !SANITIZER_GO
#if defined(__x86_64__)
/*
C/C++ on linux/x86_64 and freebsd/x86_64
-0000 0000 1000 - 0100 0000 0000: main binary and/or MAP_32BIT mappings
-0100 0000 0000 - 0200 0000 0000: -
-0200 0000 0000 - 1000 0000 0000: shadow
-1000 0000 0000 - 3000 0000 0000: -
+0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 2000 0000 0000: shadow
+2000 0000 0000 - 3000 0000 0000: -
3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
-4000 0000 0000 - 6000 0000 0000: -
+4000 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
6000 0000 0000 - 6200 0000 0000: traces
6200 0000 0000 - 7d00 0000 0000: -
-7d00 0000 0000 - 7e00 0000 0000: heap
-7e00 0000 0000 - 7e80 0000 0000: -
+7b00 0000 0000 - 7c00 0000 0000: heap
+7c00 0000 0000 - 7e80 0000 0000: -
7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
*/
-const uptr kMetaShadowBeg = 0x300000000000ull;
-const uptr kMetaShadowEnd = 0x400000000000ull;
-const uptr kTraceMemBeg = 0x600000000000ull;
-const uptr kTraceMemEnd = 0x620000000000ull;
-const uptr kShadowBeg = 0x020000000000ull;
-const uptr kShadowEnd = 0x100000000000ull;
-const uptr kHeapMemBeg = 0x7d0000000000ull;
-const uptr kHeapMemEnd = 0x7e0000000000ull;
-const uptr kLoAppMemBeg = 0x000000001000ull;
-const uptr kLoAppMemEnd = 0x010000000000ull;
-const uptr kHiAppMemBeg = 0x7e8000000000ull;
-const uptr kHiAppMemEnd = 0x800000000000ull;
-const uptr kAppMemMsk = 0x7c0000000000ull;
-const uptr kAppMemXor = 0x020000000000ull;
-const uptr kVdsoBeg = 0xf000000000000000ull;
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kHeapMemBeg = 0x7b0000000000ull;
+ static const uptr kHeapMemEnd = 0x7c0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x550000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull;
+ static const uptr kAppMemMsk = 0x780000000000ull;
+ static const uptr kAppMemXor = 0x040000000000ull;
+ static const uptr kVdsoBeg = 0xf000000000000000ull;
+};
+
+#define TSAN_MID_APP_RANGE 1
#elif defined(__mips64)
/*
C/C++ on linux/mips64
@@ -69,129 +77,204 @@ fe00 0000 00 - ff00 0000 00: heap
ff00 0000 00 - ff80 0000 00: -
ff80 0000 00 - ffff ffff ff: modules and main thread stack
*/
-const uptr kMetaShadowBeg = 0x3000000000ull;
-const uptr kMetaShadowEnd = 0x4000000000ull;
-const uptr kTraceMemBeg = 0x6000000000ull;
-const uptr kTraceMemEnd = 0x6200000000ull;
-const uptr kShadowBeg = 0x1400000000ull;
-const uptr kShadowEnd = 0x2400000000ull;
-const uptr kHeapMemBeg = 0xfe00000000ull;
-const uptr kHeapMemEnd = 0xff00000000ull;
-const uptr kLoAppMemBeg = 0x0100000000ull;
-const uptr kLoAppMemEnd = 0x0200000000ull;
-const uptr kHiAppMemBeg = 0xff80000000ull;
-const uptr kHiAppMemEnd = 0xffffffffffull;
-const uptr kAppMemMsk = 0xfc00000000ull;
-const uptr kAppMemXor = 0x0400000000ull;
-const uptr kVdsoBeg = 0xfffff00000ull;
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x5000000000ull;
+ static const uptr kTraceMemBeg = 0xb000000000ull;
+ static const uptr kTraceMemEnd = 0xb200000000ull;
+ static const uptr kShadowBeg = 0x2400000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kHeapMemBeg = 0xfe00000000ull;
+ static const uptr kHeapMemEnd = 0xff00000000ull;
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kMidAppMemBeg = 0xaa00000000ull;
+ static const uptr kMidAppMemEnd = 0xab00000000ull;
+ static const uptr kHiAppMemBeg = 0xff80000000ull;
+ static const uptr kHiAppMemEnd = 0xffffffffffull;
+ static const uptr kAppMemMsk = 0xf800000000ull;
+ static const uptr kAppMemXor = 0x0800000000ull;
+ static const uptr kVdsoBeg = 0xfffff00000ull;
+};
+
+#define TSAN_MID_APP_RANGE 1
#elif defined(__aarch64__)
-# if SANITIZER_AARCH64_VMA == 39
+// AArch64 supports multiple VMA sizes, which leads to multiple address
+// transformation functions. To support these VMA layouts, the TSAN runtime
+// for AArch64 reads an external variable (vmaSize) to select which mapping to
+// use. Although slower, this lets the same instrumented binary run on
+// multiple kernels.
+
/*
C/C++ on linux/aarch64 (39-bit VMA)
-0000 4000 00 - 0200 0000 00: main binary
-2000 0000 00 - 4000 0000 00: shadow memory
-4000 0000 00 - 5000 0000 00: metainfo
-5000 0000 00 - 6000 0000 00: -
+0000 0010 00 - 0100 0000 00: main binary
+0100 0000 00 - 0800 0000 00: -
+0800 0000 00 - 2000 0000 00: shadow memory
+2000 0000 00 - 3100 0000 00: -
+3100 0000 00 - 3400 0000 00: metainfo
+3400 0000 00 - 5500 0000 00: -
+5500 0000 00 - 5600 0000 00: main binary (PIE)
+5600 0000 00 - 6000 0000 00: -
6000 0000 00 - 6200 0000 00: traces
6200 0000 00 - 7d00 0000 00: -
-7d00 0000 00 - 7e00 0000 00: heap
-7e00 0000 00 - 7fff ffff ff: modules and main thread stack
+7c00 0000 00 - 7d00 0000 00: heap
+7d00 0000 00 - 7fff ffff ff: modules and main thread stack
*/
-const uptr kLoAppMemBeg = 0x0000400000ull;
-const uptr kLoAppMemEnd = 0x0200000000ull;
-const uptr kShadowBeg = 0x2000000000ull;
-const uptr kShadowEnd = 0x4000000000ull;
-const uptr kMetaShadowBeg = 0x4000000000ull;
-const uptr kMetaShadowEnd = 0x5000000000ull;
-const uptr kTraceMemBeg = 0x6000000000ull;
-const uptr kTraceMemEnd = 0x6200000000ull;
-const uptr kHeapMemBeg = 0x7d00000000ull;
-const uptr kHeapMemEnd = 0x7e00000000ull;
-const uptr kHiAppMemBeg = 0x7e00000000ull;
-const uptr kHiAppMemEnd = 0x7fffffffffull;
-const uptr kAppMemMsk = 0x7800000000ull;
-const uptr kAppMemXor = 0x0800000000ull;
-const uptr kVdsoBeg = 0x7f00000000ull;
-# elif SANITIZER_AARCH64_VMA == 42
+struct Mapping39 {
+ static const uptr kLoAppMemBeg = 0x0000001000ull;
+ static const uptr kLoAppMemEnd = 0x0100000000ull;
+ static const uptr kShadowBeg = 0x0800000000ull;
+ static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kMetaShadowBeg = 0x3100000000ull;
+ static const uptr kMetaShadowEnd = 0x3400000000ull;
+ static const uptr kMidAppMemBeg = 0x5500000000ull;
+ static const uptr kMidAppMemEnd = 0x5600000000ull;
+ static const uptr kTraceMemBeg = 0x6000000000ull;
+ static const uptr kTraceMemEnd = 0x6200000000ull;
+ static const uptr kHeapMemBeg = 0x7c00000000ull;
+ static const uptr kHeapMemEnd = 0x7d00000000ull;
+ static const uptr kHiAppMemBeg = 0x7e00000000ull;
+ static const uptr kHiAppMemEnd = 0x7fffffffffull;
+ static const uptr kAppMemMsk = 0x7800000000ull;
+ static const uptr kAppMemXor = 0x0200000000ull;
+ static const uptr kVdsoBeg = 0x7f00000000ull;
+};
+
/*
C/C++ on linux/aarch64 (42-bit VMA)
-00000 4000 00 - 01000 0000 00: main binary
+00000 0010 00 - 01000 0000 00: main binary
01000 0000 00 - 10000 0000 00: -
10000 0000 00 - 20000 0000 00: shadow memory
20000 0000 00 - 26000 0000 00: -
26000 0000 00 - 28000 0000 00: metainfo
-28000 0000 00 - 36200 0000 00: -
+28000 0000 00 - 2aa00 0000 00: -
+2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
+2ab00 0000 00 - 36200 0000 00: -
36200 0000 00 - 36240 0000 00: traces
36240 0000 00 - 3e000 0000 00: -
3e000 0000 00 - 3f000 0000 00: heap
-3c000 0000 00 - 3ff00 0000 00: -
-3ff00 0000 00 - 3ffff f000 00: modules and main thread stack
+3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
*/
-const uptr kLoAppMemBeg = 0x00000400000ull;
-const uptr kLoAppMemEnd = 0x01000000000ull;
-const uptr kShadowBeg = 0x10000000000ull;
-const uptr kShadowEnd = 0x20000000000ull;
-const uptr kMetaShadowBeg = 0x26000000000ull;
-const uptr kMetaShadowEnd = 0x28000000000ull;
-const uptr kTraceMemBeg = 0x36200000000ull;
-const uptr kTraceMemEnd = 0x36400000000ull;
-const uptr kHeapMemBeg = 0x3e000000000ull;
-const uptr kHeapMemEnd = 0x3f000000000ull;
-const uptr kHiAppMemBeg = 0x3ff00000000ull;
-const uptr kHiAppMemEnd = 0x3fffff00000ull;
-const uptr kAppMemMsk = 0x3c000000000ull;
-const uptr kAppMemXor = 0x04000000000ull;
-const uptr kVdsoBeg = 0x37f00000000ull;
-# endif
-#endif
-
-ALWAYS_INLINE
-bool IsAppMem(uptr mem) {
- return (mem >= kHeapMemBeg && mem < kHeapMemEnd) ||
- (mem >= kLoAppMemBeg && mem < kLoAppMemEnd) ||
- (mem >= kHiAppMemBeg && mem < kHiAppMemEnd);
-}
-
-ALWAYS_INLINE
-bool IsShadowMem(uptr mem) {
- return mem >= kShadowBeg && mem <= kShadowEnd;
-}
-
-ALWAYS_INLINE
-bool IsMetaMem(uptr mem) {
- return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
-}
+struct Mapping42 {
+ static const uptr kLoAppMemBeg = 0x00000001000ull;
+ static const uptr kLoAppMemEnd = 0x01000000000ull;
+ static const uptr kShadowBeg = 0x10000000000ull;
+ static const uptr kShadowEnd = 0x20000000000ull;
+ static const uptr kMetaShadowBeg = 0x26000000000ull;
+ static const uptr kMetaShadowEnd = 0x28000000000ull;
+ static const uptr kMidAppMemBeg = 0x2aa00000000ull;
+ static const uptr kMidAppMemEnd = 0x2ab00000000ull;
+ static const uptr kTraceMemBeg = 0x36200000000ull;
+ static const uptr kTraceMemEnd = 0x36400000000ull;
+ static const uptr kHeapMemBeg = 0x3e000000000ull;
+ static const uptr kHeapMemEnd = 0x3f000000000ull;
+ static const uptr kHiAppMemBeg = 0x3f000000000ull;
+ static const uptr kHiAppMemEnd = 0x3ffffffffffull;
+ static const uptr kAppMemMsk = 0x3c000000000ull;
+ static const uptr kAppMemXor = 0x04000000000ull;
+ static const uptr kVdsoBeg = 0x37f00000000ull;
+};
-ALWAYS_INLINE
-uptr MemToShadow(uptr x) {
- DCHECK(IsAppMem(x));
- return (((x) & ~(kAppMemMsk | (kShadowCell - 1)))
- ^ kAppMemXor) * kShadowCnt;
-}
+struct Mapping48 {
+ static const uptr kLoAppMemBeg = 0x0000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0000200000000ull;
+ static const uptr kShadowBeg = 0x0002000000000ull;
+ static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kMetaShadowBeg = 0x0005000000000ull;
+ static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
+ static const uptr kTraceMemBeg = 0x0f06000000000ull;
+ static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kHeapMemBeg = 0x0ffff00000000ull;
+ static const uptr kHeapMemEnd = 0x0ffff00000000ull;
+ static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000000ull;
+ static const uptr kAppMemMsk = 0x0fff800000000ull;
+ static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kVdsoBeg = 0xffff000000000ull;
+};
-ALWAYS_INLINE
-u32 *MemToMeta(uptr x) {
- DCHECK(IsAppMem(x));
- return (u32*)(((((x) & ~(kAppMemMsk | (kMetaShadowCell - 1)))
- ^ kAppMemXor) / kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
-}
+// Indicates that the memory region boundaries are determined at runtime.
+#define TSAN_RUNTIME_VMA 1
+// Indicates that the mapping defines a mid-range application memory segment.
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__powerpc64__)
+// PPC64 supports multiple VMAs, which leads to multiple address
+// transformation functions. To support these multiple VMA transformations
+// and mappings, the TSAN runtime for PPC64 uses an external memory read
+// (vmaSize) to select which mapping to use. Although slower, it makes the
+// same instrumented binary run on multiple kernels.
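// As an illustrative sketch of that dispatch (simplified names, not the
// literal code below): each transformation reads vmaSize once and picks the
// matching Mapping type, e.g.
//
//   uptr ShadowBegOnPpc64() {
//     return vmaSize == 44 ? Mapping44::kShadowBeg : Mapping46::kShadowBeg;
//   }
//
// so one instrumented binary runs on both 44-bit and 46-bit kernels, at the
// cost of a runtime branch per transformation.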
-ALWAYS_INLINE
-uptr ShadowToMem(uptr s) {
- CHECK(IsShadowMem(s));
- if (s >= MemToShadow(kLoAppMemBeg) && s <= MemToShadow(kLoAppMemEnd - 1))
- return (s / kShadowCnt) ^ kAppMemXor;
- else
- return ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk;
-}
+/*
+C/C++ on linux/powerpc64 (44-bit VMA)
+0000 0000 0100 - 0001 0000 0000: main binary
+0001 0000 0000 - 0001 0000 0000: -
+0001 0000 0000 - 0b00 0000 0000: shadow
+0b00 0000 0000 - 0b00 0000 0000: -
+0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
+0d00 0000 0000 - 0d00 0000 0000: -
+0d00 0000 0000 - 0f00 0000 0000: traces
+0f00 0000 0000 - 0f00 0000 0000: -
+0f00 0000 0000 - 0f50 0000 0000: heap
+0f50 0000 0000 - 0f60 0000 0000: -
+0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
+*/
+struct Mapping44 {
+ static const uptr kMetaShadowBeg = 0x0b0000000000ull;
+ static const uptr kMetaShadowEnd = 0x0d0000000000ull;
+ static const uptr kTraceMemBeg = 0x0d0000000000ull;
+ static const uptr kTraceMemEnd = 0x0f0000000000ull;
+ static const uptr kShadowBeg = 0x000100000000ull;
+ static const uptr kShadowEnd = 0x0b0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000000100ull;
+ static const uptr kLoAppMemEnd = 0x000100000000ull;
+ static const uptr kHeapMemBeg = 0x0f0000000000ull;
+ static const uptr kHeapMemEnd = 0x0f5000000000ull;
+ static const uptr kHiAppMemBeg = 0x0f6000000000ull;
+ static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
+ static const uptr kAppMemMsk = 0x0f0000000000ull;
+ static const uptr kAppMemXor = 0x002100000000ull;
+ static const uptr kVdsoBeg = 0x3c0000000000000ull;
+};
-static USED uptr UserRegions[] = {
- kLoAppMemBeg, kLoAppMemEnd,
- kHiAppMemBeg, kHiAppMemEnd,
- kHeapMemBeg, kHeapMemEnd,
+/*
+C/C++ on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 0200 0000 0000: -
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 1000 0000 0000: -
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 3d00 0000 0000: -
+3d00 0000 0000 - 3e00 0000 0000: heap
+3e00 0000 0000 - 3e80 0000 0000: -
+3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
+*/
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x3d0000000000ull;
+ static const uptr kHeapMemEnd = 0x3e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x3e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
+ static const uptr kAppMemMsk = 0x3c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
};
-#elif defined(SANITIZER_GO) && !SANITIZER_WINDOWS
+// Indicates that the memory region boundaries are determined at runtime.
+#define TSAN_RUNTIME_VMA 1
+#endif
+
+#elif SANITIZER_GO && !SANITIZER_WINDOWS
/* Go on linux, darwin and freebsd
0000 0000 1000 - 0000 1000 0000: executable
@@ -206,146 +289,529 @@ static USED uptr UserRegions[] = {
6200 0000 0000 - 8000 0000 0000: -
*/
-const uptr kMetaShadowBeg = 0x300000000000ull;
-const uptr kMetaShadowEnd = 0x400000000000ull;
-const uptr kTraceMemBeg = 0x600000000000ull;
-const uptr kTraceMemEnd = 0x620000000000ull;
-const uptr kShadowBeg = 0x200000000000ull;
-const uptr kShadowEnd = 0x238000000000ull;
-const uptr kAppMemBeg = 0x000000001000ull;
-const uptr kAppMemEnd = 0x00e000000000ull;
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#elif SANITIZER_GO && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00f8 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0500 0000 0000: shadow
+0500 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x076000000000ull;
+ static const uptr kMetaShadowEnd = 0x07d000000000ull;
+ static const uptr kTraceMemBeg = 0x056000000000ull;
+ static const uptr kTraceMemEnd = 0x076000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x050000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#else
+# error "Unknown platform"
+#endif
+
+
+#ifdef TSAN_RUNTIME_VMA
+extern uptr vmaSize;
+#endif
+
+
+enum MappingType {
+ MAPPING_LO_APP_BEG,
+ MAPPING_LO_APP_END,
+ MAPPING_HI_APP_BEG,
+ MAPPING_HI_APP_END,
+#ifdef TSAN_MID_APP_RANGE
+ MAPPING_MID_APP_BEG,
+ MAPPING_MID_APP_END,
+#endif
+ MAPPING_HEAP_BEG,
+ MAPPING_HEAP_END,
+ MAPPING_APP_BEG,
+ MAPPING_APP_END,
+ MAPPING_SHADOW_BEG,
+ MAPPING_SHADOW_END,
+ MAPPING_META_SHADOW_BEG,
+ MAPPING_META_SHADOW_END,
+ MAPPING_TRACE_BEG,
+ MAPPING_TRACE_END,
+ MAPPING_VDSO_BEG,
+};
+
+template<typename Mapping, int Type>
+uptr MappingImpl(void) {
+ switch (Type) {
+#if !SANITIZER_GO
+ case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
+ case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
+# ifdef TSAN_MID_APP_RANGE
+ case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg;
+ case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd;
+# endif
+ case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
+ case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
+ case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
+ case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
+ case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
+#else
+ case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
+ case MAPPING_APP_END: return Mapping::kAppMemEnd;
+#endif
+ case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
+ case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
+ case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
+ case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
+ case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
+ case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
+ }
+}
+template<int Type>
+uptr MappingArchImpl(void) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return MappingImpl<Mapping44, Type>();
+ else
+ return MappingImpl<Mapping46, Type>();
+ DCHECK(0);
+#else
+ return MappingImpl<Mapping, Type>();
+#endif
+}
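// Usage sketch (illustrative): callers never name a Mapping struct directly;
// they go through typed accessors such as
//
//   uptr shadow_begin = MappingArchImpl<MAPPING_SHADOW_BEG>();
//
// On platforms with a single compile-time mapping this folds to a constant;
// on aarch64 and powerpc64 it becomes a switch on the detected vmaSize.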
+
+#if !SANITIZER_GO
ALWAYS_INLINE
-bool IsAppMem(uptr mem) {
- return mem >= kAppMemBeg && mem < kAppMemEnd;
+uptr LoAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_LO_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr LoAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_LO_APP_END>();
}
+#ifdef TSAN_MID_APP_RANGE
ALWAYS_INLINE
-bool IsShadowMem(uptr mem) {
- return mem >= kShadowBeg && mem <= kShadowEnd;
+uptr MidAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_MID_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr MidAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_MID_APP_END>();
}
+#endif
ALWAYS_INLINE
-bool IsMetaMem(uptr mem) {
- return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+uptr HeapMemBeg(void) {
+ return MappingArchImpl<MAPPING_HEAP_BEG>();
+}
+ALWAYS_INLINE
+uptr HeapMemEnd(void) {
+ return MappingArchImpl<MAPPING_HEAP_END>();
}
ALWAYS_INLINE
-uptr MemToShadow(uptr x) {
- DCHECK(IsAppMem(x));
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) | kShadowBeg;
+uptr HiAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_HI_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr HiAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_HI_APP_END>();
}
ALWAYS_INLINE
-u32 *MemToMeta(uptr x) {
- DCHECK(IsAppMem(x));
- return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+uptr VdsoBeg(void) {
+ return MappingArchImpl<MAPPING_VDSO_BEG>();
}
+#else
+
ALWAYS_INLINE
-uptr ShadowToMem(uptr s) {
- CHECK(IsShadowMem(s));
- return (s & ~kShadowBeg) / kShadowCnt;
+uptr AppMemBeg(void) {
+ return MappingArchImpl<MAPPING_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr AppMemEnd(void) {
+ return MappingArchImpl<MAPPING_APP_END>();
}
-static USED uptr UserRegions[] = {
- kAppMemBeg, kAppMemEnd,
-};
+#endif
-#elif defined(SANITIZER_GO) && SANITIZER_WINDOWS
+static inline
+bool GetUserRegion(int i, uptr *start, uptr *end) {
+ switch (i) {
+ default:
+ return false;
+#if !SANITIZER_GO
+ case 0:
+ *start = LoAppMemBeg();
+ *end = LoAppMemEnd();
+ return true;
+ case 1:
+ *start = HiAppMemBeg();
+ *end = HiAppMemEnd();
+ return true;
+ case 2:
+ *start = HeapMemBeg();
+ *end = HeapMemEnd();
+ return true;
+# ifdef TSAN_MID_APP_RANGE
+ case 3:
+ *start = MidAppMemBeg();
+ *end = MidAppMemEnd();
+ return true;
+# endif
+#else
+ case 0:
+ *start = AppMemBeg();
+ *end = AppMemEnd();
+ return true;
+#endif
+ }
+}
-/* Go on windows
-0000 0000 1000 - 0000 1000 0000: executable
-0000 1000 0000 - 00f8 0000 0000: -
-00c0 0000 0000 - 00e0 0000 0000: heap
-00e0 0000 0000 - 0100 0000 0000: -
-0100 0000 0000 - 0500 0000 0000: shadow
-0500 0000 0000 - 0560 0000 0000: -
-0560 0000 0000 - 0760 0000 0000: traces
-0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
-07d0 0000 0000 - 8000 0000 0000: -
-*/
+ALWAYS_INLINE
+uptr ShadowBeg(void) {
+ return MappingArchImpl<MAPPING_SHADOW_BEG>();
+}
+ALWAYS_INLINE
+uptr ShadowEnd(void) {
+ return MappingArchImpl<MAPPING_SHADOW_END>();
+}
-const uptr kMetaShadowBeg = 0x076000000000ull;
-const uptr kMetaShadowEnd = 0x07d000000000ull;
-const uptr kTraceMemBeg = 0x056000000000ull;
-const uptr kTraceMemEnd = 0x076000000000ull;
-const uptr kShadowBeg = 0x010000000000ull;
-const uptr kShadowEnd = 0x050000000000ull;
-const uptr kAppMemBeg = 0x000000001000ull;
-const uptr kAppMemEnd = 0x00e000000000ull;
+ALWAYS_INLINE
+uptr MetaShadowBeg(void) {
+ return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
+}
+ALWAYS_INLINE
+uptr MetaShadowEnd(void) {
+ return MappingArchImpl<MAPPING_META_SHADOW_END>();
+}
+
+ALWAYS_INLINE
+uptr TraceMemBeg(void) {
+ return MappingArchImpl<MAPPING_TRACE_BEG>();
+}
+ALWAYS_INLINE
+uptr TraceMemEnd(void) {
+ return MappingArchImpl<MAPPING_TRACE_END>();
+}
+
+
+template<typename Mapping>
+bool IsAppMemImpl(uptr mem) {
+#if !SANITIZER_GO
+ return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
+# ifdef TSAN_MID_APP_RANGE
+ (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
+# endif
+ (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
+ (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
+#else
+ return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
+#endif
+}
ALWAYS_INLINE
bool IsAppMem(uptr mem) {
- return mem >= kAppMemBeg && mem < kAppMemEnd;
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return IsAppMemImpl<Mapping39>(mem);
+ case 42: return IsAppMemImpl<Mapping42>(mem);
+ case 48: return IsAppMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return IsAppMemImpl<Mapping44>(mem);
+ else
+ return IsAppMemImpl<Mapping46>(mem);
+ DCHECK(0);
+#else
+ return IsAppMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+bool IsShadowMemImpl(uptr mem) {
+ return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
}
ALWAYS_INLINE
bool IsShadowMem(uptr mem) {
- return mem >= kShadowBeg && mem <= kShadowEnd;
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return IsShadowMemImpl<Mapping39>(mem);
+ case 42: return IsShadowMemImpl<Mapping42>(mem);
+ case 48: return IsShadowMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return IsShadowMemImpl<Mapping44>(mem);
+ else
+ return IsShadowMemImpl<Mapping46>(mem);
+ DCHECK(0);
+#else
+ return IsShadowMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+bool IsMetaMemImpl(uptr mem) {
+ return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
}
ALWAYS_INLINE
bool IsMetaMem(uptr mem) {
- return mem >= kMetaShadowBeg && mem <= kMetaShadowEnd;
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return IsMetaMemImpl<Mapping39>(mem);
+ case 42: return IsMetaMemImpl<Mapping42>(mem);
+ case 48: return IsMetaMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return IsMetaMemImpl<Mapping44>(mem);
+ else
+ return IsMetaMemImpl<Mapping46>(mem);
+ DCHECK(0);
+#else
+ return IsMetaMemImpl<Mapping>(mem);
+#endif
}
-ALWAYS_INLINE
-uptr MemToShadow(uptr x) {
+
+template<typename Mapping>
+uptr MemToShadowImpl(uptr x) {
DCHECK(IsAppMem(x));
- return ((x & ~(kShadowCell - 1)) * kShadowCnt) + kShadowBeg;
+#if !SANITIZER_GO
+ return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
+ ^ Mapping::kAppMemXor) * kShadowCnt;
+#else
+# ifndef SANITIZER_WINDOWS
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
+# else
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
+# endif
+#endif
}
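// A worked example of the non-Go branch above, using the Mapping39 constants
// and assuming kShadowCell == 8 and kShadowCnt == 4 (their usual values in
// tsan):
//
//   x                     = 0x7c0000f000   (a heap address)
//   x & ~(kAppMemMsk | 7) = 0x040000f000   (drop the range-selector bits)
//   ^ kAppMemXor          = 0x060000f000
//   * kShadowCnt          = 0x180003c000   -> inside [kShadowBeg, kShadowEnd)
//
// Every 8-byte app cell thus owns kShadowCnt consecutive shadow slots.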
ALWAYS_INLINE
-u32 *MemToMeta(uptr x) {
+uptr MemToShadow(uptr x) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return MemToShadowImpl<Mapping39>(x);
+ case 42: return MemToShadowImpl<Mapping42>(x);
+ case 48: return MemToShadowImpl<Mapping48>(x);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return MemToShadowImpl<Mapping44>(x);
+ else
+ return MemToShadowImpl<Mapping46>(x);
+ DCHECK(0);
+#else
+ return MemToShadowImpl<Mapping>(x);
+#endif
+}
+
+
+template<typename Mapping>
+u32 *MemToMetaImpl(uptr x) {
DCHECK(IsAppMem(x));
+#if !SANITIZER_GO
+ return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+#else
+# ifndef SANITIZER_WINDOWS
return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
- kMetaShadowCell * kMetaShadowSize) | kMetaShadowBeg);
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+# else
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) / \
+ kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
+# endif
+#endif
}
ALWAYS_INLINE
-uptr ShadowToMem(uptr s) {
- CHECK(IsShadowMem(s));
- // FIXME(dvyukov): this is most likely wrong as the mapping is not bijection.
- return (s - kShadowBeg) / kShadowCnt;
+u32 *MemToMeta(uptr x) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return MemToMetaImpl<Mapping39>(x);
+ case 42: return MemToMetaImpl<Mapping42>(x);
+ case 48: return MemToMetaImpl<Mapping48>(x);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return MemToMetaImpl<Mapping44>(x);
+ else
+ return MemToMetaImpl<Mapping46>(x);
+ DCHECK(0);
+#else
+ return MemToMetaImpl<Mapping>(x);
+#endif
}
-static USED uptr UserRegions[] = {
- kAppMemBeg, kAppMemEnd,
-};
+template<typename Mapping>
+uptr ShadowToMemImpl(uptr s) {
+ DCHECK(IsShadowMem(s));
+#if !SANITIZER_GO
+ // The shadow mapping is non-linear and we've lost some bits, so we don't have
+ // an easy way to restore the original app address. But the mapping is a
+ // bijection, so we try to restore the address as belonging to low/mid/high
+ // range consecutively and see if shadow->app->shadow mapping gives us the
+ // same address.
+ uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
+# ifdef TSAN_MID_APP_RANGE
+ p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
+ (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
+ if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
+# endif
+ return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
+#else // #if !SANITIZER_GO
+# ifndef SANITIZER_WINDOWS
+ return (s & ~Mapping::kShadowBeg) / kShadowCnt;
+# else
+ return (s - Mapping::kShadowBeg) / kShadowCnt;
+# endif // SANITIZER_WINDOWS
+#endif
+}
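// Continuing the Mapping39 example (kShadowCnt == 4 assumed): for
// s = 0x180003c000,
//   (s / kShadowCnt) ^ kAppMemXor = 0x040000f000
// which is in neither the low nor the mid range, so the high-range fallback
//   ((s / kShadowCnt) ^ kAppMemXor) | kAppMemMsk = 0x7c0000f000
// recovers the original heap address, and MemToShadow() maps it back to s.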
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return ShadowToMemImpl<Mapping39>(s);
+ case 42: return ShadowToMemImpl<Mapping42>(s);
+ case 48: return ShadowToMemImpl<Mapping48>(s);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return ShadowToMemImpl<Mapping44>(s);
+ else
+ return ShadowToMemImpl<Mapping46>(s);
+ DCHECK(0);
#else
-# error "Unknown platform"
+ return ShadowToMemImpl<Mapping>(s);
#endif
+}
+
+
// The additional page is to catch shadow stack overflow as paging fault.
// Windows wants 64K alignment for mmaps.
const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
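// The idiom spelled out: (n + (64 << 10) - 1) & ~((64 << 10) - 1) rounds n
// up to a 64K boundary, and the extra (64 << 10) term reserves one more 64K
// block so a shadow stack overflow faults in the guard region instead of
// running into the next thread's trace. E.g. for a hypothetical
// n = 0x18010: n + 0x10000 = 0x28010, rounded up -> kTotalTraceSize 0x30000.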
-uptr ALWAYS_INLINE GetThreadTrace(int tid) {
- uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize;
- DCHECK_LT(p, kTraceMemEnd);
+template<typename Mapping>
+uptr GetThreadTraceImpl(int tid) {
+ uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
return p;
}
-uptr ALWAYS_INLINE GetThreadTraceHeader(int tid) {
- uptr p = kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ALWAYS_INLINE
+uptr GetThreadTrace(int tid) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return GetThreadTraceImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceImpl<Mapping48>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return GetThreadTraceImpl<Mapping44>(tid);
+ else
+ return GetThreadTraceImpl<Mapping46>(tid);
+ DCHECK(0);
+#else
+ return GetThreadTraceImpl<Mapping>(tid);
+#endif
+}
+
+
+template<typename Mapping>
+uptr GetThreadTraceHeaderImpl(int tid) {
+ uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ kTraceSize * sizeof(Event);
- DCHECK_LT(p, kTraceMemEnd);
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
return p;
}
+ALWAYS_INLINE
+uptr GetThreadTraceHeader(int tid) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ if (vmaSize == 44)
+ return GetThreadTraceHeaderImpl<Mapping44>(tid);
+ else
+ return GetThreadTraceHeaderImpl<Mapping46>(tid);
+ DCHECK(0);
+#else
+ return GetThreadTraceHeaderImpl<Mapping>(tid);
+#endif
+}
+
void InitializePlatform();
+void InitializePlatformEarly();
void CheckAndProtect();
void InitializeShadowMemoryPlatform();
void FlushShadowMemory();
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
-
-// Says whether the addr relates to a global var.
-// Guesses with high probability, may yield both false positives and negatives.
-bool IsGlobalVar(uptr addr);
int ExtractResolvFDs(void *state, int *fds, int nfd);
int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
diff --git a/libsanitizer/tsan/tsan_platform_linux.cc b/libsanitizer/tsan/tsan_platform_linux.cc
index 09cec5fdffd..2ed5718a12e 100644
--- a/libsanitizer/tsan/tsan_platform_linux.cc
+++ b/libsanitizer/tsan/tsan_platform_linux.cc
@@ -16,6 +16,8 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
@@ -32,6 +34,10 @@
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/personality.h>
+#include <setjmp.h>
+#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
@@ -66,8 +72,10 @@ void InitializeGuardPtr() __attribute__((visibility("hidden")));
namespace __tsan {
-static uptr g_data_start;
-static uptr g_data_end;
+#ifdef TSAN_RUNTIME_VMA
+// Runtime detected VMA size.
+uptr vmaSize;
+#endif
enum {
MemTotal = 0,
@@ -84,29 +92,30 @@ enum {
void FillProfileCallback(uptr p, uptr rss, bool file,
uptr *mem, uptr stats_size) {
mem[MemTotal] += rss;
- if (p >= kShadowBeg && p < kShadowEnd)
+ if (p >= ShadowBeg() && p < ShadowEnd())
mem[MemShadow] += rss;
- else if (p >= kMetaShadowBeg && p < kMetaShadowEnd)
+ else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
mem[MemMeta] += rss;
-#ifndef SANITIZER_GO
- else if (p >= kHeapMemBeg && p < kHeapMemEnd)
+#if !SANITIZER_GO
+ else if (p >= HeapMemBeg() && p < HeapMemEnd())
mem[MemHeap] += rss;
- else if (p >= kLoAppMemBeg && p < kLoAppMemEnd)
+ else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
mem[file ? MemFile : MemMmap] += rss;
- else if (p >= kHiAppMemBeg && p < kHiAppMemEnd)
+ else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
mem[file ? MemFile : MemMmap] += rss;
#else
- else if (p >= kAppMemBeg && p < kAppMemEnd)
+ else if (p >= AppMemBeg() && p < AppMemEnd())
mem[file ? MemFile : MemMmap] += rss;
#endif
- else if (p >= kTraceMemBeg && p < kTraceMemEnd)
+ else if (p >= TraceMemBeg() && p < TraceMemEnd())
mem[MemTrace] += rss;
else
mem[MemOther] += rss;
}
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
- uptr mem[MemCount] = {};
+ uptr mem[MemCount];
+ internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
__sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
StackDepotStats *stacks = StackDepotGetStats();
internal_snprintf(buf, buf_size,
@@ -123,7 +132,7 @@ void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
void FlushShadowMemoryCallback(
const SuspendedThreadsList &suspended_threads_list,
void *argument) {
- FlushUnneededShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
+ ReleaseMemoryToOS(ShadowBeg(), ShadowEnd() - ShadowBeg());
}
#endif
@@ -133,7 +142,7 @@ void FlushShadowMemory() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
@@ -195,55 +204,35 @@ void InitializeShadowMemoryPlatform() {
MapRodata();
}
-static void InitDataSeg() {
- MemoryMappingLayout proc_maps(true);
- uptr start, end, offset;
- char name[128];
-#if SANITIZER_FREEBSD
- // On FreeBSD BSS is usually the last block allocated within the
- // low range and heap is the last block allocated within the range
- // 0x800000000-0x8ffffffff.
- while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
- /*protection*/ 0)) {
- DPrintf("%p-%p %p %s\n", start, end, offset, name);
- if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
- name[0] == '\0') {
- g_data_start = start;
- g_data_end = end;
- }
+#endif // #if !SANITIZER_GO
+
+void InitializePlatformEarly() {
+#ifdef TSAN_RUNTIME_VMA
+ vmaSize =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+#if defined(__aarch64__)
+ if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize);
+ Die();
}
-#else
- bool prev_is_data = false;
- while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
- /*protection*/ 0)) {
- DPrintf("%p-%p %p %s\n", start, end, offset, name);
- bool is_data = offset != 0 && name[0] != 0;
- // BSS may get merged with [heap] in /proc/self/maps. This is not very
- // reliable.
- bool is_bss = offset == 0 &&
- (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
- if (g_data_start == 0 && is_data)
- g_data_start = start;
- if (is_bss)
- g_data_end = end;
- prev_is_data = is_data;
+#elif defined(__powerpc64__)
+ if (vmaSize != 44 && vmaSize != 46) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %d - Supported 44 and 46\n", vmaSize);
+ Die();
}
#endif
- DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
- CHECK_LT(g_data_start, g_data_end);
- CHECK_GE((uptr)&g_data_start, g_data_start);
- CHECK_LT((uptr)&g_data_start, g_data_end);
+#endif
}
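// Why the detection works (illustrative, hypothetical addresses): the
// initial stack lives near the top of the virtual address space, so the
// index of the most significant set bit of the current frame address, plus
// one, equals the VMA width. On a 39-bit aarch64 kernel GET_CURRENT_FRAME()
// returns something like 0x7ffffff000 (bit 38 set) -> vmaSize == 39; near
// 0x3fffffff000 (bit 41 set) it yields vmaSize == 42.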
-#endif // #ifndef SANITIZER_GO
-
void InitializePlatform() {
DisableCoreDumperIfNecessary();
// Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem either, because the executable
  // is not compiled with -pie.
- if (kCppMode) {
+ if (!SANITIZER_GO) {
bool reexec = false;
// TSan doesn't play well with unlimited stack size (as stack
// overlaps with shadow memory). If we detect unlimited stack size,
@@ -266,6 +255,18 @@ void InitializePlatform() {
reexec = true;
}
#if SANITIZER_LINUX && defined(__aarch64__)
+  // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was introduced
+  // in the Linux kernel, the random gap between the stack and the mapped
+  // area increased from 128M to 36G on 39-bit aarch64. As it is almost
+  // impossible to cover such a big range, we disable randomized virtual
+  // address space on aarch64.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1, "WARNING: Program is run with randomized virtual address "
+ "space, which wouldn't work with ThreadSanitizer.\n"
+ "Re-execing with fixed virtual address space.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ reexec = true;
+ }
// Initialize the guard pointer used in {sig}{set,long}jump.
InitializeGuardPtr();
#endif
@@ -273,23 +274,18 @@ void InitializePlatform() {
ReExec();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
InitTlsSize();
- InitDataSeg();
#endif
}
-bool IsGlobalVar(uptr addr) {
- return g_data_start && addr >= g_data_start && addr < g_data_end;
-}
-
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
-#if SANITIZER_LINUX
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
int cnt = 0;
__res_state *statp = (__res_state*)state;
for (int i = 0; i < MAXNS && cnt < nfd; i++) {
@@ -337,10 +333,73 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif
+#if !SANITIZER_GO
+#if SANITIZER_ANDROID
+
+#if defined(__aarch64__)
+# define __get_tls() \
+ ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
+#elif defined(__x86_64__)
+# define __get_tls() \
+ ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
+#else
+#error unsupported architecture
+#endif
+
+// On Android, __thread is not supported, so we store the pointer to
+// ThreadState in TLS_SLOT_TSAN, the TLS slot allocated by Android's bionic
+// for tsan.
+static const int TLS_SLOT_TSAN = 8;
+// On Android, one thread can call intercepted functions after
+// DestroyThreadState(), so add a fake thread state for "dead" threads.
+static ThreadState *dead_thread_state = nullptr;
+
+ThreadState *cur_thread() {
+ ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
+ if (thr == nullptr) {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ thr = reinterpret_cast<ThreadState*>(__get_tls()[TLS_SLOT_TSAN]);
+ if (thr == nullptr) {
+ thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
+ "ThreadState"));
+ __get_tls()[TLS_SLOT_TSAN] = thr;
+ if (dead_thread_state == nullptr) {
+ dead_thread_state = reinterpret_cast<ThreadState*>(
+ MmapOrDie(sizeof(ThreadState), "ThreadState"));
+ dead_thread_state->fast_state.SetIgnoreBit();
+ dead_thread_state->ignore_interceptors = 1;
+ dead_thread_state->is_dead = true;
+ *const_cast<int*>(&dead_thread_state->tid) = -1;
+ CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
+ PROT_READ));
+ }
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+ }
+ return thr;
+}
+
+void cur_thread_finalize() {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ ThreadState* thr = (ThreadState*)__get_tls()[TLS_SLOT_TSAN];
+ if (thr != dead_thread_state) {
+ __get_tls()[TLS_SLOT_TSAN] = dead_thread_state;
+ UnmapOrDie(thr, sizeof(ThreadState));
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+}
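// Net effect: a thread that enters an intercepted function after
// cur_thread_finalize() transparently observes dead_thread_state, whose
// ignore bits make tsan skip its accesses, and the PROT_READ mprotect above
// turns any unexpected write to that state into an immediate fault rather
// than silent corruption.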
+#endif // SANITIZER_ANDROID
+#endif // if !SANITIZER_GO
+
} // namespace __tsan
#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
diff --git a/libsanitizer/tsan/tsan_platform_mac.cc b/libsanitizer/tsan/tsan_platform_mac.cc
index e1405ffeabf..ff5131e75bb 100644
--- a/libsanitizer/tsan/tsan_platform_mac.cc
+++ b/libsanitizer/tsan/tsan_platform_mac.cc
@@ -40,7 +40,7 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
void *val = (void *)atomic_load_relaxed(a);
@@ -65,20 +65,18 @@ static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
// when TLVs are not accessible (early process startup, thread cleanup, ...).
// The following provides a "poor man's TLV" implementation, where we use the
// shadow memory of the pointer returned by pthread_self() to store a pointer to
-// the ThreadState object. The main thread's ThreadState pointer is stored
-// separately in a static variable, because we need to access it even before the
+// the ThreadState object. The main thread's ThreadState is stored separately
+// in a static variable, because we need to access it even before the
// shadow memory is set up.
static uptr main_thread_identity = 0;
-static ThreadState *main_thread_state = nullptr;
+ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
ThreadState *cur_thread() {
- ThreadState **fake_tls;
uptr thread_identity = (uptr)pthread_self();
if (thread_identity == main_thread_identity || main_thread_identity == 0) {
- fake_tls = &main_thread_state;
- } else {
- fake_tls = (ThreadState **)MemToShadow(thread_identity);
+ return (ThreadState *)&main_thread_state;
}
+ ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
ThreadState *thr = (ThreadState *)SignalSafeGetOrAllocate(
(uptr *)fake_tls, sizeof(ThreadState));
return thr;
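// The trick restated as a sketch: pthread_self() returns an ordinary app
// address, and the tsan-owned shadow of any app address is always mapped, so
//   ThreadState **slot = (ThreadState **)MemToShadow((uptr)pthread_self());
// provides a stable, per-thread, signal-safe pointer-sized slot without real
// TLS machinery.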
@@ -89,7 +87,11 @@ ThreadState *cur_thread() {
// handler will try to access the unmapped ThreadState.
void cur_thread_finalize() {
uptr thread_identity = (uptr)pthread_self();
- CHECK_NE(thread_identity, main_thread_identity);
+ if (thread_identity == main_thread_identity) {
+ // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
+ // exit the main thread. Let's keep the main thread's ThreadState.
+ return;
+ }
ThreadState **fake_tls = (ThreadState **)MemToShadow(thread_identity);
internal_munmap(*fake_tls, sizeof(ThreadState));
*fake_tls = nullptr;
@@ -106,7 +108,7 @@ void FlushShadowMemory() {
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemoryPlatform() { }
// On OS X, GCD worker threads are created without a call to pthread_create. We
@@ -122,23 +124,27 @@ typedef void (*pthread_introspection_hook_t)(unsigned int event,
extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
pthread_introspection_hook_t hook);
static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
-static const uptr PTHREAD_INTROSPECTION_THREAD_DESTROY = 4;
+static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
static pthread_introspection_hook_t prev_pthread_introspection_hook;
static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
void *addr, size_t size) {
if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
if (thread == pthread_self()) {
// The current thread is a newly created GCD worker thread.
+ ThreadState *thr = cur_thread();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
ThreadState *parent_thread_state = nullptr; // No parent.
int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
CHECK_NE(tid, 0);
- ThreadState *thr = cur_thread();
ThreadStart(thr, tid, GetTid());
}
- } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
- ThreadState *thr = cur_thread();
- if (thr->tctx && thr->tctx->parent_tid == kInvalidTid) {
- DestroyThreadState();
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
+ if (thread == pthread_self()) {
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
+ }
}
}
@@ -147,9 +153,12 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
}
#endif
+void InitializePlatformEarly() {
+}
+
void InitializePlatform() {
DisableCoreDumperIfNecessary();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
CheckAndProtect();
CHECK_EQ(main_thread_identity, 0);
@@ -160,7 +169,7 @@ void InitializePlatform() {
#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
@@ -176,10 +185,6 @@ int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
}
#endif
-bool IsGlobalVar(uptr addr) {
- return false;
-}
-
} // namespace __tsan
#endif // SANITIZER_MAC
diff --git a/libsanitizer/tsan/tsan_platform_posix.cc b/libsanitizer/tsan/tsan_platform_posix.cc
index 5e3d12e9496..0a3b61a08dc 100644
--- a/libsanitizer/tsan/tsan_platform_posix.cc
+++ b/libsanitizer/tsan/tsan_platform_posix.cc
@@ -21,15 +21,16 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void InitializeShadowMemory() {
// Map memory shadow.
uptr shadow =
- (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow");
- if (shadow != kShadowBeg) {
+ (uptr)MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
+ "shadow");
+ if (shadow != ShadowBeg()) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
+ "to link with -pie (%p, %p).\n", shadow, ShadowBeg());
Die();
}
// This memory range is used for thread stacks and large user mmaps.
@@ -44,29 +45,50 @@ void InitializeShadowMemory() {
const uptr kMadviseRangeBeg = 0xff00000000ull;
const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__)
- const uptr kMadviseRangeBeg = 0x7e00000000ull;
- const uptr kMadviseRangeSize = 0x0100000000ull;
+ uptr kMadviseRangeBeg = 0;
+ uptr kMadviseRangeSize = 0;
+ if (vmaSize == 39) {
+ kMadviseRangeBeg = 0x7d00000000ull;
+ kMadviseRangeSize = 0x0300000000ull;
+ } else if (vmaSize == 42) {
+ kMadviseRangeBeg = 0x3f000000000ull;
+ kMadviseRangeSize = 0x01000000000ull;
+ } else {
+ DCHECK(0);
+ }
+#elif defined(__powerpc64__)
+ uptr kMadviseRangeBeg = 0;
+ uptr kMadviseRangeSize = 0;
+ if (vmaSize == 44) {
+ kMadviseRangeBeg = 0x0f60000000ull;
+ kMadviseRangeSize = 0x0010000000ull;
+ } else if (vmaSize == 46) {
+ kMadviseRangeBeg = 0x3f0000000000ull;
+ kMadviseRangeSize = 0x010000000000ull;
+ } else {
+ DCHECK(0);
+ }
#endif
NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
kMadviseRangeSize * kShadowMultiplier);
// Meta shadow is compressing and we don't flush it,
// so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
// On one program it reduces memory consumption from 5GB to 2.5GB.
- NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg);
+ NoHugePagesInRegion(MetaShadowBeg(), MetaShadowEnd() - MetaShadowBeg());
if (common_flags()->use_madv_dontdump)
- DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
+ DontDumpShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
- kShadowBeg, kShadowEnd,
- (kShadowEnd - kShadowBeg) >> 30);
+ ShadowBeg(), ShadowEnd(),
+ (ShadowEnd() - ShadowBeg()) >> 30);
// Map meta shadow.
- uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
+ uptr meta_size = MetaShadowEnd() - MetaShadowBeg();
uptr meta =
- (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow");
- if (meta != kMetaShadowBeg) {
+ (uptr)MmapFixedNoReserve(MetaShadowBeg(), meta_size, "meta shadow");
+ if (meta != MetaShadowBeg()) {
Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
Printf("FATAL: Make sure to compile with -fPIE and "
- "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
+ "to link with -pie (%p, %p).\n", meta, MetaShadowBeg());
Die();
}
if (common_flags()->use_madv_dontdump)
@@ -81,7 +103,7 @@ static void ProtectRange(uptr beg, uptr end) {
CHECK_LE(beg, end);
if (beg == end)
return;
- if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
+ if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) {
Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
Printf("FATAL: Make sure you are not using unlimited stack\n");
Die();
@@ -95,25 +117,30 @@ void CheckAndProtect() {
while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
if (IsAppMem(p))
continue;
- if (p >= kHeapMemEnd &&
+ if (p >= HeapMemEnd() &&
p < HeapEnd())
continue;
if (prot == 0) // Zero page or mprotected.
continue;
- if (p >= kVdsoBeg) // vdso
+ if (p >= VdsoBeg()) // vdso
break;
Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
Die();
}
- ProtectRange(kLoAppMemEnd, kShadowBeg);
- ProtectRange(kShadowEnd, kMetaShadowBeg);
- ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+ ProtectRange(LoAppMemEnd(), ShadowBeg());
+ ProtectRange(ShadowEnd(), MetaShadowBeg());
+#ifdef TSAN_MID_APP_RANGE
+ ProtectRange(MetaShadowEnd(), MidAppMemBeg());
+ ProtectRange(MidAppMemEnd(), TraceMemBeg());
+#else
+ ProtectRange(MetaShadowEnd(), TraceMemBeg());
+#endif
// Memory for traces is mapped lazily in MapThreadTrace.
// Protect the whole range for now, so that user does not map something here.
- ProtectRange(kTraceMemBeg, kTraceMemEnd);
- ProtectRange(kTraceMemEnd, kHeapMemBeg);
- ProtectRange(HeapEnd(), kHiAppMemBeg);
+ ProtectRange(TraceMemBeg(), TraceMemEnd());
+ ProtectRange(TraceMemEnd(), HeapMemBeg());
+ ProtectRange(HeapEnd(), HiAppMemBeg());
}
#endif
diff --git a/libsanitizer/tsan/tsan_platform_windows.cc b/libsanitizer/tsan/tsan_platform_windows.cc
index ccea22ed66e..54ec6a655df 100644
--- a/libsanitizer/tsan/tsan_platform_windows.cc
+++ b/libsanitizer/tsan/tsan_platform_windows.cc
@@ -29,6 +29,9 @@ void FlushShadowMemory() {
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
}
+void InitializePlatformEarly() {
+}
+
void InitializePlatform() {
}
diff --git a/libsanitizer/tsan/tsan_ppc_regs.h b/libsanitizer/tsan/tsan_ppc_regs.h
new file mode 100644
index 00000000000..15bd10ad96b
--- /dev/null
+++ b/libsanitizer/tsan/tsan_ppc_regs.h
@@ -0,0 +1,94 @@
+#define r0 0
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
diff --git a/libsanitizer/tsan/tsan_preinit.cc b/libsanitizer/tsan/tsan_preinit.cc
new file mode 100644
index 00000000000..d5d1659c09b
--- /dev/null
+++ b/libsanitizer/tsan/tsan_preinit.cc
@@ -0,0 +1,25 @@
+//===-- tsan_preinit.cc ---------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer.
+//
+// Call __tsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interface.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// The symbol is called __local_tsan_preinit because it is not intended to be
+// exported.
+// This code is linked into the main executable when -fsanitize=thread is
+// among the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"), used))
+void (*__local_tsan_preinit)(void) = __tsan_init;
+
+#endif
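// For context, a minimal standalone illustration of the mechanism
// (hypothetical names): the dynamic loader invokes every pointer placed in
// .preinit_array before any shared-library constructors run, so
//
//   static void early_init(void) { /* runs before all C++ ctors */ }
//   __attribute__((section(".preinit_array"), used))
//   static void (*early_init_ptr)(void) = early_init;
//
// guarantees initialization before instrumented global constructors execute.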
diff --git a/libsanitizer/tsan/tsan_report.cc b/libsanitizer/tsan/tsan_report.cc
index 119b1ec1da9..ba3e34fbfa5 100644
--- a/libsanitizer/tsan/tsan_report.cc
+++ b/libsanitizer/tsan/tsan_report.cc
@@ -69,7 +69,7 @@ ReportDesc::~ReportDesc() {
// FIXME(dvyukov): it must be leaking a lot of memory.
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const int kThreadBufSize = 32;
const char *thread_name(char *buf, int tid) {
@@ -94,6 +94,8 @@ static const char *ReportTypeString(ReportType typ) {
return "destroy of a locked mutex";
if (typ == ReportTypeMutexDoubleLock)
return "double lock of a mutex";
+ if (typ == ReportTypeMutexInvalidAccess)
+ return "use of an invalid mutex (e.g. uninitialized or destroyed)";
if (typ == ReportTypeMutexBadUnlock)
return "unlock of an unlocked mutex (or by a wrong thread)";
if (typ == ReportTypeMutexBadReadLock)
@@ -232,7 +234,7 @@ static void PrintThread(const ReportThread *rt) {
Printf(" '%s'", rt->name);
char thrbuf[kThreadBufSize];
Printf(" (tid=%zu, %s) created by %s",
- rt->pid, rt->running ? "running" : "finished",
+ rt->os_id, rt->running ? "running" : "finished",
thread_name(thrbuf, rt->parent_tid));
if (rt->stack)
Printf(" at:");
@@ -265,10 +267,15 @@ static bool FrameIsInternal(const SymbolizedStack *frame) {
if (frame == 0)
return false;
const char *file = frame->info.file;
- return file != 0 &&
- (internal_strstr(file, "tsan_interceptors.cc") ||
- internal_strstr(file, "sanitizer_common_interceptors.inc") ||
- internal_strstr(file, "tsan_interface_"));
+ const char *module = frame->info.module;
+ if (file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_")))
+ return true;
+ if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_")))
+ return true;
+ return false;
}
static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
@@ -352,7 +359,7 @@ void PrintReport(const ReportDesc *rep) {
Printf("==================\n");
}
-#else // #ifndef SANITIZER_GO
+#else // #if !SANITIZER_GO
const int kMainThreadId = 1;
@@ -372,9 +379,9 @@ void PrintStack(const ReportStack *ent) {
static void PrintMop(const ReportMop *mop, bool first) {
Printf("\n");
- Printf("%s by ",
+ Printf("%s at %p by ",
(first ? (mop->write ? "Write" : "Read")
- : (mop->write ? "Previous write" : "Previous read")));
+ : (mop->write ? "Previous write" : "Previous read")), mop->addr);
if (mop->tid == kMainThreadId)
Printf("main goroutine:\n");
else
@@ -382,6 +389,31 @@ static void PrintMop(const ReportMop *mop, bool first) {
PrintStack(mop->stack);
}
+static void PrintLocation(const ReportLocation *loc) {
+ switch (loc->type) {
+ case ReportLocationHeap: {
+ Printf("\n");
+ Printf("Heap block of size %zu at %p allocated by ",
+ loc->heap_chunk_size, loc->heap_chunk_start);
+ if (loc->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", loc->tid);
+ PrintStack(loc->stack);
+ break;
+ }
+ case ReportLocationGlobal: {
+ Printf("\n");
+ Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
+ loc->global.name, loc->global.size, loc->global.start,
+ loc->global.file, loc->global.line);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
static void PrintThread(const ReportThread *rt) {
if (rt->id == kMainThreadId)
return;
@@ -397,6 +429,8 @@ void PrintReport(const ReportDesc *rep) {
Printf("WARNING: DATA RACE");
for (uptr i = 0; i < rep->mops.Size(); i++)
PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
for (uptr i = 0; i < rep->threads.Size(); i++)
PrintThread(rep->threads[i]);
} else if (rep->typ == ReportTypeDeadlock) {
diff --git a/libsanitizer/tsan/tsan_report.h b/libsanitizer/tsan/tsan_report.h
index 68924edb547..e51ed4f50f8 100644
--- a/libsanitizer/tsan/tsan_report.h
+++ b/libsanitizer/tsan/tsan_report.h
@@ -25,6 +25,7 @@ enum ReportType {
ReportTypeThreadLeak,
ReportTypeMutexDestroyLocked,
ReportTypeMutexDoubleLock,
+ ReportTypeMutexInvalidAccess,
ReportTypeMutexBadUnlock,
ReportTypeMutexBadReadLock,
ReportTypeMutexBadReadUnlock,
@@ -84,7 +85,7 @@ struct ReportLocation {
struct ReportThread {
int id;
- uptr pid;
+ uptr os_id;
bool running;
char *name;
int parent_tid;
diff --git a/libsanitizer/tsan/tsan_rtl.cc b/libsanitizer/tsan/tsan_rtl.cc
index 4fceca6f41f..07fa165e939 100644
--- a/libsanitizer/tsan/tsan_rtl.cc
+++ b/libsanitizer/tsan/tsan_rtl.cc
@@ -42,7 +42,7 @@ extern "C" void __tsan_resume() {
namespace __tsan {
-#if !defined(SANITIZER_GO) && !SANITIZER_MAC
+#if !SANITIZER_GO && !SANITIZER_MAC
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
@@ -53,12 +53,12 @@ Context *ctx;
bool OnFinalize(bool failed);
void OnInitialize();
#else
-SANITIZER_INTERFACE_ATTRIBUTE
-bool WEAK OnFinalize(bool failed) {
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnFinalize(bool failed) {
return failed;
}
-SANITIZER_INTERFACE_ATTRIBUTE
-void WEAK OnInitialize() {}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+void OnInitialize() {}
#endif
static char thread_registry_placeholder[sizeof(ThreadRegistry)];
@@ -84,7 +84,7 @@ static ThreadContextBase *CreateThreadContext(u32 tid) {
return new(mem) ThreadContext(tid);
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
@@ -115,7 +115,7 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
// , ignore_reads_and_writes()
// , ignore_interceptors()
, clock(tid, reuse_count)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, jmp_bufs(MBlockJmpBuf)
#endif
, tid(tid)
@@ -124,13 +124,13 @@ ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
, stk_size(stk_size)
, tls_addr(tls_addr)
, tls_size(tls_size)
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
, last_sleep_clock(tid)
#endif
{
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
uptr n_threads;
uptr n_running_threads;
@@ -233,14 +233,17 @@ static void StopBackgroundThread() {
void DontNeedShadowFor(uptr addr, uptr size) {
uptr shadow_beg = MemToShadow(addr);
uptr shadow_end = MemToShadow(addr + size);
- FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
+ ReleaseMemoryToOS(shadow_beg, shadow_end - shadow_beg);
}
void MapShadow(uptr addr, uptr size) {
// Global data is not 64K aligned, but there are no adjacent mappings,
// so we can get away with unaligned mapping.
// CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
- MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");
+ const uptr kPageSize = GetPageSizeCached();
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
+ MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
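// (Rounding sketch, assuming a 4K page: RoundDownTo(x, 4096) is x & ~4095
// and RoundUpTo(x, 4096) is (x + 4095) & ~4095, so the MmapFixedNoReserve
// call above always covers whole pages even when MemToShadow(addr) lands
// mid-page.)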
// Meta shadow is 2:1, so tread carefully.
static bool data_mapped = false;
@@ -271,8 +274,8 @@ void MapShadow(uptr addr, uptr size) {
void MapThreadTrace(uptr addr, uptr size, const char *name) {
DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
- CHECK_GE(addr, kTraceMemBeg);
- CHECK_LE(addr + size, kTraceMemEnd);
+ CHECK_GE(addr, TraceMemBeg());
+ CHECK_LE(addr + size, TraceMemEnd());
CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
if (addr1 != addr) {
@@ -283,13 +286,17 @@ void MapThreadTrace(uptr addr, uptr size, const char *name) {
}
static void CheckShadowMapping() {
- for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
- const uptr beg = UserRegions[i];
- const uptr end = UserRegions[i + 1];
+ uptr beg, end;
+ for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
+    // Skip empty regions (the heap definition on architectures that do not
+    // use the 64-bit allocator).
+ if (beg == end)
+ continue;
VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ uptr prev = 0;
for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
- for (int x = -1; x <= 1; x++) {
- const uptr p = p0 + x;
+ for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
+ const uptr p = RoundDown(p0 + x, kShadowCell);
if (p < beg || p >= end)
continue;
const uptr s = MemToShadow(p);
@@ -297,8 +304,18 @@ static void CheckShadowMapping() {
VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
CHECK(IsAppMem(p));
CHECK(IsShadowMem(s));
- CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
+ CHECK_EQ(p, ShadowToMem(s));
CHECK(IsMetaMem(m));
+ if (prev) {
+ // Ensure that shadow and meta mappings are linear within a single
+ // user range. Lots of code that processes memory ranges assumes it.
+ const uptr prev_s = MemToShadow(prev);
+ const uptr prev_m = (uptr)MemToMeta(prev);
+ CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
+ CHECK_EQ((m - prev_m) / kMetaShadowSize,
+ (p - prev) / kMetaShadowCell);
+ }
+ prev = p;
}
}
}
@@ -317,26 +334,35 @@ void Initialize(ThreadState *thr) {
SetCheckFailedCallback(TsanCheckFailed);
ctx = new(ctx_placeholder) Context;
- const char *options = GetEnv(kTsanOptionsEnv);
+ const char *options = GetEnv(SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS");
CacheBinaryName();
InitializeFlags(&ctx->flags, options);
- CheckVMASize();
-#ifndef SANITIZER_GO
+ AvoidCVE_2016_2143();
+ InitializePlatformEarly();
+#if !SANITIZER_GO
+ // Re-exec ourselves if we need to set additional env or command line args.
+ MaybeReexec();
+
InitializeAllocator();
ReplaceSystemMalloc();
#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
InitializeInterceptors();
CheckShadowMapping();
InitializePlatform();
InitializeMutex();
InitializeDynamicAnnotations();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeShadowMemory();
+ InitializeAllocatorLate();
#endif
// Setup correct file descriptor for error reports.
__sanitizer_set_report_path(common_flags()->log_path);
InitializeSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
InitializeLibIgnore();
Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
// On MIPS, TSan initialization is run before
@@ -347,8 +373,6 @@ void Initialize(ThreadState *thr) {
SetSandboxingCallback(StopBackgroundThread);
#endif
#endif
- if (common_flags()->detect_deadlocks)
- ctx->dd = DDetector::Create(flags());
VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
(int)internal_getpid());
@@ -362,6 +386,10 @@ void Initialize(ThreadState *thr) {
#endif
ctx->initialized = true;
+#if !SANITIZER_GO
+ Symbolizer::LateInitialize();
+#endif
+
if (flags()->stop_on_start) {
Printf("ThreadSanitizer is suspended at startup (pid %d)."
" Call __tsan_resume().\n",
@@ -384,7 +412,7 @@ int Finalize(ThreadState *thr) {
CommonSanitizerReportMutex.Unlock();
ctx->report_mtx.Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (Verbosity()) AllocatorPrintStats();
#endif
@@ -392,7 +420,7 @@ int Finalize(ThreadState *thr) {
if (ctx->nreported) {
failed = true;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
Printf("Found %d data race(s)\n", ctx->nreported);
@@ -407,7 +435,7 @@ int Finalize(ThreadState *thr) {
if (common_flags()->print_suppressions)
PrintMatchedSuppressions();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (flags()->print_benign)
PrintMatchedBenignRaces();
#endif
@@ -422,7 +450,7 @@ int Finalize(ThreadState *thr) {
return failed ? common_flags()->exitcode : 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
@@ -455,7 +483,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc) {
}
#endif
-#ifdef SANITIZER_GO
+#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
const int sz = thr->shadow_stack_end - thr->shadow_stack;
@@ -474,7 +502,7 @@ u32 CurrentStackId(ThreadState *thr, uptr pc) {
if (!thr->is_inited) // May happen during bootstrap.
return 0;
if (pc != 0) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -520,7 +548,7 @@ uptr TraceParts() {
return TraceSize() / kTracePartSize;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
TraceSwitch(cur_thread());
}
@@ -553,7 +581,7 @@ void HandleRace(ThreadState *thr, u64 *shadow_mem,
thr->racy_state[0] = cur.raw();
thr->racy_state[1] = old.raw();
thr->racy_shadow_addr = shadow_mem;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_report_race);
#else
ReportRace(thr);
@@ -750,7 +778,7 @@ void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
}
#endif
- if (kCppMode && *shadow_mem == kShadowRodata) {
+ if (!SANITIZER_GO && *shadow_mem == kShadowRodata) {
// Access to .rodata section, no races here.
// Measurements show that it can be 10-20% of all memory accesses.
StatInc(thr, StatMop);
@@ -837,7 +865,7 @@ static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
// UnmapOrDie/MmapFixedNoReserve does not work on Windows,
// so we do it only for C/C++.
- if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
+ if (SANITIZER_GO || size < common_flags()->clear_shadow_mmap_threshold) {
u64 *p = (u64*)MemToShadow(addr);
CHECK(IsShadowMem((uptr)p));
CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
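For context, the bound in the CHECK above follows from the shadow geometry: each kShadowCell application bytes own kShadowCnt 64-bit shadow words. With the usual 8-byte cell and 4 slots (assumed defaults, not restated in this hunk), shadow occupies 4x the application bytes:

    #include <cstdint>

    constexpr uint64_t kShadowCell = 8;  // app bytes per shadow cell (assumed)
    constexpr uint64_t kShadowCnt = 4;   // u64 shadow slots per cell (assumed)

    // Shadow words covering 'size' app bytes, matching the bound used above:
    // p + size * kShadowCnt / kShadowCell - 1.
    constexpr uint64_t ShadowWords(uint64_t size) {
      return size * kShadowCnt / kShadowCell;
    }
    static_assert(ShadowWords(4096) == 2048,
                  "4 KB of app memory -> 2048 shadow words = 16 KB of shadow");

    int main() {}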
@@ -923,7 +951,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
// Shadow stack maintenance can be replaced with
// stack unwinding during trace switch (which presumably must be faster).
DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
if (thr->shadow_stack_pos == thr->shadow_stack_end)
@@ -943,7 +971,7 @@ void FuncExit(ThreadState *thr) {
}
DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
thr->shadow_stack_pos--;
@@ -954,7 +982,7 @@ void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
thr->ignore_reads_and_writes++;
CHECK_GT(thr->ignore_reads_and_writes, 0);
thr->fast_state.SetIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -966,7 +994,7 @@ void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
CHECK_GE(thr->ignore_reads_and_writes, 0);
if (thr->ignore_reads_and_writes == 0) {
thr->fast_state.ClearIgnoreBit();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->mop_ignore_set.Reset();
#endif
}
@@ -976,7 +1004,7 @@ void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
thr->ignore_sync++;
CHECK_GT(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!ctx->after_multithreaded_fork)
thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
@@ -986,7 +1014,7 @@ void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
thr->ignore_sync--;
CHECK_GE(thr->ignore_sync, 0);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (thr->ignore_sync == 0)
thr->sync_ignore_set.Reset();
#endif
@@ -1010,7 +1038,7 @@ void build_consistency_nostats() {}
} // namespace __tsan
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif
diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h
index 12587dd203e..522c76002a1 100644
--- a/libsanitizer/tsan/tsan_rtl.h
+++ b/libsanitizer/tsan/tsan_rtl.h
@@ -50,9 +50,9 @@
namespace __tsan {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct MapUnmapCallback;
-#if defined(__mips64) || defined(__aarch64__)
+#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorSpace = 0;
static const uptr kAllocatorSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kAllocatorRegionSizeLog = 20;
@@ -64,8 +64,15 @@ typedef SizeClassAllocator32<kAllocatorSpace, kAllocatorSize, 0,
CompactSizeClassMap, kAllocatorRegionSizeLog, ByteMap,
MapUnmapCallback> PrimaryAllocator;
#else
-typedef SizeClassAllocator64<kHeapMemBeg, kHeapMemEnd - kHeapMemBeg, 0,
- DefaultSizeClassMap, MapUnmapCallback> PrimaryAllocator;
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+ static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
+ static const uptr kMetadataSize = 0;
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
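The new AP64 struct trades a long positional template-argument list for a single traits parameter, matching the direction the sanitizer_common allocators were moving in. The shape of the pattern, as an illustrative stand-alone sketch (names invented, not the real allocator internals):

    #include <cstddef>

    // All tuning knobs travel in one traits struct, so the allocator takes a
    // single template parameter instead of half a dozen positional ones.
    template <class Params>
    struct Allocator64Sketch {
      static const size_t kSpaceBeg = Params::kSpaceBeg;
      static const size_t kSpaceSize = Params::kSpaceSize;
    };

    struct DemoParams {
      static const size_t kSpaceBeg = 0x10000;
      static const size_t kSpaceSize = 0x40000;
    };

    typedef Allocator64Sketch<DemoParams> DemoAllocator;
    static_assert(DemoAllocator::kSpaceSize == 0x40000, "traits forwarded");

    int main() {}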
@@ -322,6 +329,36 @@ struct JmpBuf {
uptr *shadow_stack_pos;
};
+// A Processor represents a physical thread, or a P for Go.
+// It is used to store internal resources like the allocator cache, and does
+// not participate in race-detection logic (it is invisible to the end user).
+// In C++ it is tied to an OS thread just like ThreadState; ideally, however,
+// it would be tied to a CPU (that way we would have fewer allocator caches).
+// In Go it is tied to a P, so there are significantly fewer Processors than
+// ThreadStates (which are tied to Gs).
+// A ThreadState must be wired with a Processor to handle events.
+struct Processor {
+ ThreadState *thr; // currently wired thread, or nullptr
+#if !SANITIZER_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+#endif
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+ DDPhysicalThread *dd_pt;
+};
+
+#if !SANITIZER_GO
+// ScopedGlobalProcessor temporarily sets up a global processor for the
+// current thread, if it does not have one. Intended for interceptors that
+// can run at the very end of a thread's lifetime, after the thread's own
+// processor has already been destroyed.
+struct ScopedGlobalProcessor {
+ ScopedGlobalProcessor();
+ ~ScopedGlobalProcessor();
+};
+#endif
+
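The intended protocol is that a ThreadState may only process events while wired to a Processor. ProcCreate/ProcWire/ProcUnwire/ProcDestroy are declared later in this header and defined in the new tsan_rtl_proc.cc; the driver below is only an illustrative sketch of the wiring order, assuming tsan_rtl.h is included:

    // Illustrative only: the real runtime wires one Processor per OS thread
    // (C/C++) or per P (Go).
    void ProcessorLifecycleSketch(__tsan::ThreadState *thr) {
      __tsan::Processor *proc = __tsan::ProcCreate();
      __tsan::ProcWire(proc, thr);    // thr->proc() becomes non-null
      // ... thr may now allocate blocks, create sync objects, flush clocks ...
      __tsan::ProcUnwire(proc, thr);  // unwire before destroying either side
      __tsan::ProcDestroy(proc);      // returns cached resources to global state
    }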
// This struct is stored in TLS.
struct ThreadState {
FastState fast_state;
@@ -343,7 +380,7 @@ struct ThreadState {
int ignore_reads_and_writes;
int ignore_sync;
// Go does not support ignores.
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
IgnoreSet mop_ignore_set;
IgnoreSet sync_ignore_set;
#endif
@@ -356,9 +393,7 @@ struct ThreadState {
u64 racy_state[2];
MutexSet mset;
ThreadClock clock;
-#ifndef SANITIZER_GO
- AllocatorCache alloc_cache;
- InternalAllocatorCache internal_alloc_cache;
+#if !SANITIZER_GO
Vector<JmpBuf> jmp_bufs;
int ignore_interceptors;
#endif
@@ -382,17 +417,20 @@ struct ThreadState {
#if SANITIZER_DEBUG && !SANITIZER_GO
InternalDeadlockDetector internal_deadlock_detector;
#endif
- DDPhysicalThread *dd_pt;
DDLogicalThread *dd_lt;
+ // Current wired Processor, or nullptr. Required to handle any events.
+ Processor *proc1;
+#if !SANITIZER_GO
+ Processor *proc() { return proc1; }
+#else
+ Processor *proc();
+#endif
+
atomic_uintptr_t in_signal_handler;
ThreadSignalContext *signal_ctx;
- DenseSlabAllocCache block_cache;
- DenseSlabAllocCache sync_cache;
- DenseSlabAllocCache clock_cache;
-
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
u32 last_sleep_stack_id;
ThreadClock last_sleep_clock;
#endif
@@ -401,14 +439,16 @@ struct ThreadState {
// If set, malloc must not be called.
int nomalloc;
+ const ReportDesc *current_report;
+
explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
unsigned reuse_count,
uptr stk_addr, uptr stk_size,
uptr tls_addr, uptr tls_size);
};
-#ifndef SANITIZER_GO
-#if SANITIZER_MAC
+#if !SANITIZER_GO
+#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();
#else
@@ -418,7 +458,7 @@ INLINE ThreadState *cur_thread() {
return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
INLINE void cur_thread_finalize() { }
-#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC || SANITIZER_ANDROID
#endif // SANITIZER_GO
class ThreadContext : public ThreadContextBase {
@@ -505,13 +545,13 @@ extern Context *ctx; // The one and the only global runtime context.
struct ScopedIgnoreInterceptors {
ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors++;
#endif
}
~ScopedIgnoreInterceptors() {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
cur_thread()->ignore_interceptors--;
#endif
}
@@ -680,6 +720,11 @@ void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
+Processor *ProcCreate();
+void ProcDestroy(Processor *proc);
+void ProcWire(Processor *proc, ThreadState *thr);
+void ProcUnwire(Processor *proc, ThreadState *thr);
+
void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
bool rw, bool recursive, bool linker_init);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr);
@@ -690,6 +735,7 @@ void MutexReadLock(ThreadState *thr, uptr pc, uptr addr, bool try_lock = false);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
@@ -745,7 +791,7 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
StatInc(thr, StatEvents);
u64 pos = fs.GetTracePos();
if (UNLIKELY((pos % kTracePartSize) == 0)) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
HACKY_CALL(__tsan_trace_switch);
#else
TraceSwitch(thr);
@@ -757,9 +803,9 @@ void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
*evp = ev;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
- return kHeapMemEnd + PrimaryAllocator::AdditionalSize();
+ return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif
diff --git a/libsanitizer/tsan/tsan_rtl_aarch64.S b/libsanitizer/tsan/tsan_rtl_aarch64.S
index ef06f0444ae..ab5a830d60c 100644
--- a/libsanitizer/tsan/tsan_rtl_aarch64.S
+++ b/libsanitizer/tsan/tsan_rtl_aarch64.S
@@ -1,6 +1,4 @@
#include "sanitizer_common/sanitizer_asm.h"
-
-.section .bss
.type __tsan_pointer_chk_guard, %object
.size __tsan_pointer_chk_guard, 8
__tsan_pointer_chk_guard:
diff --git a/libsanitizer/tsan/tsan_rtl_amd64.S b/libsanitizer/tsan/tsan_rtl_amd64.S
index 6df36a500a7..caa832375e5 100644
--- a/libsanitizer/tsan/tsan_rtl_amd64.S
+++ b/libsanitizer/tsan/tsan_rtl_amd64.S
@@ -1,7 +1,13 @@
#include "sanitizer_common/sanitizer_asm.h"
-.hidden __tsan_trace_switch
-.globl __tsan_trace_switch_thunk
-__tsan_trace_switch_thunk:
+#if !defined(__APPLE__)
+.section .text
+#else
+.section __TEXT,__text
+#endif
+
+ASM_HIDDEN(__tsan_trace_switch)
+.globl ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk)
+ASM_TSAN_SYMBOL(__tsan_trace_switch_thunk):
CFI_STARTPROC
# Save scratch registers.
push %rax
@@ -40,7 +46,7 @@ __tsan_trace_switch_thunk:
shr $4, %rsp # clear 4 lsb, align to 16
shl $4, %rsp
- call __tsan_trace_switch
+ call ASM_TSAN_SYMBOL(__tsan_trace_switch)
# Unalign stack frame back.
mov %rbx, %rsp # restore the original rsp
@@ -79,9 +85,9 @@ __tsan_trace_switch_thunk:
ret
CFI_ENDPROC
-.hidden __tsan_report_race
-.globl __tsan_report_race_thunk
-__tsan_report_race_thunk:
+ASM_HIDDEN(__tsan_report_race)
+.globl ASM_TSAN_SYMBOL(__tsan_report_race_thunk)
+ASM_TSAN_SYMBOL(__tsan_report_race_thunk):
CFI_STARTPROC
# Save scratch registers.
push %rax
@@ -120,7 +126,7 @@ __tsan_report_race_thunk:
shr $4, %rsp # clear 4 lsb, align to 16
shl $4, %rsp
- call __tsan_report_race
+ call ASM_TSAN_SYMBOL(__tsan_report_race)
# Unalign stack frame back.
mov %rbx, %rsp # restore the original rsp
@@ -159,11 +165,13 @@ __tsan_report_race_thunk:
ret
CFI_ENDPROC
-.hidden __tsan_setjmp
+ASM_HIDDEN(__tsan_setjmp)
+#if !defined(__APPLE__)
.comm _ZN14__interception11real_setjmpE,8,8
-.globl setjmp
-.type setjmp, @function
-setjmp:
+#endif
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
CFI_STARTPROC
// save env parameter
push %rdi
@@ -173,29 +181,38 @@ setjmp:
#if defined(__FreeBSD__)
lea 8(%rsp), %rdi
mov %rdi, %rsi
-#else
+#elif defined(__APPLE__)
+ lea 16(%rsp), %rdi
+ mov %rdi, %rsi
+#elif defined(__linux__)
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#else
+# error "Unknown platform"
#endif
// call tsan interceptor
- call __tsan_setjmp
+ call ASM_TSAN_SYMBOL(__tsan_setjmp)
// restore env parameter
pop %rdi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rdi)
// tail jump to libc setjmp
movl $0, %eax
+#if !defined(__APPLE__)
movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
+#else
+ jmp ASM_TSAN_SYMBOL(setjmp)
+#endif
CFI_ENDPROC
-.size setjmp, .-setjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
.comm _ZN14__interception12real__setjmpE,8,8
-.globl _setjmp
-.type _setjmp, @function
-_setjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC
// save env parameter
push %rdi
@@ -205,29 +222,38 @@ _setjmp:
#if defined(__FreeBSD__)
lea 8(%rsp), %rdi
mov %rdi, %rsi
-#else
+#elif defined(__APPLE__)
+ lea 16(%rsp), %rdi
+ mov %rdi, %rsi
+#elif defined(__linux__)
lea 16(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#else
+# error "Unknown platform"
#endif
// call tsan interceptor
- call __tsan_setjmp
+ call ASM_TSAN_SYMBOL(__tsan_setjmp)
// restore env parameter
pop %rdi
CFI_ADJUST_CFA_OFFSET(-8)
CFI_RESTORE(%rdi)
// tail jump to libc setjmp
movl $0, %eax
+#if !defined(__APPLE__)
movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
+#else
+ jmp ASM_TSAN_SYMBOL(_setjmp)
+#endif
CFI_ENDPROC
-.size _setjmp, .-_setjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
.comm _ZN14__interception14real_sigsetjmpE,8,8
-.globl sigsetjmp
-.type sigsetjmp, @function
-sigsetjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
CFI_STARTPROC
// save env parameter
push %rdi
@@ -244,14 +270,19 @@ sigsetjmp:
#if defined(__FreeBSD__)
lea 24(%rsp), %rdi
mov %rdi, %rsi
-#else
+#elif defined(__APPLE__)
+ lea 32(%rsp), %rdi
+ mov %rdi, %rsi
+#elif defined(__linux__)
lea 32(%rsp), %rdi
mov %rdi, %rsi
xor %fs:0x30, %rsi // magic mangling of rsp (see libc setjmp)
rol $0x11, %rsi
+#else
+# error "Unknown platform"
#endif
// call tsan interceptor
- call __tsan_setjmp
+ call ASM_TSAN_SYMBOL(__tsan_setjmp)
// unalign stack frame
add $8, %rsp
CFI_ADJUST_CFA_OFFSET(-8)
@@ -265,15 +296,20 @@ sigsetjmp:
CFI_RESTORE(%rdi)
// tail jump to libc sigsetjmp
movl $0, %eax
+#if !defined(__APPLE__)
movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
+#else
+ jmp ASM_TSAN_SYMBOL(sigsetjmp)
+#endif
CFI_ENDPROC
-.size sigsetjmp, .-sigsetjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
+#if !defined(__APPLE__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
-.globl __sigsetjmp
-.type __sigsetjmp, @function
-__sigsetjmp:
+.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC
// save env parameter
push %rdi
@@ -297,7 +333,7 @@ __sigsetjmp:
rol $0x11, %rsi
#endif
// call tsan interceptor
- call __tsan_setjmp
+ call ASM_TSAN_SYMBOL(__tsan_setjmp)
// unalign stack frame
add $8, %rsp
CFI_ADJUST_CFA_OFFSET(-8)
@@ -314,7 +350,8 @@ __sigsetjmp:
movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
jmp *(%rdx)
CFI_ENDPROC
-.size __sigsetjmp, .-__sigsetjmp
+ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif // !defined(__APPLE__)
#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
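The "magic mangling" in these interceptors reproduces glibc's PTR_MANGLE scheme on x86_64: XOR the raw stack pointer with the per-process guard stored at %fs:0x30, then rotate left by 0x11, so the value handed to __tsan_setjmp matches what libc later stores into the jmp_buf. A C++ illustration of the transform (the guard constant is made up; the real one lives in the thread control block):

    #include <cassert>
    #include <cstdint>

    // Made-up guard value; glibc keeps the real one in the TCB at %fs:0x30.
    constexpr uint64_t kGuard = 0xdeadbeefcafef00dull;

    uint64_t Rotl(uint64_t v, unsigned n) { return (v << n) | (v >> (64 - n)); }
    uint64_t Rotr(uint64_t v, unsigned n) { return (v >> n) | (v << (64 - n)); }

    // Mangle exactly as the assembly does: xor with the guard, then rol 0x11.
    uint64_t Mangle(uint64_t sp) { return Rotl(sp ^ kGuard, 0x11); }
    uint64_t Demangle(uint64_t m) { return Rotr(m, 0x11) ^ kGuard; }

    int main() {
      const uint64_t sp = 0x00007ffdeadbe000ull;
      assert(Demangle(Mangle(sp)) == sp);  // the transform round-trips
    }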
diff --git a/libsanitizer/tsan/tsan_rtl_mips64.S b/libsanitizer/tsan/tsan_rtl_mips64.S
new file mode 100644
index 00000000000..b1c9d8bb12d
--- /dev/null
+++ b/libsanitizer/tsan/tsan_rtl_mips64.S
@@ -0,0 +1,212 @@
+.section .text
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+
+ // save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc setjmp to t9
+ dla $t9,(_ZN14__interception11real_setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size setjmp, .-setjmp
+
+.hidden __tsan_setjmp
+.globl _setjmp
+.comm _ZN14__interception12real__setjmpE,8,8
+.type _setjmp, @function
+_setjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(_setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(_setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc _setjmp to t9
+ dla $t9,(_ZN14__interception12real__setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc _setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size _setjmp, .-_setjmp
+
+.hidden __tsan_setjmp
+.globl sigsetjmp
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.type sigsetjmp, @function
+sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc sigsetjmp to t9
+ dla $t9,(_ZN14__interception14real_sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size sigsetjmp, .-sigsetjmp
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(__sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(__sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc __sigsetjmp in t9
+ dla $t9,(_ZN14__interception16real___sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc __sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size __sigsetjmp, .-__sigsetjmp
diff --git a/libsanitizer/tsan/tsan_rtl_mutex.cc b/libsanitizer/tsan/tsan_rtl_mutex.cc
index deb7722f521..e575bbfb7e9 100644
--- a/libsanitizer/tsan/tsan_rtl_mutex.cc
+++ b/libsanitizer/tsan/tsan_rtl_mutex.cc
@@ -30,7 +30,7 @@ struct Callback : DDCallback {
Callback(ThreadState *thr, uptr pc)
: thr(thr)
, pc(pc) {
- DDCallback::pt = thr->dd_pt;
+ DDCallback::pt = thr->proc()->dd_pt;
DDCallback::lt = thr->dd_lt;
}
@@ -48,7 +48,7 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
uptr addr, u64 mid) {
// In Go, these misuses are either impossible, or detected by std lib,
// or false positives (e.g. unlock in a different thread).
- if (kGoMode)
+ if (SANITIZER_GO)
return;
ThreadRegistryLock l(ctx->thread_registry);
ScopedReport rep(typ);
@@ -74,7 +74,7 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
s->is_rw = rw;
s->is_recursive = recursive;
s->is_linker_init = linker_init;
- if (kCppMode && s->creation_stack_id == 0)
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
s->creation_stack_id = CurrentStackId(thr, pc);
s->mtx.Unlock();
}
@@ -82,21 +82,14 @@ void MutexCreate(ThreadState *thr, uptr pc, uptr addr,
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
StatInc(thr, StatMutexDestroy);
-#ifndef SANITIZER_GO
- // Global mutexes not marked as LINKER_INITIALIZED
- // cause tons of not interesting reports, so just ignore it.
- if (IsGlobalVar(addr))
- return;
-#endif
- if (IsAppMem(addr)) {
- CHECK(!thr->is_freeing);
- thr->is_freeing = true;
- MemoryWrite(thr, pc, addr, kSizeLog1);
- thr->is_freeing = false;
- }
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s == 0)
return;
+ if (s->is_linker_init) {
+ // Destroy is no-op for linker-initialized mutexes.
+ s->mtx.Unlock();
+ return;
+ }
if (common_flags()->detect_deadlocks) {
Callback cb(thr, pc);
ctx->dd->MutexDestroy(&cb, &s->dd);
@@ -112,7 +105,7 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
u64 mid = s->GetId();
u32 last_lock = s->last_lock;
if (!unlock_locked)
- s->Reset(thr); // must not reset it before the report is printed
+ s->Reset(thr->proc()); // must not reset it before the report is printed
s->mtx.Unlock();
if (unlock_locked) {
ThreadRegistryLock l(ctx->thread_registry);
@@ -126,15 +119,23 @@ void MutexDestroy(ThreadState *thr, uptr pc, uptr addr) {
rep.AddStack(trace, true);
rep.AddLocation(addr, 1);
OutputReport(thr, rep);
- }
- if (unlock_locked) {
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
if (s != 0) {
- s->Reset(thr);
+ s->Reset(thr->proc());
s->mtx.Unlock();
}
}
thr->mset.Remove(mid);
+ // Imitate a memory write to catch unlock-destroy races.
+ // Do this outside of the sync mutex, because reporting a race may itself
+ // lock sync mutexes.
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
// s will be destroyed and freed in MetaMap::FreeBlock.
}
@@ -192,7 +193,7 @@ int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, bool all) {
TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
int rec = 0;
bool report_bad_unlock = false;
- if (kCppMode && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
if (flags()->report_mutex_bugs && !s->is_broken) {
s->is_broken = true;
report_bad_unlock = true;
@@ -348,11 +349,21 @@ void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+}
+
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
if (thr->ignore_sync)
return;
- SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+ if (!s)
+ return;
AcquireImpl(thr, pc, &s->clock);
s->mtx.ReadUnlock();
}
@@ -399,7 +410,7 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
s->mtx.Unlock();
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
@@ -424,7 +435,7 @@ void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
if (thr->ignore_sync)
return;
thr->clock.set(thr->fast_state.epoch());
- thr->clock.acquire(&thr->clock_cache, c);
+ thr->clock.acquire(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncAcquire);
}
@@ -433,7 +444,7 @@ void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.release(&thr->clock_cache, c);
+ thr->clock.release(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
@@ -442,7 +453,7 @@ void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.ReleaseStore(&thr->clock_cache, c);
+ thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncRelease);
}
@@ -451,7 +462,7 @@ void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
return;
thr->clock.set(thr->fast_state.epoch());
thr->fast_synch_epoch = thr->fast_state.epoch();
- thr->clock.acq_rel(&thr->clock_cache, c);
+ thr->clock.acq_rel(&thr->proc()->clock_cache, c);
StatInc(thr, StatSyncAcquire);
StatInc(thr, StatSyncRelease);
}
diff --git a/libsanitizer/tsan/tsan_rtl_ppc64.S b/libsanitizer/tsan/tsan_rtl_ppc64.S
new file mode 100644
index 00000000000..81d309fbd2f
--- /dev/null
+++ b/libsanitizer/tsan/tsan_rtl_ppc64.S
@@ -0,0 +1,286 @@
+#include "tsan_ppc_regs.h"
+ .hidden __tsan_setjmp
+ .globl _setjmp
+ .type _setjmp, @function
+ .align 4
+#if _CALL_ELF == 2
+_setjmp:
+#else
+ .section ".opd","aw"
+ .align 3
+_setjmp:
+ .quad .L._setjmp,.TOC.@tocbase,0
+ .previous
+#endif
+.L._setjmp:
+ mflr r0
+ stdu r1,-48(r1)
+ std r2,24(r1)
+ std r3,32(r1)
+ std r0,40(r1)
+ // r3 is the original stack pointer.
+ addi r3,r1,48
+ // r4 is the mangled stack pointer (see glibc)
+ ld r4,-28696(r13)
+ xor r4,r3,r4
+ // Materialize a TOC in case we were called from libc.
+ // For big-endian, we load the TOC from the OPD. For little-
+ // endian, we use the .TOC. symbol to find it.
+ nop
+ bcl 20,31,0f
+0:
+ mflr r2
+#if _CALL_ELF == 2
+ addis r2,r2,.TOC.-0b@ha
+ addi r2,r2,.TOC.-0b@l
+#else
+ addis r2,r2,_setjmp-0b@ha
+ addi r2,r2,_setjmp-0b@l
+ ld r2,8(r2)
+#endif
+ // Call the interceptor.
+ bl __tsan_setjmp
+ nop
+ // Restore regs needed for setjmp.
+ ld r3,32(r1)
+ ld r0,40(r1)
+ // Emulate the real setjmp function. We do this because we can't
+ // perform a sibcall: The real setjmp function trashes the TOC
+ // pointer, and with a sibcall we have no way to restore it.
+ // This way we can make sure our caller's stack pointer and
+ // link register are saved correctly in the jmpbuf.
+ ld r6,-28696(r13)
+ addi r5,r1,48 // original stack ptr of caller
+ xor r5,r6,r5
+ std r5,0(r3) // mangled stack ptr of caller
+ ld r5,24(r1)
+ std r5,8(r3) // caller's saved TOC pointer
+ xor r0,r6,r0
+ std r0,16(r3) // caller's mangled return address
+ mfcr r0
+ // Nonvolatiles.
+ std r14,24(r3)
+ stfd f14,176(r3)
+ stw r0,172(r3) // CR
+ std r15,32(r3)
+ stfd f15,184(r3)
+ std r16,40(r3)
+ stfd f16,192(r3)
+ std r17,48(r3)
+ stfd f17,200(r3)
+ std r18,56(r3)
+ stfd f18,208(r3)
+ std r19,64(r3)
+ stfd f19,216(r3)
+ std r20,72(r3)
+ stfd f20,224(r3)
+ std r21,80(r3)
+ stfd f21,232(r3)
+ std r22,88(r3)
+ stfd f22,240(r3)
+ std r23,96(r3)
+ stfd f23,248(r3)
+ std r24,104(r3)
+ stfd f24,256(r3)
+ std r25,112(r3)
+ stfd f25,264(r3)
+ std r26,120(r3)
+ stfd f26,272(r3)
+ std r27,128(r3)
+ stfd f27,280(r3)
+ std r28,136(r3)
+ stfd f28,288(r3)
+ std r29,144(r3)
+ stfd f29,296(r3)
+ std r30,152(r3)
+ stfd f30,304(r3)
+ std r31,160(r3)
+ stfd f31,312(r3)
+ addi r5,r3,320
+ mfspr r0,256
+ stw r0,168(r3) // VRSAVE
+ addi r6,r5,16
+ stvx v20,0,r5
+ addi r5,r5,32
+ stvx v21,0,r6
+ addi r6,r6,32
+ stvx v22,0,r5
+ addi r5,r5,32
+ stvx v23,0,r6
+ addi r6,r6,32
+ stvx v24,0,r5
+ addi r5,r5,32
+ stvx v25,0,r6
+ addi r6,r6,32
+ stvx v26,0,r5
+ addi r5,r5,32
+ stvx v27,0,r6
+ addi r6,r6,32
+ stvx v28,0,r5
+ addi r5,r5,32
+ stvx v29,0,r6
+ addi r6,r6,32
+ stvx v30,0,r5
+ stvx v31,0,r6
+ // Clear the "mask-saved" slot.
+ li r4,0
+ stw r4,512(r3)
+ // Restore TOC, LR, and stack and return to caller.
+ ld r2,24(r1)
+ ld r0,40(r1)
+ addi r1,r1,48
+ li r3,0 // This is the setjmp return path
+ mtlr r0
+ blr
+ .size _setjmp, .-.L._setjmp
+
+ .globl setjmp
+ .type setjmp, @function
+ .align 4
+setjmp:
+ b _setjmp
+ .size setjmp, .-setjmp
+
+ // sigsetjmp is like setjmp, except that the mask in r4 needs
+ // to be saved at offset 512 of the jump buffer.
+ .globl __sigsetjmp
+ .type __sigsetjmp, @function
+ .align 4
+#if _CALL_ELF == 2
+__sigsetjmp:
+#else
+ .section ".opd","aw"
+ .align 3
+__sigsetjmp:
+ .quad .L.__sigsetjmp,.TOC.@tocbase,0
+ .previous
+#endif
+.L.__sigsetjmp:
+ mflr r0
+ stdu r1,-64(r1)
+ std r2,24(r1)
+ std r3,32(r1)
+ std r4,40(r1)
+ std r0,48(r1)
+ // r3 is the original stack pointer.
+ addi r3,r1,64
+ // r4 is the mangled stack pointer (see glibc)
+ ld r4,-28696(r13)
+ xor r4,r3,r4
+ // Materialize a TOC in case we were called from libc.
+ // For big-endian, we load the TOC from the OPD. For little-
+ // endian, we use the .TOC. symbol to find it.
+ nop
+ bcl 20,31,1f
+1:
+ mflr r2
+#if _CALL_ELF == 2
+ addis r2,r2,.TOC.-1b@ha
+ addi r2,r2,.TOC.-1b@l
+#else
+ addis r2,r2,_setjmp-1b@ha
+ addi r2,r2,_setjmp-1b@l
+ ld r2,8(r2)
+#endif
+ // Call the interceptor.
+ bl __tsan_setjmp
+ nop
+ // Restore regs needed for __sigsetjmp.
+ ld r3,32(r1)
+ ld r4,40(r1)
+ ld r0,48(r1)
+ // Emulate the real sigsetjmp function. We do this because we can't
+ // perform a sibcall: The real sigsetjmp function trashes the TOC
+ // pointer, and with a sibcall we have no way to restore it.
+ // This way we can make sure our caller's stack pointer and
+ // link register are saved correctly in the jmpbuf.
+ ld r6,-28696(r13)
+ addi r5,r1,64 // original stack ptr of caller
+ xor r5,r6,r5
+ std r5,0(r3) // mangled stack ptr of caller
+ ld r5,24(r1)
+ std r5,8(r3) // caller's saved TOC pointer
+ xor r0,r6,r0
+ std r0,16(r3) // caller's mangled return address
+ mfcr r0
+ // Nonvolatiles.
+ std r14,24(r3)
+ stfd f14,176(r3)
+ stw r0,172(r3) // CR
+ std r15,32(r3)
+ stfd f15,184(r3)
+ std r16,40(r3)
+ stfd f16,192(r3)
+ std r17,48(r3)
+ stfd f17,200(r3)
+ std r18,56(r3)
+ stfd f18,208(r3)
+ std r19,64(r3)
+ stfd f19,216(r3)
+ std r20,72(r3)
+ stfd f20,224(r3)
+ std r21,80(r3)
+ stfd f21,232(r3)
+ std r22,88(r3)
+ stfd f22,240(r3)
+ std r23,96(r3)
+ stfd f23,248(r3)
+ std r24,104(r3)
+ stfd f24,256(r3)
+ std r25,112(r3)
+ stfd f25,264(r3)
+ std r26,120(r3)
+ stfd f26,272(r3)
+ std r27,128(r3)
+ stfd f27,280(r3)
+ std r28,136(r3)
+ stfd f28,288(r3)
+ std r29,144(r3)
+ stfd f29,296(r3)
+ std r30,152(r3)
+ stfd f30,304(r3)
+ std r31,160(r3)
+ stfd f31,312(r3)
+ addi r5,r3,320
+ mfspr r0,256
+ stw r0,168(r3) // VRSAVE
+ addi r6,r5,16
+ stvx v20,0,r5
+ addi r5,r5,32
+ stvx v21,0,r6
+ addi r6,r6,32
+ stvx v22,0,r5
+ addi r5,r5,32
+ stvx v23,0,r6
+ addi r6,r6,32
+ stvx v24,0,r5
+ addi r5,r5,32
+ stvx v25,0,r6
+ addi r6,r6,32
+ stvx v26,0,r5
+ addi r5,r5,32
+ stvx v27,0,r6
+ addi r6,r6,32
+ stvx v28,0,r5
+ addi r5,r5,32
+ stvx v29,0,r6
+ addi r6,r6,32
+ stvx v30,0,r5
+ stvx v31,0,r6
+ // Save into the "mask-saved" slot.
+ stw r4,512(r3)
+ // Restore TOC, LR, and stack and return to caller.
+ ld r2,24(r1)
+ ld r0,48(r1)
+ addi r1,r1,64
+ li r3,0 // This is the sigsetjmp return path
+ mtlr r0
+ blr
+ .size __sigsetjmp, .-.L.__sigsetjmp
+
+ .globl sigsetjmp
+ .type sigsetjmp, @function
+ .align 4
+sigsetjmp:
+ b __sigsetjmp
+ .size sigsetjmp, .-sigsetjmp
diff --git a/libsanitizer/tsan/tsan_rtl_proc.cc b/libsanitizer/tsan/tsan_rtl_proc.cc
new file mode 100644
index 00000000000..1b0a9b38938
--- /dev/null
+++ b/libsanitizer/tsan/tsan_rtl_proc.cc
@@ -0,0 +1,59 @@
+//===-- tsan_rtl_proc.cc ------------------------------------------------===//
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+
+namespace __tsan {
+
+Processor *ProcCreate() {
+ void *mem = InternalAlloc(sizeof(Processor));
+ internal_memset(mem, 0, sizeof(Processor));
+ Processor *proc = new(mem) Processor;
+ proc->thr = nullptr;
+#if !SANITIZER_GO
+ AllocatorProcStart(proc);
+#endif
+ if (common_flags()->detect_deadlocks)
+ proc->dd_pt = ctx->dd->CreatePhysicalThread();
+ return proc;
+}
+
+void ProcDestroy(Processor *proc) {
+ CHECK_EQ(proc->thr, nullptr);
+#if !SANITIZER_GO
+ AllocatorProcFinish(proc);
+#endif
+ ctx->clock_alloc.FlushCache(&proc->clock_cache);
+ ctx->metamap.OnProcIdle(proc);
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyPhysicalThread(proc->dd_pt);
+ proc->~Processor();
+ InternalFree(proc);
+}
+
+void ProcWire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, nullptr);
+ CHECK_EQ(proc->thr, nullptr);
+ thr->proc1 = proc;
+ proc->thr = thr;
+}
+
+void ProcUnwire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, proc);
+ CHECK_EQ(proc->thr, thr);
+ thr->proc1 = nullptr;
+ proc->thr = nullptr;
+}
+
+} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_rtl_report.cc b/libsanitizer/tsan/tsan_rtl_report.cc
index d0d1fbaf45a..8f2882485e6 100644
--- a/libsanitizer/tsan/tsan_rtl_report.cc
+++ b/libsanitizer/tsan/tsan_rtl_report.cc
@@ -36,6 +36,10 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
// on the other hand there is no sense in processing interceptors
// since we are going to die soon.
ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
Printf("FATAL: ThreadSanitizer CHECK failed: "
"%s:%d \"%s\" (0x%zx, 0x%zx)\n",
file, line, cond, (uptr)v1, (uptr)v2);
@@ -47,13 +51,18 @@ void TsanCheckFailed(const char *file, int line, const char *cond,
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
-SANITIZER_INTERFACE_ATTRIBUTE
-bool WEAK OnReport(const ReportDesc *rep, bool suppressed) {
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnReport(const ReportDesc *rep, bool suppressed) {
(void)rep;
return suppressed;
}
#endif
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_on_report(const ReportDesc *rep) {
+ (void)rep;
+}
+
static void StackStripMain(SymbolizedStack *frames) {
SymbolizedStack *last_frame = nullptr;
SymbolizedStack *last_frame2 = nullptr;
@@ -64,7 +73,7 @@ static void StackStripMain(SymbolizedStack *frames) {
if (last_frame2 == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
const char *last = last_frame->info.function;
const char *last2 = last_frame2->info.function;
// Strip frame above 'main'
@@ -184,10 +193,10 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
return;
}
void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
- ReportThread *rt = new(mem) ReportThread();
+ ReportThread *rt = new(mem) ReportThread;
rep_->threads.PushBack(rt);
rt->id = tctx->tid;
- rt->pid = tctx->os_id;
+ rt->os_id = tctx->os_id;
rt->running = (tctx->status == ThreadStatusRunning);
rt->name = internal_strdup(tctx->name);
rt->parent_tid = tctx->parent_tid;
@@ -197,17 +206,17 @@ void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
rt->stack->suppressable = suppressable;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
+static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
+ int unique_id = *(int *)arg;
+ return tctx->unique_id == (u32)unique_id;
+}
+
static ThreadContext *FindThreadByUidLocked(int unique_id) {
ctx->thread_registry->CheckLocked();
- for (unsigned i = 0; i < kMaxTid; i++) {
- ThreadContext *tctx = static_cast<ThreadContext*>(
- ctx->thread_registry->GetThreadLocked(i));
- if (tctx && tctx->unique_id == (u32)unique_id) {
- return tctx;
- }
- }
- return 0;
+ return static_cast<ThreadContext *>(
+ ctx->thread_registry->FindThreadContextLocked(
+ FindThreadByUidLockedCallback, &unique_id));
}
static ThreadContext *FindThreadByTidLocked(int tid) {
@@ -242,7 +251,7 @@ ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
#endif
void ScopedReport::AddThread(int unique_tid, bool suppressable) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
AddThread(tctx, suppressable);
#endif
@@ -254,7 +263,7 @@ void ScopedReport::AddMutex(const SyncVar *s) {
return;
}
void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
- ReportMutex *rm = new(mem) ReportMutex();
+ ReportMutex *rm = new(mem) ReportMutex;
rep_->mutexes.PushBack(rm);
rm->id = s->uid;
rm->addr = s->addr;
@@ -266,7 +275,7 @@ u64 ScopedReport::AddMutex(u64 id) {
u64 uid = 0;
u64 mid = id;
uptr addr = SyncVar::SplitId(id, &uid);
- SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
// Check that the mutex is still alive.
// Another mutex can be created at the same address,
// so check uid as well.
@@ -287,7 +296,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
return;
}
void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
- ReportMutex *rm = new(mem) ReportMutex();
+ ReportMutex *rm = new(mem) ReportMutex;
rep_->mutexes.PushBack(rm);
rm->id = id;
rm->addr = 0;
@@ -298,7 +307,7 @@ void ScopedReport::AddDeadMutex(u64 id) {
void ScopedReport::AddLocation(uptr addr, uptr size) {
if (addr == 0)
return;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
int fd = -1;
int creat_tid = -1;
u32 creat_stack = 0;
@@ -340,15 +349,15 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
rep_->locs.PushBack(loc);
AddThread(tctx);
}
+#endif
if (ReportLocation *loc = SymbolizeData(addr)) {
loc->suppressable = true;
rep_->locs.PushBack(loc);
return;
}
-#endif
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
rep_->sleep = SymbolizeStackId(stack_id);
}
@@ -490,6 +499,8 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
return false;
atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
const ReportDesc *rep = srep.GetReport();
+ CHECK_EQ(thr->current_report, nullptr);
+ thr->current_report = rep;
Suppression *supp = 0;
uptr pc_or_addr = 0;
for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
@@ -510,13 +521,17 @@ bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
thr->is_freeing = false;
bool suppressed = OnReport(rep, pc_or_addr != 0);
thr->is_freeing = old_is_freeing;
- if (suppressed)
+ if (suppressed) {
+ thr->current_report = nullptr;
return false;
+ }
}
PrintReport(rep);
+ __tsan_on_report(rep);
ctx->nreported++;
if (flags()->halt_on_error)
Die();
+ thr->current_report = nullptr;
return true;
}
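With thr->current_report published and the weak __tsan_on_report hook invoked after PrintReport, a client binary can observe every emitted report. A minimal sketch of a client-side override (ReportDesc is treated as opaque here to keep the example self-contained):

    #include <cstdio>

    struct ReportDesc;  // opaque in this sketch; real clients use TSan headers

    // A strong definition in the client overrides the weak default above.
    extern "C" void __tsan_on_report(const ReportDesc *rep) {
      std::fprintf(stderr, "TSan produced a report at %p\n",
                   static_cast<const void *>(rep));
    }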
@@ -647,7 +662,7 @@ void ReportRace(ThreadState *thr) {
rep.AddLocation(addr_min, addr_max - addr_min);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
{ // NOLINT
Shadow s(thr->racy_state[1]);
if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
@@ -667,8 +682,16 @@ void PrintCurrentStack(ThreadState *thr, uptr pc) {
PrintStack(SymbolizeStack(trace));
}
+// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes
+// __sanitizer_print_stack_trace is present in the actually unwound stack; a
+// tail call to PrintCurrentStackSlow would break this assumption because
+// __sanitizer_print_stack_trace disappears after the tail call.
+// However, this solution is not reliable enough; please see dvyukov's comment
+// http://reviews.llvm.org/D19148#406208
+// Also see PR27280 comments 2 and 3 for breaking examples and analysis.
+ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace *ptrace =
new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
BufferedStackTrace();
diff --git a/libsanitizer/tsan/tsan_rtl_thread.cc b/libsanitizer/tsan/tsan_rtl_thread.cc
index 3939c77d41c..6c4b74e2a76 100644
--- a/libsanitizer/tsan/tsan_rtl_thread.cc
+++ b/libsanitizer/tsan/tsan_rtl_thread.cc
@@ -28,7 +28,7 @@ ThreadContext::ThreadContext(int tid)
, epoch1() {
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif
@@ -40,7 +40,7 @@ void ThreadContext::OnDead() {
void ThreadContext::OnJoined(void *arg) {
ThreadState *caller_thr = static_cast<ThreadState *>(arg);
AcquireImpl(caller_thr, 0, &sync);
- sync.Reset(&caller_thr->clock_cache);
+ sync.Reset(&caller_thr->proc()->clock_cache);
}
struct OnCreatedArgs {
@@ -66,13 +66,13 @@ void ThreadContext::OnCreated(void *arg) {
void ThreadContext::OnReset() {
CHECK_EQ(sync.size(), 0);
- FlushUnneededShadowMemory(GetThreadTrace(tid), TraceSize() * sizeof(Event));
- //!!! FlushUnneededShadowMemory(GetThreadTraceHeader(tid), sizeof(Trace));
+ ReleaseMemoryToOS(GetThreadTrace(tid), TraceSize() * sizeof(Event));
+ //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}
void ThreadContext::OnDetached(void *arg) {
ThreadState *thr1 = static_cast<ThreadState*>(arg);
- sync.Reset(&thr1->clock_cache);
+ sync.Reset(&thr1->proc()->clock_cache);
}
struct OnStartedArgs {
@@ -92,7 +92,7 @@ void ThreadContext::OnStarted(void *arg) {
epoch1 = (u64)-1;
new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
@@ -104,13 +104,8 @@ void ThreadContext::OnStarted(void *arg) {
thr->shadow_stack_pos = thr->shadow_stack;
thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
-#ifndef SANITIZER_GO
- AllocatorThreadStart(thr);
-#endif
- if (common_flags()->detect_deadlocks) {
- thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ if (common_flags()->detect_deadlocks)
thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
- }
thr->fast_state.SetHistorySize(flags()->history_size);
// Commit switch to the new part of the trace.
// TraceAddEvent will reset stack0/mset0 in the new part for us.
@@ -119,7 +114,7 @@ void ThreadContext::OnStarted(void *arg) {
thr->fast_synch_epoch = epoch0;
AcquireImpl(thr, 0, &sync);
StatInc(thr, StatSyncAcquire);
- sync.Reset(&thr->clock_cache);
+ sync.Reset(&thr->proc()->clock_cache);
thr->is_inited = true;
DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
"tls_addr=%zx tls_size=%zx\n",
@@ -128,6 +123,12 @@ void ThreadContext::OnStarted(void *arg) {
}
void ThreadContext::OnFinished() {
+#if SANITIZER_GO
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+#endif
if (!detached) {
thr->fast_state.IncrementEpoch();
// Can't increment epoch w/o writing to the trace as well.
@@ -136,15 +137,8 @@ void ThreadContext::OnFinished() {
}
epoch1 = thr->fast_state.epoch();
- if (common_flags()->detect_deadlocks) {
- ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ if (common_flags()->detect_deadlocks)
ctx->dd->DestroyLogicalThread(thr->dd_lt);
- }
- ctx->clock_alloc.FlushCache(&thr->clock_cache);
- ctx->metamap.OnThreadIdle(thr);
-#ifndef SANITIZER_GO
- AllocatorThreadFinish(thr);
-#endif
thr->~ThreadState();
#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
@@ -152,7 +146,7 @@ void ThreadContext::OnFinished() {
thr = 0;
}
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
struct ThreadLeak {
ThreadContext *tctx;
int count;
@@ -174,7 +168,7 @@ static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
}
#endif
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
if (tctx->tid == 0) {
Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
@@ -206,7 +200,7 @@ static void ThreadCheckIgnore(ThreadState *thr) {}
void ThreadFinalize(ThreadState *thr) {
ThreadCheckIgnore(thr);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (!flags()->report_thread_leaks)
return;
ThreadRegistryLock l(ctx->thread_registry);
@@ -244,7 +238,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
uptr stk_size = 0;
uptr tls_addr = 0;
uptr tls_size = 0;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
if (tid) {
@@ -275,7 +269,7 @@ void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
tr->Unlock();
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
if (ctx->after_multithreaded_fork) {
thr->ignore_interceptors++;
ThreadIgnoreBegin(thr, 0);
diff --git a/libsanitizer/tsan/tsan_stat.cc b/libsanitizer/tsan/tsan_stat.cc
index 5eccca69312..740cb86c69e 100644
--- a/libsanitizer/tsan/tsan_stat.cc
+++ b/libsanitizer/tsan/tsan_stat.cc
@@ -166,6 +166,7 @@ void StatOutput(u64 *stat) {
name[StatMtxFired] = " FiredSuppressions ";
name[StatMtxRacy] = " RacyStacks ";
name[StatMtxFD] = " FD ";
+ name[StatMtxGlobalProc] = " GlobalProc ";
Printf("Statistics:\n");
for (int i = 0; i < StatCnt; i++)
diff --git a/libsanitizer/tsan/tsan_stat.h b/libsanitizer/tsan/tsan_stat.h
index 002570f42f1..788adeb56ea 100644
--- a/libsanitizer/tsan/tsan_stat.h
+++ b/libsanitizer/tsan/tsan_stat.h
@@ -171,6 +171,7 @@ enum StatType {
StatMtxFired,
StatMtxRacy,
StatMtxFD,
+ StatMtxGlobalProc,
// This must be the last.
StatCnt
diff --git a/libsanitizer/tsan/tsan_suppressions.cc b/libsanitizer/tsan/tsan_suppressions.cc
index 5c8d03dddd2..dc862b1b9ad 100644
--- a/libsanitizer/tsan/tsan_suppressions.cc
+++ b/libsanitizer/tsan/tsan_suppressions.cc
@@ -19,7 +19,7 @@
#include "tsan_mman.h"
#include "tsan_platform.h"
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Suppressions for true/false positives in standard libraries.
static const char *const std_suppressions =
// Libstdc++ 4.4 has data races in std::string.
@@ -32,7 +32,8 @@ static const char *const std_suppressions =
"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
// Can be overridden in the frontend.
-extern "C" const char *WEAK __tsan_default_suppressions() {
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_suppressions() {
return 0;
}
#endif
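The override point survives the conversion to SANITIZER_WEAK_DEFAULT_IMPL: a frontend that defines a strong __tsan_default_suppressions still wins at link time. A sketch of such an override (the suppression entries are hypothetical):

    // Strong client definition; entries use the normal suppressions syntax.
    extern "C" const char *__tsan_default_suppressions() {
      return "race:LegacyLogger::Flush\n"        // hypothetical symbol
             "deadlock:third_party/locks.cc\n";  // hypothetical file
    }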
@@ -51,7 +52,7 @@ void InitializeSuppressions() {
suppression_ctx = new (suppression_placeholder) // NOLINT
SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
suppression_ctx->ParseFromFile(flags()->suppressions);
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
suppression_ctx->Parse(__tsan_default_suppressions());
suppression_ctx->Parse(std_suppressions);
#endif
@@ -77,6 +78,8 @@ static const char *conv(ReportType typ) {
return kSuppressionMutex;
else if (typ == ReportTypeMutexDoubleLock)
return kSuppressionMutex;
+ else if (typ == ReportTypeMutexInvalidAccess)
+ return kSuppressionMutex;
else if (typ == ReportTypeMutexBadUnlock)
return kSuppressionMutex;
else if (typ == ReportTypeMutexBadReadLock)
@@ -89,7 +92,7 @@ static const char *conv(ReportType typ) {
return kSuppressionNone;
else if (typ == ReportTypeDeadlock)
return kSuppressionDeadlock;
- Printf("ThreadSanitizer: unknown report type %d\n", typ),
+ Printf("ThreadSanitizer: unknown report type %d\n", typ);
Die();
}
@@ -156,8 +159,8 @@ void PrintMatchedSuppressions() {
Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
(int)internal_getpid());
for (uptr i = 0; i < matched.size(); i++) {
- Printf("%d %s:%s\n", matched[i]->hit_count, matched[i]->type,
- matched[i]->templ);
+ Printf("%d %s:%s\n", atomic_load_relaxed(&matched[i]->hit_count),
+ matched[i]->type, matched[i]->templ);
}
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_symbolize.cc b/libsanitizer/tsan/tsan_symbolize.cc
index 0e54562e385..7b04782d1ad 100644
--- a/libsanitizer/tsan/tsan_symbolize.cc
+++ b/libsanitizer/tsan/tsan_symbolize.cc
@@ -36,10 +36,10 @@ void ExitSymbolizer() {
// May be overridden by a JIT, a Java runtime, etc.:
// whatever produces PCs marked with kExternalPCBit.
-extern "C" bool WEAK __tsan_symbolize_external(uptr pc,
- char *func_buf, uptr func_siz,
- char *file_buf, uptr file_siz,
- int *line, int *col) {
+SANITIZER_WEAK_DEFAULT_IMPL
+bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz, int *line,
+ int *col) {
return false;
}
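A JIT or managed runtime can supply a strong definition of this hook to resolve PCs tagged with kExternalPCBit. A hedged client-side sketch (uptr is spelled unsigned long here, and the frame data is invented):

    #include <cstring>

    extern "C" bool __tsan_symbolize_external(unsigned long pc,
                                              char *func_buf,
                                              unsigned long func_siz,
                                              char *file_buf,
                                              unsigned long file_siz,
                                              int *line, int *col) {
      (void)pc;  // a real runtime would consult its JIT code map keyed by pc
      std::strncpy(func_buf, "jitted_function", func_siz);
      std::strncpy(file_buf, "<jit>", file_siz);
      *line = 1;
      *col = 0;
      return true;  // handled; TSan prints this synthetic frame
    }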
@@ -69,7 +69,7 @@ ReportLocation *SymbolizeData(uptr addr) {
if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
return 0;
ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
- ent->global = info;
+ internal_memcpy(&ent->global, &info, sizeof(info));
return ent;
}
diff --git a/libsanitizer/tsan/tsan_sync.cc b/libsanitizer/tsan/tsan_sync.cc
index 91ad8c8b228..0ee2295e69e 100644
--- a/libsanitizer/tsan/tsan_sync.cc
+++ b/libsanitizer/tsan/tsan_sync.cc
@@ -28,13 +28,13 @@ void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
this->next = 0;
creation_stack_id = 0;
- if (kCppMode) // Go does not use them
+ if (!SANITIZER_GO) // Go does not use them
creation_stack_id = CurrentStackId(thr, pc);
if (common_flags()->detect_deadlocks)
DDMutexInit(thr, pc, this);
}
-void SyncVar::Reset(ThreadState *thr) {
+void SyncVar::Reset(Processor *proc) {
uid = 0;
creation_stack_id = 0;
owner_tid = kInvalidTid;
@@ -45,12 +45,12 @@ void SyncVar::Reset(ThreadState *thr) {
is_broken = 0;
is_linker_init = 0;
- if (thr == 0) {
+ if (proc == 0) {
CHECK_EQ(clock.size(), 0);
CHECK_EQ(read_clock.size(), 0);
} else {
- clock.Reset(&thr->clock_cache);
- read_clock.Reset(&thr->clock_cache);
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
}
}
@@ -59,7 +59,7 @@ MetaMap::MetaMap() {
}
void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
- u32 idx = block_alloc_.Alloc(&thr->block_cache);
+ u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
MBlock *b = block_alloc_.Map(idx);
b->siz = sz;
b->tid = thr->tid;
@@ -69,16 +69,16 @@ void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
*meta = idx | kFlagBlock;
}
-uptr MetaMap::FreeBlock(ThreadState *thr, uptr pc, uptr p) {
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
MBlock* b = GetBlock(p);
if (b == 0)
return 0;
uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return sz;
}
-bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
bool has_something = false;
u32 *meta = MemToMeta(p);
u32 *end = MemToMeta(p + sz);
@@ -94,14 +94,14 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
has_something = true;
while (idx != 0) {
if (idx & kFlagBlock) {
- block_alloc_.Free(&thr->block_cache, idx & ~kFlagMask);
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
break;
} else if (idx & kFlagSync) {
DCHECK(idx & kFlagSync);
SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
u32 next = s->next;
- s->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, idx & ~kFlagMask);
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
idx = next;
} else {
CHECK(0);
@@ -117,24 +117,30 @@ bool MetaMap::FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
// which can be huge. The function probes pages one-by-one until it finds a page
// without meta objects; at that point it stops freeing meta objects. Because
// thread stacks grow top-down, we do the same starting from the end as well.
-void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+ if (SANITIZER_GO) {
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
+ // so we do the optimization only for C/C++.
+ FreeRange(proc, p, sz);
+ return;
+ }
const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
if (sz <= 4 * kPageSize) {
// If the range is small, just do the normal free procedure.
- FreeRange(thr, pc, p, sz);
+ FreeRange(proc, p, sz);
return;
}
// First, round both ends of the range to page size.
uptr diff = RoundUp(p, kPageSize) - p;
if (diff != 0) {
- FreeRange(thr, pc, p, diff);
+ FreeRange(proc, p, diff);
p += diff;
sz -= diff;
}
diff = p + sz - RoundDown(p + sz, kPageSize);
if (diff != 0) {
- FreeRange(thr, pc, p + sz - diff, diff);
+ FreeRange(proc, p + sz - diff, diff);
sz -= diff;
}
// Now we must have a non-empty page-aligned range.
@@ -144,18 +150,21 @@ void MetaMap::ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz) {
const uptr p0 = p;
const uptr sz0 = sz;
// Probe start of the range.
- while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p, kPageSize);
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p, kPageSize);
p += kPageSize;
sz -= kPageSize;
- if (!has_something)
+ if (!has_something && checked > (128 << 10))
break;
}
// Probe end of the range.
- while (sz > 0) {
- bool has_something = FreeRange(thr, pc, p - kPageSize, kPageSize);
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
sz -= kPageSize;
- if (!has_something)
+ // Stacks grow down, so sync objects are most likely at the end of the
+ // region (if it is a stack). The very end of the stack is TLS, and tsan
+ // increases TLS by at least 256K, so check at least 512K.
+ if (!has_something && checked > (512 << 10))
break;
}
// Finally, page out the whole range (including the parts that we've just
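The rewritten probes no longer stop at the first empty page; they keep scanning until a minimum budget has been spent (128 << 10 bytes from the front, 512 << 10 from the back, the latter sized for the TLS block above a thread stack). The control flow, distilled into a stand-alone sketch with FreeRange replaced by a stub:

    #include <cstdint>

    // Stub standing in for MetaMap::FreeRange(): true if the probed page
    // contained any meta objects.
    static bool ProbePage(uint64_t /*p*/) { return false; }

    // Distilled front probe: an empty page only ends the scan once at least
    // 'min_checked' bytes have been examined.
    static void ProbeFront(uint64_t p, uint64_t sz, uint64_t page,
                           uint64_t min_checked) {
      for (uint64_t checked = 0; sz > 0; checked += page) {
        bool has_something = ProbePage(p);
        p += page;
        sz -= page;
        if (!has_something && checked > min_checked)
          break;  // first empty page past the budget stops the scan
      }
    }

    int main() { ProbeFront(0x10000, 1 << 20, 4096, 128 << 10); }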
@@ -187,8 +196,8 @@ SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
return GetAndLock(thr, pc, addr, write_lock, true);
}
-SyncVar* MetaMap::GetIfExistsAndLock(uptr addr) {
- return GetAndLock(0, 0, addr, true, false);
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
+ return GetAndLock(0, 0, addr, write_lock, false);
}
SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
@@ -208,8 +217,8 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
if (s->addr == addr) {
if (myidx != 0) {
- mys->Reset(thr);
- sync_alloc_.Free(&thr->sync_cache, myidx);
+ mys->Reset(thr->proc());
+ sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
}
if (write_lock)
s->mtx.Lock();
@@ -228,7 +237,7 @@ SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
if (myidx == 0) {
const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
- myidx = sync_alloc_.Alloc(&thr->sync_cache);
+ myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
mys = sync_alloc_.Map(myidx);
mys->Init(thr, pc, addr, uid);
}
@@ -277,9 +286,9 @@ void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
}
}
-void MetaMap::OnThreadIdle(ThreadState *thr) {
- block_alloc_.FlushCache(&thr->block_cache);
- sync_alloc_.FlushCache(&thr->sync_cache);
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
}
} // namespace __tsan
diff --git a/libsanitizer/tsan/tsan_sync.h b/libsanitizer/tsan/tsan_sync.h
index 50bc872275d..48d7027b428 100644
--- a/libsanitizer/tsan/tsan_sync.h
+++ b/libsanitizer/tsan/tsan_sync.h
@@ -45,19 +45,19 @@ struct SyncVar {
SyncClock clock;
void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
- void Reset(ThreadState *thr);
+ void Reset(Processor *proc);
u64 GetId() const {
- // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
- return GetLsb((u64)addr | (uid << 47), 61);
+ // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+ return GetLsb((u64)addr | (uid << 48), 60);
}
bool CheckId(u64 uid) const {
CHECK_EQ(uid, GetLsb(uid, 14));
return GetLsb(this->uid, 14) == uid;
}
static uptr SplitId(u64 id, u64 *uid) {
- *uid = id >> 47;
- return (uptr)GetLsb(id, 47);
+ *uid = id >> 48;
+ return (uptr)GetLsb(id, 48);
}
};
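
A worked round-trip of the widened packing above (48 address bits instead of 47), assuming GetLsb(v, n) keeps the low n bits of v:

#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;
using uptr = std::uintptr_t;

static u64 GetLsb(u64 v, int n) { return v & (((u64)1 << n) - 1); }

int main() {
  const u64 addr = 0x00007fffdeadbeefULL;   // fits in 48 bits
  const u64 uid = 0x155;                    // small enough to survive the 60-bit mask
  u64 id = GetLsb(addr | (uid << 48), 60);  // pack, as in GetId()
  u64 out_uid = id >> 48;                   // unpack, as in SplitId()
  uptr out_addr = (uptr)GetLsb(id, 48);
  assert(out_addr == addr && out_uid == uid);
  return 0;
}
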
@@ -70,18 +70,18 @@ class MetaMap {
MetaMap();
void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
- uptr FreeBlock(ThreadState *thr, uptr pc, uptr p);
- bool FreeRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
- void ResetRange(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(Processor *proc, uptr p);
+ bool FreeRange(Processor *proc, uptr p, uptr sz);
+ void ResetRange(Processor *proc, uptr p, uptr sz);
MBlock* GetBlock(uptr p);
SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
uptr addr, bool write_lock);
- SyncVar* GetIfExistsAndLock(uptr addr);
+ SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
void MoveMemory(uptr src, uptr dst, uptr sz);
- void OnThreadIdle(ThreadState *thr);
+ void OnProcIdle(Processor *proc);
private:
static const u32 kFlagMask = 3u << 30;
diff --git a/libsanitizer/tsan/tsan_trace.h b/libsanitizer/tsan/tsan_trace.h
index a27efa9fd7a..1ea733bfd07 100644
--- a/libsanitizer/tsan/tsan_trace.h
+++ b/libsanitizer/tsan/tsan_trace.h
@@ -40,7 +40,7 @@ enum EventType {
typedef u64 Event;
struct TraceHeader {
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
BufferedStackTrace stack0; // Start stack for the trace.
#else
VarSizeStackTrace stack0;
@@ -53,7 +53,7 @@ struct TraceHeader {
struct Trace {
Mutex mtx;
-#ifndef SANITIZER_GO
+#if !SANITIZER_GO
// Must be last to catch overflow as paging fault.
// Go shadow stack is dynamically allocated.
uptr shadow_stack[kShadowStackSize];
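
The #ifndef-to-#if changes above follow the sanitizer_common convention that SANITIZER_GO is always defined, to 0 or 1. Unlike #ifndef, #if !SANITIZER_GO evaluates the value, so a missing or misspelled definition can be caught with -Wundef. In isolation:

#define SANITIZER_GO 0   // the build always defines this to 0 or 1

#if !SANITIZER_GO
// C/C++ runtime: statically sized shadow stack, BufferedStackTrace.
#else
// Go runtime: dynamically allocated shadow stack, VarSizeStackTrace.
#endif
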
diff --git a/libsanitizer/ubsan/Makefile.in b/libsanitizer/ubsan/Makefile.in
index 51d4da58516..a816d095f6a 100644
--- a/libsanitizer/ubsan/Makefile.in
+++ b/libsanitizer/ubsan/Makefile.in
@@ -208,6 +208,7 @@ PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
RPC_DEFS = @RPC_DEFS@
+SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS = @SANITIZER_COMMON_TARGET_DEPENDENT_OBJECTS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
diff --git a/libsanitizer/ubsan/ubsan_checks.inc b/libsanitizer/ubsan/ubsan_checks.inc
index 935d82c5079..ea85877198a 100644
--- a/libsanitizer/ubsan/ubsan_checks.inc
+++ b/libsanitizer/ubsan/ubsan_checks.inc
@@ -12,40 +12,32 @@
# error "Define UBSAN_CHECK prior to including this file!"
#endif
-// UBSAN_CHECK(Name, SummaryKind, FlagName)
-// SummaryKind and FlagName should be string literals.
+// UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName)
+// SummaryKind and FSanitizeFlagName should be string literals.
-UBSAN_CHECK(GenericUB, "undefined-behavior", "-fsanitize=undefined")
-UBSAN_CHECK(NullPointerUse, "null-pointer-use", "-fsanitize=null")
-UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use",
- "-fsanitize=alignment")
-UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size",
- "-fsanitize=object-size")
+UBSAN_CHECK(GenericUB, "undefined-behavior", "undefined")
+UBSAN_CHECK(NullPointerUse, "null-pointer-use", "null")
+UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use", "alignment")
+UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size", "object-size")
UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow",
- "-fsanitize=signed-integer-overflow")
+ "signed-integer-overflow")
UBSAN_CHECK(UnsignedIntegerOverflow, "unsigned-integer-overflow",
- "-fsanitize=unsigned-integer-overflow")
+ "unsigned-integer-overflow")
UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero",
- "-fsanitize=integer-divide-by-zero")
-UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero",
- "-fsanitize=float-divide-by-zero")
-UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "-fsanitize=shift-base")
-UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent",
- "-fsanitize=shift-exponent")
-UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "-fsanitize=bounds")
-UBSAN_CHECK(UnreachableCall, "unreachable-call", "-fsanitize=unreachable")
-UBSAN_CHECK(MissingReturn, "missing-return", "-fsanitize=return")
-UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index",
- "-fsanitize=vla-bound")
-UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow",
- "-fsanitize=float-cast-overflow")
-UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "-fsanitize=bool")
-UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "-fsanitize=enum")
-UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch",
- "-fsanitize=function")
+ "integer-divide-by-zero")
+UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero")
+UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base")
+UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent")
+UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds")
+UBSAN_CHECK(UnreachableCall, "unreachable-call", "unreachable")
+UBSAN_CHECK(MissingReturn, "missing-return", "return")
+UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", "vla-bound")
+UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow", "float-cast-overflow")
+UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "bool")
+UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "enum")
+UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch", "function")
UBSAN_CHECK(InvalidNullReturn, "invalid-null-return",
- "-fsanitize=returns-nonnull-attribute")
-UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument",
- "-fsanitize=nonnull-attribute")
-UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "-fsanitize=vptr")
-UBSAN_CHECK(CFIBadType, "cfi-bad-type", "-fsanitize=cfi")
+ "returns-nonnull-attribute")
+UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument", "nonnull-attribute")
+UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "vptr")
+UBSAN_CHECK(CFIBadType, "cfi-bad-type", "cfi")
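
ubsan_checks.inc is an X-macro list: each consumer defines UBSAN_CHECK, includes the file, and undefines the macro again. A minimal hypothetical consumer that collects every flag name into a table, in the same shape as the kSuppressionTypes and ConvertTypeToFlagName additions later in this patch:

static const char *AllFlagNames[] = {
#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) FSanitizeFlagName,
#include "ubsan_checks.inc"
#undef UBSAN_CHECK
};
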
diff --git a/libsanitizer/ubsan/ubsan_diag.cc b/libsanitizer/ubsan/ubsan_diag.cc
index 1197f837f75..b6e982a170e 100644
--- a/libsanitizer/ubsan/ubsan_diag.cc
+++ b/libsanitizer/ubsan/ubsan_diag.cc
@@ -43,7 +43,7 @@ static void MaybePrintStackTrace(uptr pc, uptr bp) {
static const char *ConvertTypeToString(ErrorType Type) {
switch (Type) {
-#define UBSAN_CHECK(Name, SummaryKind, FlagName) \
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) \
case ErrorType::Name: \
return SummaryKind;
#include "ubsan_checks.inc"
@@ -52,6 +52,17 @@ static const char *ConvertTypeToString(ErrorType Type) {
UNREACHABLE("unknown ErrorType!");
}
+static const char *ConvertTypeToFlagName(ErrorType Type) {
+ switch (Type) {
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) \
+ case ErrorType::Name: \
+ return FSanitizeFlagName;
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+ }
+ UNREACHABLE("unknown ErrorType!");
+}
+
static void MaybeReportErrorSummary(Location Loc, ErrorType Type) {
if (!common_flags()->print_summary)
return;
@@ -111,108 +122,98 @@ Diag &Diag::operator<<(const Value &V) {
}
/// Hexadecimal printing for numbers too large for Printf to handle directly.
-static void PrintHex(UIntMax Val) {
+static void RenderHex(InternalScopedString *Buffer, UIntMax Val) {
#if HAVE_INT128_T
- Printf("0x%08x%08x%08x%08x",
- (unsigned int)(Val >> 96),
- (unsigned int)(Val >> 64),
- (unsigned int)(Val >> 32),
- (unsigned int)(Val));
+ Buffer->append("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96),
+ (unsigned int)(Val >> 64), (unsigned int)(Val >> 32),
+ (unsigned int)(Val));
#else
UNREACHABLE("long long smaller than 64 bits?");
#endif
}
-static void renderLocation(Location Loc) {
- InternalScopedString LocBuffer(1024);
+static void RenderLocation(InternalScopedString *Buffer, Location Loc) {
switch (Loc.getKind()) {
case Location::LK_Source: {
SourceLocation SLoc = Loc.getSourceLocation();
if (SLoc.isInvalid())
- LocBuffer.append("<unknown>");
+ Buffer->append("<unknown>");
else
- RenderSourceLocation(&LocBuffer, SLoc.getFilename(), SLoc.getLine(),
+ RenderSourceLocation(Buffer, SLoc.getFilename(), SLoc.getLine(),
SLoc.getColumn(), common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
- break;
+ return;
}
case Location::LK_Memory:
- LocBuffer.append("%p", Loc.getMemoryLocation());
- break;
+ Buffer->append("%p", Loc.getMemoryLocation());
+ return;
case Location::LK_Symbolized: {
const AddressInfo &Info = Loc.getSymbolizedStack()->info;
- if (Info.file) {
- RenderSourceLocation(&LocBuffer, Info.file, Info.line, Info.column,
+ if (Info.file)
+ RenderSourceLocation(Buffer, Info.file, Info.line, Info.column,
common_flags()->symbolize_vs_style,
common_flags()->strip_path_prefix);
- } else if (Info.module) {
- RenderModuleLocation(&LocBuffer, Info.module, Info.module_offset,
+ else if (Info.module)
+ RenderModuleLocation(Buffer, Info.module, Info.module_offset,
common_flags()->strip_path_prefix);
- } else {
- LocBuffer.append("%p", Info.address);
- }
- break;
+ else
+ Buffer->append("%p", Info.address);
+ return;
}
case Location::LK_Null:
- LocBuffer.append("<unknown>");
- break;
+ Buffer->append("<unknown>");
+ return;
}
- Printf("%s:", LocBuffer.data());
}
-static void renderText(const char *Message, const Diag::Arg *Args) {
+static void RenderText(InternalScopedString *Buffer, const char *Message,
+ const Diag::Arg *Args) {
for (const char *Msg = Message; *Msg; ++Msg) {
if (*Msg != '%') {
- char Buffer[64];
- unsigned I;
- for (I = 0; Msg[I] && Msg[I] != '%' && I != 63; ++I)
- Buffer[I] = Msg[I];
- Buffer[I] = '\0';
- Printf(Buffer);
- Msg += I - 1;
- } else {
- const Diag::Arg &A = Args[*++Msg - '0'];
- switch (A.Kind) {
- case Diag::AK_String:
- Printf("%s", A.String);
- break;
- case Diag::AK_TypeName: {
- if (SANITIZER_WINDOWS)
- // The Windows implementation demangles names early.
- Printf("'%s'", A.String);
- else
- Printf("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
- break;
- }
- case Diag::AK_SInt:
- // 'long long' is guaranteed to be at least 64 bits wide.
- if (A.SInt >= INT64_MIN && A.SInt <= INT64_MAX)
- Printf("%lld", (long long)A.SInt);
- else
- PrintHex(A.SInt);
- break;
- case Diag::AK_UInt:
- if (A.UInt <= UINT64_MAX)
- Printf("%llu", (unsigned long long)A.UInt);
- else
- PrintHex(A.UInt);
- break;
- case Diag::AK_Float: {
- // FIXME: Support floating-point formatting in sanitizer_common's
- // printf, and stop using snprintf here.
- char Buffer[32];
+ Buffer->append("%c", *Msg);
+ continue;
+ }
+ const Diag::Arg &A = Args[*++Msg - '0'];
+ switch (A.Kind) {
+ case Diag::AK_String:
+ Buffer->append("%s", A.String);
+ break;
+ case Diag::AK_TypeName: {
+ if (SANITIZER_WINDOWS)
+ // The Windows implementation demangles names early.
+ Buffer->append("'%s'", A.String);
+ else
+ Buffer->append("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
+ break;
+ }
+ case Diag::AK_SInt:
+ // 'long long' is guaranteed to be at least 64 bits wide.
+ if (A.SInt >= INT64_MIN && A.SInt <= INT64_MAX)
+ Buffer->append("%lld", (long long)A.SInt);
+ else
+ RenderHex(Buffer, A.SInt);
+ break;
+ case Diag::AK_UInt:
+ if (A.UInt <= UINT64_MAX)
+ Buffer->append("%llu", (unsigned long long)A.UInt);
+ else
+ RenderHex(Buffer, A.UInt);
+ break;
+ case Diag::AK_Float: {
+ // FIXME: Support floating-point formatting in sanitizer_common's
+ // printf, and stop using snprintf here.
+ char FloatBuffer[32];
#if SANITIZER_WINDOWS
- sprintf_s(Buffer, sizeof(Buffer), "%Lg", (long double)A.Float);
+ sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
#else
- snprintf(Buffer, sizeof(Buffer), "%Lg", (long double)A.Float);
+ snprintf(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
#endif
- Printf("%s", Buffer);
- break;
- }
- case Diag::AK_Pointer:
- Printf("%p", A.Pointer);
- break;
- }
+ Buffer->append("%s", FloatBuffer);
+ break;
+ }
+ case Diag::AK_Pointer:
+ Buffer->append("%p", A.Pointer);
+ break;
}
}
}
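
Diag messages use positional %0..%9 placeholders that index the Args array, each argument rendered according to its kind (string, signed/unsigned integer, float, pointer). A standalone toy model of the dispatch loop above, strings only and without the per-kind formatting:

#include <cstdio>
#include <string>

static std::string RenderTextModel(const char *Msg, const char *Args[]) {
  std::string Out;
  for (; *Msg; ++Msg) {
    if (*Msg != '%') { Out += *Msg; continue; }
    Out += Args[*++Msg - '0'];   // '%3' selects Args[3]; '%' has no escape
  }
  return Out;
}

int main() {
  const char *Args[] = {"signed", "2147483647", "+", "1", "'int'"};
  // Same message shape as the integer-overflow handler later in this patch.
  std::puts(RenderTextModel(
      "%0 integer overflow: %1 %2 %3 cannot be represented in type %4",
      Args).c_str());
  return 0;
}
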
@@ -240,9 +241,9 @@ static inline uptr addNoOverflow(uptr LHS, uptr RHS) {
}
/// Render a snippet of the address space near a location.
-static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
- Range *Ranges, unsigned NumRanges,
- const Diag::Arg *Args) {
+static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
+ Range *Ranges, unsigned NumRanges,
+ const Diag::Arg *Args) {
// Show at least the 8 bytes surrounding Loc.
const unsigned MinBytesNearLoc = 4;
MemoryLocation Min = subtractNoOverflow(Loc, MinBytesNearLoc);
@@ -265,14 +266,15 @@ static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
}
// Emit data.
+ InternalScopedString Buffer(1024);
for (uptr P = Min; P != Max; ++P) {
unsigned char C = *reinterpret_cast<const unsigned char*>(P);
- Printf("%s%02x", (P % 8 == 0) ? " " : " ", C);
+ Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C);
}
- Printf("\n");
+ Buffer.append("\n");
// Emit highlights.
- Printf(Decor.Highlight());
+ Buffer.append(Decor.Highlight());
Range *InRange = upperBound(Min, Ranges, NumRanges);
for (uptr P = Min; P != Max; ++P) {
char Pad = ' ', Byte = ' ';
@@ -284,10 +286,13 @@ static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
Pad = '~';
if (InRange && InRange->getStart().getMemoryLocation() <= P)
Byte = '~';
- char Buffer[] = { Pad, Pad, P == Loc ? '^' : Byte, Byte, 0 };
- Printf((P % 8 == 0) ? Buffer : &Buffer[1]);
+ if (P % 8 == 0)
+ Buffer.append("%c", Pad);
+ Buffer.append("%c", Pad);
+ Buffer.append("%c", P == Loc ? '^' : Byte);
+ Buffer.append("%c", Byte);
}
- Printf("%s\n", Decor.EndHighlight());
+ Buffer.append("%s\n", Decor.EndHighlight());
// Go over the line again, and print names for the ranges.
InRange = 0;
@@ -302,9 +307,9 @@ static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
if (InRange && InRange->getStart().getMemoryLocation() == P) {
while (Spaces--)
- Printf(" ");
- renderText(InRange->getText(), Args);
- Printf("\n");
+ Buffer.append(" ");
+ RenderText(&Buffer, InRange->getText(), Args);
+ Buffer.append("\n");
// FIXME: We only support naming one range for now!
break;
}
@@ -312,6 +317,7 @@ static void renderMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
Spaces += 2;
}
+ Printf("%s", Buffer.data());
// FIXME: Print names for anything we can identify within the line:
//
// * If we can identify the memory itself as belonging to a particular
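
The pattern throughout this refactor: accumulate the whole snippet into one InternalScopedString and emit it with a single Printf, presumably so reports from concurrent threads don't interleave fragment by fragment. The same idea in a standalone miniature (ScopedString is a stand-in, not the real class):

#include <cstdarg>
#include <cstdio>
#include <string>

struct ScopedString {   // stand-in for InternalScopedString
  std::string data;
  void append(const char *fmt, ...) {
    char buf[128];
    va_list ap;
    va_start(ap, fmt);
    vsnprintf(buf, sizeof(buf), fmt, ap);
    va_end(ap);
    data += buf;
  }
};

int main() {
  ScopedString Buffer;
  for (unsigned Byte = 0; Byte < 16; ++Byte)
    Buffer.append("%s%02x", (Byte % 8 == 0) ? "  " : " ", Byte);
  Buffer.append("\n");
  std::fputs(Buffer.data.c_str(), stdout);   // one write for the whole line
  return 0;
}
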
@@ -328,28 +334,30 @@ Diag::~Diag() {
// All diagnostics should be printed under report mutex.
CommonSanitizerReportMutex.CheckLocked();
Decorator Decor;
- Printf(Decor.Bold());
+ InternalScopedString Buffer(1024);
- renderLocation(Loc);
+ Buffer.append(Decor.Bold());
+ RenderLocation(&Buffer, Loc);
+ Buffer.append(":");
switch (Level) {
case DL_Error:
- Printf("%s runtime error: %s%s",
- Decor.Warning(), Decor.EndWarning(), Decor.Bold());
+ Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.EndWarning(),
+ Decor.Bold());
break;
case DL_Note:
- Printf("%s note: %s", Decor.Note(), Decor.EndNote());
+ Buffer.append("%s note: %s", Decor.Note(), Decor.EndNote());
break;
}
- renderText(Message, Args);
+ RenderText(&Buffer, Message, Args);
- Printf("%s\n", Decor.Default());
+ Buffer.append("%s\n", Decor.Default());
+ Printf("%s", Buffer.data());
if (Loc.isMemoryLocation())
- renderMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges,
- NumRanges, Args);
+ PrintMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges, NumRanges, Args);
}
ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc,
@@ -363,14 +371,19 @@ ScopedReport::~ScopedReport() {
MaybePrintStackTrace(Opts.pc, Opts.bp);
MaybeReportErrorSummary(SummaryLoc, Type);
CommonSanitizerReportMutex.Unlock();
- if (Opts.DieAfterReport || flags()->halt_on_error)
+ if (flags()->halt_on_error)
Die();
}
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kVptrCheck[] = "vptr_check";
-static const char *kSuppressionTypes[] = { kVptrCheck };
+static const char *kSuppressionTypes[] = {
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) FSanitizeFlagName,
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+ kVptrCheck,
+};
void __ubsan::InitializeSuppressions() {
CHECK_EQ(nullptr, suppression_ctx);
@@ -386,4 +399,28 @@ bool __ubsan::IsVptrCheckSuppressed(const char *TypeName) {
return suppression_ctx->Match(TypeName, kVptrCheck, &s);
}
+bool __ubsan::IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename) {
+ InitAsStandaloneIfNecessary();
+ CHECK(suppression_ctx);
+ const char *SuppType = ConvertTypeToFlagName(ET);
+ // Fast path: don't symbolize the PC if there are no suppressions for the
+ // given UB type.
+ if (!suppression_ctx->HasSuppressionType(SuppType))
+ return false;
+ Suppression *s = nullptr;
+ // Suppress by file name known to runtime.
+ if (Filename != nullptr && suppression_ctx->Match(Filename, SuppType, &s))
+ return true;
+ // Suppress by module name.
+ if (const char *Module = Symbolizer::GetOrInit()->GetModuleNameForPc(PC)) {
+ if (suppression_ctx->Match(Module, SuppType, &s))
+ return true;
+ }
+ // Suppress by function or source file name from debug info.
+ SymbolizedStackHolder Stack(Symbolizer::GetOrInit()->SymbolizePC(PC));
+ const AddressInfo &AI = Stack.get()->info;
+ return suppression_ctx->Match(AI.function, SuppType, &s) ||
+ suppression_ctx->Match(AI.file, SuppType, &s);
+}
+
#endif // CAN_SANITIZE_UB
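
With every FSanitizeFlagName registered as a suppression type, IsPCSuppressed lets a runtime suppressions file silence individual checks by source file, module, or function name. A hypothetical file in the usual sanitizer "check:pattern" syntax, assuming it is passed as UBSAN_OPTIONS=suppressions=ubsan.supp:

# One "check:pattern" rule per line; '#' starts a comment.
alignment:third_party/legacy_codec.cc
signed-integer-overflow:HashWithWraparound
vptr_check:NoisyBaseClass
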
diff --git a/libsanitizer/ubsan/ubsan_diag.h b/libsanitizer/ubsan/ubsan_diag.h
index 7103d522953..3456aaa72c5 100644
--- a/libsanitizer/ubsan/ubsan_diag.h
+++ b/libsanitizer/ubsan/ubsan_diag.h
@@ -209,23 +209,25 @@ public:
};
struct ReportOptions {
- /// If DieAfterReport is specified, UBSan will terminate the program after the
- /// report is printed.
- bool DieAfterReport;
+ // If FromUnrecoverableHandler is specified, the UBSan runtime handler is
+ // not expected to return.
+ bool FromUnrecoverableHandler;
/// pc/bp are used to unwind the stack trace.
uptr pc;
uptr bp;
};
enum class ErrorType {
-#define UBSAN_CHECK(Name, SummaryKind, FlagName) Name,
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) Name,
#include "ubsan_checks.inc"
#undef UBSAN_CHECK
};
-#define GET_REPORT_OPTIONS(die_after_report) \
+bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET);
+
+#define GET_REPORT_OPTIONS(unrecoverable_handler) \
GET_CALLER_PC_BP; \
- ReportOptions Opts = {die_after_report, pc, bp}
+ ReportOptions Opts = {unrecoverable_handler, pc, bp}
/// \brief Instantiate this class before printing diagnostics in the error
/// report. This class ensures that reports from different threads and from
@@ -236,14 +238,15 @@ class ScopedReport {
ErrorType Type;
public:
- ScopedReport(ReportOptions Opts, Location SummaryLoc,
- ErrorType Type = ErrorType::GenericUB);
- void setErrorType(ErrorType T) { Type = T; }
+ ScopedReport(ReportOptions Opts, Location SummaryLoc, ErrorType Type);
~ScopedReport();
};
void InitializeSuppressions();
bool IsVptrCheckSuppressed(const char *TypeName);
+// Sometimes the UBSan runtime can learn the filename from the handler's
+// arguments, even if debug info is missing.
+bool IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename);
} // namespace __ubsan
diff --git a/libsanitizer/ubsan/ubsan_flags.cc b/libsanitizer/ubsan/ubsan_flags.cc
index 4e7db7a6a57..19f5de1727e 100644
--- a/libsanitizer/ubsan/ubsan_flags.cc
+++ b/libsanitizer/ubsan/ubsan_flags.cc
@@ -57,7 +57,7 @@ void InitializeFlags() {
parser.ParseString(MaybeCallUbsanDefaultOptions());
// Override from environment variable.
parser.ParseString(GetEnv("UBSAN_OPTIONS"));
- SetVerbosity(common_flags()->verbosity);
+ InitializeCommonFlags();
if (Verbosity()) ReportUnrecognizedFlags();
if (common_flags()->help) parser.PrintFlagDescriptions();
diff --git a/libsanitizer/ubsan/ubsan_handlers.cc b/libsanitizer/ubsan/ubsan_handlers.cc
index 8530fcffc88..0e343d32307 100644
--- a/libsanitizer/ubsan/ubsan_handlers.cc
+++ b/libsanitizer/ubsan/ubsan_handlers.cc
@@ -19,17 +19,20 @@
using namespace __sanitizer;
using namespace __ubsan;
-static bool ignoreReport(SourceLocation SLoc, ReportOptions Opts) {
- // If source location is already acquired, we don't need to print an error
- // report for the second time. However, if we're in an unrecoverable handler,
- // it's possible that location was required by concurrently running thread.
- // In this case, we should continue the execution to ensure that any of
- // threads will grab the report mutex and print the report before
- // crashing the program.
- return SLoc.isDisabled() && !Opts.DieAfterReport;
+namespace __ubsan {
+bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) {
+ // We are not allowed to skip the error report: if we are in an
+ // unrecoverable handler, we have to terminate the program right now,
+ // and therefore have to print some diagnostic.
+ //
+ // Even if the source location is disabled, it doesn't mean that we have
+ // already reported an error to the user: some concurrently running
+ // thread could have acquired it, but not yet printed the report.
+ if (Opts.FromUnrecoverableHandler)
+ return false;
+ return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename());
}
-namespace __ubsan {
const char *TypeCheckKinds[] = {
"load of", "store to", "reference binding to", "member access within",
"member call on", "constructor call on", "downcast of", "downcast of",
@@ -39,8 +42,18 @@ const char *TypeCheckKinds[] = {
static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
ReportOptions Opts) {
Location Loc = Data->Loc.acquire();
- // Use the SourceLocation from Data to track deduplication, even if 'invalid'
- if (ignoreReport(Loc.getSourceLocation(), Opts))
+
+ ErrorType ET;
+ if (!Pointer)
+ ET = ErrorType::NullPointerUse;
+ else if (Data->Alignment && (Pointer & (Data->Alignment - 1)))
+ ET = ErrorType::MisalignedPointerUse;
+ else
+ ET = ErrorType::InsufficientObjectSize;
+
+ // Use the SourceLocation from Data to track deduplication, even if it's
+ // invalid.
+ if (ignoreReport(Loc.getSourceLocation(), Opts, ET))
return;
SymbolizedStackHolder FallbackLoc;
@@ -49,24 +62,28 @@ static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
Loc = FallbackLoc;
}
- ScopedReport R(Opts, Loc);
+ ScopedReport R(Opts, Loc, ET);
- if (!Pointer) {
- R.setErrorType(ErrorType::NullPointerUse);
+ switch (ET) {
+ case ErrorType::NullPointerUse:
Diag(Loc, DL_Error, "%0 null pointer of type %1")
- << TypeCheckKinds[Data->TypeCheckKind] << Data->Type;
- } else if (Data->Alignment && (Pointer & (Data->Alignment - 1))) {
- R.setErrorType(ErrorType::MisalignedPointerUse);
+ << TypeCheckKinds[Data->TypeCheckKind] << Data->Type;
+ break;
+ case ErrorType::MisalignedPointerUse:
Diag(Loc, DL_Error, "%0 misaligned address %1 for type %3, "
"which requires %2 byte alignment")
- << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer
- << Data->Alignment << Data->Type;
- } else {
- R.setErrorType(ErrorType::InsufficientObjectSize);
+ << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer
+ << Data->Alignment << Data->Type;
+ break;
+ case ErrorType::InsufficientObjectSize:
Diag(Loc, DL_Error, "%0 address %1 with insufficient space "
"for an object of type %2")
- << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
+ << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type;
+ break;
+ default:
+ UNREACHABLE("unexpected error type!");
}
+
if (Pointer)
Diag(Pointer, DL_Note, "pointer points here");
}
@@ -89,12 +106,14 @@ static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS,
const char *Operator, T RHS,
ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ bool IsSigned = Data->Type.isSignedIntegerTy();
+ ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow
+ : ErrorType::UnsignedIntegerOverflow;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- bool IsSigned = Data->Type.isSignedIntegerTy();
- ScopedReport R(Opts, Loc, IsSigned ? ErrorType::SignedIntegerOverflow
- : ErrorType::UnsignedIntegerOverflow);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, "%0 integer overflow: "
"%1 %2 %3 cannot be represented in type %4")
@@ -102,12 +121,13 @@ static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS,
<< Value(Data->Type, LHS) << Operator << RHS << Data->Type;
}
-#define UBSAN_OVERFLOW_HANDLER(handler_name, op, abort) \
+#define UBSAN_OVERFLOW_HANDLER(handler_name, op, unrecoverable) \
void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \
ValueHandle RHS) { \
- GET_REPORT_OPTIONS(abort); \
+ GET_REPORT_OPTIONS(unrecoverable); \
handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \
- if (abort) Die(); \
+ if (unrecoverable) \
+ Die(); \
}
UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false)
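
For reference, expanding the renamed macro for the recoverable add handler gives roughly:

// UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false) expands to:
void __ubsan::__ubsan_handle_add_overflow(OverflowData *Data, ValueHandle LHS,
                                          ValueHandle RHS) {
  GET_CALLER_PC_BP;                      // from GET_REPORT_OPTIONS(false)
  ReportOptions Opts = {false, pc, bp};  // FromUnrecoverableHandler == false
  handleIntegerOverflowImpl(Data, LHS, "+", Value(Data->Type, RHS), Opts);
  // 'unrecoverable' is false, so there is no Die(): execution continues.
}
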
@@ -120,12 +140,14 @@ UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true)
static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal,
ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ bool IsSigned = Data->Type.isSignedIntegerTy();
+ ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow
+ : ErrorType::UnsignedIntegerOverflow;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- bool IsSigned = Data->Type.isSignedIntegerTy();
- ScopedReport R(Opts, Loc, IsSigned ? ErrorType::SignedIntegerOverflow
- : ErrorType::UnsignedIntegerOverflow);
+ ScopedReport R(Opts, Loc, ET);
if (IsSigned)
Diag(Loc, DL_Error,
@@ -152,22 +174,30 @@ void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data,
static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS,
ValueHandle RHS, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ Value LHSVal(Data->Type, LHS);
+ Value RHSVal(Data->Type, RHS);
+
+ ErrorType ET;
+ if (RHSVal.isMinusOne())
+ ET = ErrorType::SignedIntegerOverflow;
+ else if (Data->Type.isIntegerTy())
+ ET = ErrorType::IntegerDivideByZero;
+ else
+ ET = ErrorType::FloatDivideByZero;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc);
+ ScopedReport R(Opts, Loc, ET);
- Value LHSVal(Data->Type, LHS);
- Value RHSVal(Data->Type, RHS);
- if (RHSVal.isMinusOne()) {
- R.setErrorType(ErrorType::SignedIntegerOverflow);
- Diag(Loc, DL_Error,
- "division of %0 by -1 cannot be represented in type %1")
- << LHSVal << Data->Type;
- } else {
- R.setErrorType(Data->Type.isIntegerTy() ? ErrorType::IntegerDivideByZero
- : ErrorType::FloatDivideByZero);
+ switch (ET) {
+ case ErrorType::SignedIntegerOverflow:
+ Diag(Loc, DL_Error, "division of %0 by -1 cannot be represented in type %1")
+ << LHSVal << Data->Type;
+ break;
+ default:
Diag(Loc, DL_Error, "division by zero");
+ break;
}
}
@@ -188,29 +218,34 @@ static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data,
ValueHandle LHS, ValueHandle RHS,
ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ Value LHSVal(Data->LHSType, LHS);
+ Value RHSVal(Data->RHSType, RHS);
+
+ ErrorType ET;
+ if (RHSVal.isNegative() ||
+ RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth())
+ ET = ErrorType::InvalidShiftExponent;
+ else
+ ET = ErrorType::InvalidShiftBase;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc);
+ ScopedReport R(Opts, Loc, ET);
- Value LHSVal(Data->LHSType, LHS);
- Value RHSVal(Data->RHSType, RHS);
- if (RHSVal.isNegative()) {
- R.setErrorType(ErrorType::InvalidShiftExponent);
- Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal;
- } else if (RHSVal.getPositiveIntValue() >=
- Data->LHSType.getIntegerBitWidth()) {
- R.setErrorType(ErrorType::InvalidShiftExponent);
- Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type %2")
- << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType;
- } else if (LHSVal.isNegative()) {
- R.setErrorType(ErrorType::InvalidShiftBase);
- Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal;
+ if (ET == ErrorType::InvalidShiftExponent) {
+ if (RHSVal.isNegative())
+ Diag(Loc, DL_Error, "shift exponent %0 is negative") << RHSVal;
+ else
+ Diag(Loc, DL_Error, "shift exponent %0 is too large for %1-bit type %2")
+ << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType;
} else {
- R.setErrorType(ErrorType::InvalidShiftBase);
- Diag(Loc, DL_Error,
- "left shift of %0 by %1 places cannot be represented in type %2")
- << LHSVal << RHSVal << Data->LHSType;
+ if (LHSVal.isNegative())
+ Diag(Loc, DL_Error, "left shift of negative value %0") << LHSVal;
+ else
+ Diag(Loc, DL_Error,
+ "left shift of %0 by %1 places cannot be represented in type %2")
+ << LHSVal << RHSVal << Data->LHSType;
}
}
@@ -232,10 +267,12 @@ void __ubsan::__ubsan_handle_shift_out_of_bounds_abort(
static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index,
ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ ErrorType ET = ErrorType::OutOfBoundsIndex;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc, ErrorType::OutOfBoundsIndex);
+ ScopedReport R(Opts, Loc, ET);
Value IndexVal(Data->IndexType, Index);
Diag(Loc, DL_Error, "index %0 out of bounds for type %1")
@@ -282,10 +319,12 @@ void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound,
ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ ErrorType ET = ErrorType::NonPositiveVLAIndex;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc, ErrorType::NonPositiveVLAIndex);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, "variable length array bound evaluates to "
"non-positive value %0")
@@ -327,6 +366,7 @@ static void handleFloatCastOverflow(void *DataPtr, ValueHandle From,
SymbolizedStackHolder CallerLoc;
Location Loc;
const TypeDescriptor *FromType, *ToType;
+ ErrorType ET = ErrorType::FloatCastOverflow;
if (looksLikeFloatCastOverflowDataV1(DataPtr)) {
auto Data = reinterpret_cast<FloatCastOverflowData *>(DataPtr);
@@ -337,14 +377,14 @@ static void handleFloatCastOverflow(void *DataPtr, ValueHandle From,
} else {
auto Data = reinterpret_cast<FloatCastOverflowDataV2 *>(DataPtr);
SourceLocation SLoc = Data->Loc.acquire();
- if (ignoreReport(SLoc, Opts))
+ if (ignoreReport(SLoc, Opts, ET))
return;
Loc = SLoc;
FromType = &Data->FromType;
ToType = &Data->ToType;
}
- ScopedReport R(Opts, Loc, ErrorType::FloatCastOverflow);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error,
"value %0 is outside the range of representable values of type %2")
@@ -365,14 +405,16 @@ void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data,
static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
- return;
-
// This check could be more precise if we used different handlers for
// -fsanitize=bool and -fsanitize=enum.
bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'"));
- ScopedReport R(Opts, Loc, IsBool ? ErrorType::InvalidBoolLoad
- : ErrorType::InvalidEnumLoad);
+ ErrorType ET =
+ IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error,
"load of value %0, which is not a valid value for type %1")
@@ -395,10 +437,12 @@ static void handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
ValueHandle Function,
ReportOptions Opts) {
SourceLocation CallLoc = Data->Loc.acquire();
- if (ignoreReport(CallLoc, Opts))
+ ErrorType ET = ErrorType::FunctionTypeMismatch;
+
+ if (ignoreReport(CallLoc, Opts, ET))
return;
- ScopedReport R(Opts, CallLoc, ErrorType::FunctionTypeMismatch);
+ ScopedReport R(Opts, CallLoc, ET);
SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
const char *FName = FLoc.get()->info.function;
@@ -427,10 +471,12 @@ void __ubsan::__ubsan_handle_function_type_mismatch_abort(
static void handleNonNullReturn(NonNullReturnData *Data, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ ErrorType ET = ErrorType::InvalidNullReturn;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc, ErrorType::InvalidNullReturn);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, "null pointer returned from function declared to never "
"return null");
@@ -451,10 +497,12 @@ void __ubsan::__ubsan_handle_nonnull_return_abort(NonNullReturnData *Data) {
static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ ErrorType ET = ErrorType::InvalidNullArgument;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc, ErrorType::InvalidNullArgument);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, "null pointer passed as argument %0, which is declared to "
"never be null") << Data->ArgIndex;
@@ -473,13 +521,18 @@ void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
Die();
}
-static void handleCFIBadIcall(CFIBadIcallData *Data, ValueHandle Function,
+static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function,
ReportOptions Opts) {
+ if (Data->CheckKind != CFITCK_ICall)
+ Die();
+
SourceLocation Loc = Data->Loc.acquire();
- if (ignoreReport(Loc, Opts))
+ ErrorType ET = ErrorType::CFIBadType;
+
+ if (ignoreReport(Loc, Opts, ET))
return;
- ScopedReport R(Opts, Loc);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during "
"indirect function call")
@@ -492,16 +545,37 @@ static void handleCFIBadIcall(CFIBadIcallData *Data, ValueHandle Function,
Diag(FLoc, DL_Note, "%0 defined here") << FName;
}
-void __ubsan::__ubsan_handle_cfi_bad_icall(CFIBadIcallData *Data,
- ValueHandle Function) {
+namespace __ubsan {
+#ifdef UBSAN_CAN_USE_CXXABI
+SANITIZER_WEAK_ATTRIBUTE
+void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts);
+#else
+static void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts) {
+ Die();
+}
+#endif
+} // namespace __ubsan
+
+void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
+ ValueHandle Value,
+ uptr ValidVtable) {
GET_REPORT_OPTIONS(false);
- handleCFIBadIcall(Data, Function, Opts);
+ if (Data->CheckKind == CFITCK_ICall)
+ handleCFIBadIcall(Data, Value, Opts);
+ else
+ HandleCFIBadType(Data, Value, ValidVtable, Opts);
}
-void __ubsan::__ubsan_handle_cfi_bad_icall_abort(CFIBadIcallData *Data,
- ValueHandle Function) {
+void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data,
+ ValueHandle Value,
+ uptr ValidVtable) {
GET_REPORT_OPTIONS(true);
- handleCFIBadIcall(Data, Function, Opts);
+ if (Data->CheckKind == CFITCK_ICall)
+ handleCFIBadIcall(Data, Value, Opts);
+ else
+ HandleCFIBadType(Data, Value, ValidVtable, Opts);
Die();
}
diff --git a/libsanitizer/ubsan/ubsan_handlers.h b/libsanitizer/ubsan/ubsan_handlers.h
index 668535868e9..ef741ca58ef 100644
--- a/libsanitizer/ubsan/ubsan_handlers.h
+++ b/libsanitizer/ubsan/ubsan_handlers.h
@@ -146,14 +146,25 @@ struct NonNullArgData {
/// \brief Handle passing null pointer to function with nonnull attribute.
RECOVERABLE(nonnull_arg, NonNullArgData *Data)
-struct CFIBadIcallData {
+/// \brief Known CFI check kinds.
+/// Keep in sync with the enum of the same name in CodeGenFunction.h
+enum CFITypeCheckKind : unsigned char {
+ CFITCK_VCall,
+ CFITCK_NVCall,
+ CFITCK_DerivedCast,
+ CFITCK_UnrelatedCast,
+ CFITCK_ICall,
+};
+
+struct CFICheckFailData {
+ CFITypeCheckKind CheckKind;
SourceLocation Loc;
const TypeDescriptor &Type;
};
-/// \brief Handle control flow integrity failure for indirect function calls.
-RECOVERABLE(cfi_bad_icall, CFIBadIcallData *Data, ValueHandle Function)
-
+/// \brief Handle control flow integrity failures.
+RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
+ uptr VtableIsValid)
}
#endif // UBSAN_HANDLERS_H
diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.cc b/libsanitizer/ubsan/ubsan_handlers_cxx.cc
index b50b4d4636d..015a9ffee02 100644
--- a/libsanitizer/ubsan/ubsan_handlers_cxx.cc
+++ b/libsanitizer/ubsan/ubsan_handlers_cxx.cc
@@ -13,6 +13,7 @@
#include "ubsan_platform.h"
#if CAN_SANITIZE_UB
+#include "ubsan_handlers.h"
#include "ubsan_handlers_cxx.h"
#include "ubsan_diag.h"
#include "ubsan_type_hash.h"
@@ -27,34 +28,42 @@ namespace __ubsan {
extern const char *TypeCheckKinds[];
}
-static void HandleDynamicTypeCacheMiss(
+// Returns true if UBSan has printed an error report.
+static bool HandleDynamicTypeCacheMiss(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash,
ReportOptions Opts) {
if (checkDynamicType((void*)Pointer, Data->TypeInfo, Hash))
// Just a cache miss. The type matches after all.
- return;
+ return false;
// Check if error report should be suppressed.
DynamicTypeInfo DTI = getDynamicTypeInfoFromObject((void*)Pointer);
if (DTI.isValid() && IsVptrCheckSuppressed(DTI.getMostDerivedTypeName()))
- return;
+ return false;
SourceLocation Loc = Data->Loc.acquire();
- if (Loc.isDisabled())
- return;
+ ErrorType ET = ErrorType::DynamicTypeMismatch;
+ if (ignoreReport(Loc, Opts, ET))
+ return false;
- ScopedReport R(Opts, Loc, ErrorType::DynamicTypeMismatch);
+ ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error,
"%0 address %1 which does not point to an object of type %2")
<< TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
// If possible, say what type it actually points to.
- if (!DTI.isValid())
- Diag(Pointer, DL_Note, "object has invalid vptr")
- << TypeName(DTI.getMostDerivedTypeName())
- << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
- else if (!DTI.getOffset())
+ if (!DTI.isValid()) {
+ if (DTI.getOffset() < -VptrMaxOffsetToTop || DTI.getOffset() > VptrMaxOffsetToTop) {
+ Diag(Pointer, DL_Note, "object has a possibly invalid vptr: abs(offset to top) too big")
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "possibly invalid vptr");
+ } else {
+ Diag(Pointer, DL_Note, "object has invalid vptr")
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
+ }
+ } else if (!DTI.getOffset())
Diag(Pointer, DL_Note, "object is of type %0")
<< TypeName(DTI.getMostDerivedTypeName())
<< Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
@@ -67,6 +76,7 @@ static void HandleDynamicTypeCacheMiss(
<< TypeName(DTI.getSubobjectTypeName())
<< Range(Pointer, Pointer + sizeof(uptr),
"vptr for %2 base class of %1");
+ return true;
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss(
@@ -76,45 +86,60 @@ void __ubsan::__ubsan_handle_dynamic_type_cache_miss(
}
void __ubsan::__ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
- GET_REPORT_OPTIONS(true);
- HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
+ // Note: -fsanitize=vptr is always recoverable.
+ GET_REPORT_OPTIONS(false);
+ if (HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts))
+ Die();
}
-static void HandleCFIBadType(CFIBadTypeData *Data, ValueHandle Vtable,
- ReportOptions Opts) {
+namespace __ubsan {
+void HandleCFIBadType(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts) {
SourceLocation Loc = Data->Loc.acquire();
- ScopedReport R(Opts, Loc, ErrorType::CFIBadType);
- DynamicTypeInfo DTI = getDynamicTypeInfoFromVtable((void*)Vtable);
+ ErrorType ET = ErrorType::CFIBadType;
- static const char *TypeCheckKinds[] = {
- "virtual call",
- "non-virtual call",
- "base-to-derived cast",
- "cast to unrelated type",
- };
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+ DynamicTypeInfo DTI = ValidVtable
+ ? getDynamicTypeInfoFromVtable((void *)Vtable)
+ : DynamicTypeInfo(0, 0, 0);
+
+ const char *CheckKindStr;
+ switch (Data->CheckKind) {
+ case CFITCK_VCall:
+ CheckKindStr = "virtual call";
+ break;
+ case CFITCK_NVCall:
+ CheckKindStr = "non-virtual call";
+ break;
+ case CFITCK_DerivedCast:
+ CheckKindStr = "base-to-derived cast";
+ break;
+ case CFITCK_UnrelatedCast:
+ CheckKindStr = "cast to unrelated type";
+ break;
+ case CFITCK_ICall:
+ Die();
+ }
Diag(Loc, DL_Error, "control flow integrity check for type %0 failed during "
"%1 (vtable address %2)")
- << Data->Type << TypeCheckKinds[Data->TypeCheckKind] << (void *)Vtable;
+ << Data->Type << CheckKindStr << (void *)Vtable;
// If possible, say what type it actually points to.
- if (!DTI.isValid())
- Diag(Vtable, DL_Note, "invalid vtable");
- else
+ if (!DTI.isValid()) {
+ const char *module = Symbolizer::GetOrInit()->GetModuleNameForPc(Vtable);
+ if (module)
+ Diag(Vtable, DL_Note, "invalid vtable in module %0") << module;
+ else
+ Diag(Vtable, DL_Note, "invalid vtable");
+ } else {
Diag(Vtable, DL_Note, "vtable is of type %0")
<< TypeName(DTI.getMostDerivedTypeName());
+ }
}
+} // namespace __ubsan
-void __ubsan::__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data,
- ValueHandle Vtable) {
- GET_REPORT_OPTIONS(false);
- HandleCFIBadType(Data, Vtable, Opts);
-}
-
-void __ubsan::__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data,
- ValueHandle Vtable) {
- GET_REPORT_OPTIONS(true);
- HandleCFIBadType(Data, Vtable, Opts);
-}
-
-#endif // CAN_SANITIZE_UB
+#endif // CAN_SANITIZE_UB
diff --git a/libsanitizer/ubsan/ubsan_handlers_cxx.h b/libsanitizer/ubsan/ubsan_handlers_cxx.h
index b1486864298..37382359b1e 100644
--- a/libsanitizer/ubsan/ubsan_handlers_cxx.h
+++ b/libsanitizer/ubsan/ubsan_handlers_cxx.h
@@ -23,12 +23,6 @@ struct DynamicTypeCacheMissData {
unsigned char TypeCheckKind;
};
-struct CFIBadTypeData {
- SourceLocation Loc;
- const TypeDescriptor &Type;
- unsigned char TypeCheckKind;
-};
-
/// \brief Handle a runtime type check failure, caused by an incorrect vptr.
/// When this handler is called, all we know is that the type was not in the
/// cache; this does not necessarily imply the existence of a bug.
@@ -38,14 +32,6 @@ void __ubsan_handle_dynamic_type_cache_miss(
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_dynamic_type_cache_miss_abort(
DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
-
-/// \brief Handle a control flow integrity check failure by printing a
-/// diagnostic.
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_cfi_bad_type(CFIBadTypeData *Data, ValueHandle Vtable);
-extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
-__ubsan_handle_cfi_bad_type_abort(CFIBadTypeData *Data, ValueHandle Vtable);
-
}
#endif // UBSAN_HANDLERS_H
diff --git a/libsanitizer/ubsan/ubsan_init.cc b/libsanitizer/ubsan/ubsan_init.cc
index 5da4122c07a..07f748109e7 100644
--- a/libsanitizer/ubsan/ubsan_init.cc
+++ b/libsanitizer/ubsan/ubsan_init.cc
@@ -37,6 +37,7 @@ static void CommonStandaloneInit() {
InitializeFlags();
CacheBinaryName();
__sanitizer_set_report_path(common_flags()->log_path);
+ AndroidLogInit();
InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
CommonInit();
ubsan_mode = UBSAN_MODE_STANDALONE;
diff --git a/libsanitizer/ubsan/ubsan_platform.h b/libsanitizer/ubsan/ubsan_platform.h
index 999bf815493..fa4e1a191aa 100644
--- a/libsanitizer/ubsan/ubsan_platform.h
+++ b/libsanitizer/ubsan/ubsan_platform.h
@@ -15,7 +15,8 @@
// Other platforms should be easy to add, and probably work as-is.
#if (defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__)) && \
(defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \
- defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__))
+ defined(__aarch64__) || defined(__mips__) || defined(__powerpc64__) || \
+ defined(__s390__))
# define CAN_SANITIZE_UB 1
#elif defined(_WIN32)
# define CAN_SANITIZE_UB 1
diff --git a/libsanitizer/ubsan/ubsan_type_hash.h b/libsanitizer/ubsan/ubsan_type_hash.h
index 2da070edffa..610fcb44ea7 100644
--- a/libsanitizer/ubsan/ubsan_type_hash.h
+++ b/libsanitizer/ubsan/ubsan_type_hash.h
@@ -51,6 +51,10 @@ bool checkDynamicType(void *Object, void *Type, HashValue Hash);
const unsigned VptrTypeCacheSize = 128;
+/// A sanity check for vtables: offsets to top must be reasonably small
+/// numbers (by absolute value). It's a weak check for vtable corruption.
+const int VptrMaxOffsetToTop = 1<<20;
+
/// \brief A cache of the results of checkDynamicType. \c checkDynamicType would
/// return \c true (modulo hash collisions) if
/// \code
diff --git a/libsanitizer/ubsan/ubsan_type_hash_itanium.cc b/libsanitizer/ubsan/ubsan_type_hash_itanium.cc
index e4f133434d6..790b126815d 100644
--- a/libsanitizer/ubsan/ubsan_type_hash_itanium.cc
+++ b/libsanitizer/ubsan/ubsan_type_hash_itanium.cc
@@ -71,6 +71,8 @@ public:
namespace abi = __cxxabiv1;
+using namespace __sanitizer;
+
// We implement a simple two-level cache for type-checking results. For each
// (vptr,type) pair, a hash is computed. This hash is assumed to be globally
// unique; if it collides, we will get false negatives, but:
@@ -113,7 +115,9 @@ static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) {
static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
const abi::__class_type_info *Base,
sptr Offset) {
- if (Derived->__type_name == Base->__type_name)
+ if (Derived->__type_name == Base->__type_name ||
+ (SANITIZER_NON_UNIQUE_TYPEINFO &&
+ !internal_strcmp(Derived->__type_name, Base->__type_name)))
return Offset == 0;
if (const abi::__si_class_type_info *SI =
@@ -161,7 +165,7 @@ static const abi::__class_type_info *findBaseAtOffset(
dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
if (!VTI)
// No base class subobjects.
- return 0;
+ return nullptr;
for (unsigned int base = 0; base != VTI->base_count; ++base) {
sptr OffsetHere = VTI->base_info[base].__offset_flags >>
@@ -176,7 +180,7 @@ static const abi::__class_type_info *findBaseAtOffset(
return Base;
}
- return 0;
+ return nullptr;
}
namespace {
@@ -192,11 +196,11 @@ struct VtablePrefix {
VtablePrefix *getVtablePrefix(void *Vtable) {
VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable);
if (!Vptr)
- return 0;
+ return nullptr;
VtablePrefix *Prefix = Vptr - 1;
if (!Prefix->TypeInfo)
// This can't possibly be a valid vtable.
- return 0;
+ return nullptr;
return Prefix;
}
@@ -217,6 +221,10 @@ bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
if (!Vtable)
return false;
+ if (Vtable->Offset < -VptrMaxOffsetToTop || Vtable->Offset > VptrMaxOffsetToTop) {
+ // A too-large or too-small offset is a sign of vtable corruption.
+ return false;
+ }
// Check that this is actually a type_info object for a class type.
abi::__class_type_info *Derived =
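
The new range check leans on the Itanium ABI vtable layout: the vptr points just past a prefix holding the offset-to-top and the RTTI pointer, and a plausible offset-to-top is small in absolute value. A standalone model of the check under those assumptions:

#include <cstdint>

const int VptrMaxOffsetToTop = 1 << 20;   // |offset| beyond ~1M looks corrupt

struct VtablePrefix {    // what sits immediately before the vptr target
  std::intptr_t Offset;  // offset to top of the most-derived object
  void *TypeInfo;        // std::type_info of the dynamic type
};

static bool OffsetLooksSane(const VtablePrefix *Prefix) {
  return Prefix->Offset >= -VptrMaxOffsetToTop &&
         Prefix->Offset <= VptrMaxOffsetToTop;
}
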
@@ -238,7 +246,9 @@ __ubsan::DynamicTypeInfo
__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
if (!Vtable)
- return DynamicTypeInfo(0, 0, 0);
+ return DynamicTypeInfo(nullptr, 0, nullptr);
+ if (Vtable->Offset < -VptrMaxOffsetToTop || Vtable->Offset > VptrMaxOffsetToTop)
+ return DynamicTypeInfo(nullptr, Vtable->Offset, nullptr);
const abi::__class_type_info *ObjectType = findBaseAtOffset(
static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
-Vtable->Offset);
diff --git a/libsanitizer/ubsan/ubsan_value.cc b/libsanitizer/ubsan/ubsan_value.cc
index e327f6ffc2e..3e158f92e07 100644
--- a/libsanitizer/ubsan/ubsan_value.cc
+++ b/libsanitizer/ubsan/ubsan_value.cc
@@ -81,12 +81,12 @@ FloatMax Value::getFloatValue() const {
#endif
case 32: {
float Value;
-#if defined(__BIG_ENDIAN__)
+#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
// For big endian the float value is in the last 4 bytes.
// On some targets we may only have 4 bytes so we count backwards from
// the end of Val to account for both the 32-bit and 64-bit cases.
internal_memcpy(&Value, ((const char*)(&Val + 1)) - 4, 4);
-#else
+#else
internal_memcpy(&Value, &Val, 4);
#endif
return Value;
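
The endianness guard above switches from the nonstandard __BIG_ENDIAN__ to the __BYTE_ORDER__/__ORDER_BIG_ENDIAN__ macros that GCC and Clang predefine on every target. In isolation (UBSAN_VALUE_BIG_ENDIAN is a hypothetical helper name):

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define UBSAN_VALUE_BIG_ENDIAN 1   // float payload is in the last 4 bytes of Val
#else
#define UBSAN_VALUE_BIG_ENDIAN 0   // float payload is in the first 4 bytes of Val
#endif
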